'\" te
.\" Copyright (c) 2013 by Turbo Fredriksson <turbo@bayour.com>. All rights reserved.
.\" Copyright (c) 2017 Datto Inc.
.\" Copyright (c) 2018 by Delphix. All rights reserved.
.\" The contents of this file are subject to the terms of the Common Development
.\" and Distribution License (the "License"). You may not use this file except
.\" in compliance with the License. You can obtain a copy of the license at
.\" usr/src/OPENSOLARIS.LICENSE or http://www.opensolaris.org/os/licensing.
.\"
.\" See the License for the specific language governing permissions and
.\" limitations under the License. When distributing Covered Code, include this
.\" CDDL HEADER in each file and include the License file at
.\" usr/src/OPENSOLARIS.LICENSE. If applicable, add the following below this
.\" CDDL HEADER, with the fields enclosed by brackets "[]" replaced with your
.\" own identifying information:
.\" Portions Copyright [yyyy] [name of copyright owner]
.TH ZFS-MODULE-PARAMETERS 5 "Oct 28, 2017"
.SH NAME
zfs\-module\-parameters \- ZFS module parameters
.SH DESCRIPTION
.sp
.LP
Description of the different parameters to the ZFS module.

.SS "Module parameters"
.sp
.LP

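.sp
.LP
Most of these parameters can be set persistently at module load time or, for
many of them, changed at runtime through sysfs. The following is a minimal
sketch, assuming the usual Linux locations for module options and parameters;
the values shown are illustrative only:
.sp
.nf
# /etc/modprobe.d/zfs.conf -- applied when the zfs module loads
options zfs zfs_arc_max=8589934592 zfs_prefetch_disable=0

# Runtime change and read-back of a single parameter via sysfs
echo 8589934592 > /sys/module/zfs/parameters/zfs_arc_max
cat /sys/module/zfs/parameters/zfs_arc_max
.fi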
.sp
.ne 2
.na
\fBdbuf_cache_max_bytes\fR (ulong)
.ad
.RS 12n
Maximum size in bytes of the dbuf cache. When \fB0\fR this value will default
to \fB1/2^dbuf_cache_shift\fR (1/32) of the target ARC size, otherwise the
provided value in bytes will be used. The behavior of the dbuf cache and its
associated settings can be observed via the \fB/proc/spl/kstat/zfs/dbufstats\fR
kstat.
.sp
Default value: \fB0\fR.
.RE

.sp
.ne 2
.na
\fBdbuf_metadata_cache_max_bytes\fR (ulong)
.ad
.RS 12n
Maximum size in bytes of the metadata dbuf cache. When \fB0\fR this value will
default to \fB1/2^dbuf_metadata_cache_shift\fR (1/64) of the target ARC size,
otherwise the provided value in bytes will be used. The behavior of the
metadata dbuf cache and its associated settings can be observed via the
\fB/proc/spl/kstat/zfs/dbufstats\fR kstat.
.sp
Default value: \fB0\fR.
.RE

.sp
.ne 2
.na
\fBdbuf_cache_hiwater_pct\fR (uint)
.ad
.RS 12n
The percentage over \fBdbuf_cache_max_bytes\fR when dbufs must be evicted
directly.
.sp
Default value: \fB10\fR%.
.RE

.sp
.ne 2
.na
\fBdbuf_cache_lowater_pct\fR (uint)
.ad
.RS 12n
The percentage below \fBdbuf_cache_max_bytes\fR when the evict thread stops
evicting dbufs.
.sp
Default value: \fB10\fR%.
.RE

.sp
.ne 2
.na
\fBdbuf_cache_shift\fR (int)
.ad
.RS 12n
Set the size of the dbuf cache, \fBdbuf_cache_max_bytes\fR, to a log2 fraction
of the target arc size.
.sp
Default value: \fB5\fR.
.RE

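.sp
.LP
As a worked example of the shift-based sizing above (the ARC target size is
assumed, not a default): with a 4 GiB ARC target and the default
\fBdbuf_cache_shift\fR of 5, the dbuf cache is limited to 4 GiB / 2^5 = 128 MiB
unless \fBdbuf_cache_max_bytes\fR is set explicitly:
.sp
.nf
arc_target=$((4 * 1024 * 1024 * 1024))      # assumed 4 GiB ARC target
dbuf_cache_shift=5                          # default
echo $((arc_target >> dbuf_cache_shift))    # 134217728 bytes (128 MiB)
.fi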
.sp
.ne 2
.na
\fBdbuf_metadata_cache_shift\fR (int)
.ad
.RS 12n
Set the size of the dbuf metadata cache, \fBdbuf_metadata_cache_max_bytes\fR,
to a log2 fraction of the target arc size.
.sp
Default value: \fB6\fR.
.RE

.sp
.ne 2
.na
\fBignore_hole_birth\fR (int)
.ad
.RS 12n
When set, the hole_birth optimization will not be used, and all holes will
always be sent on zfs send. Useful if you suspect your datasets are affected
by a bug in hole_birth.
.sp
Use \fB1\fR for on (default) and \fB0\fR for off.
.RE

.sp
.ne 2
.na
\fBl2arc_feed_again\fR (int)
.ad
.RS 12n
Turbo L2ARC warm-up. When the L2ARC is cold the fill interval will be set as
fast as possible.
.sp
Use \fB1\fR for yes (default) and \fB0\fR to disable.
.RE

.sp
.ne 2
.na
\fBl2arc_feed_min_ms\fR (ulong)
.ad
.RS 12n
Min feed interval in milliseconds. Requires \fBl2arc_feed_again=1\fR and only
applicable in related situations.
.sp
Default value: \fB200\fR.
.RE

.sp
.ne 2
.na
\fBl2arc_feed_secs\fR (ulong)
.ad
.RS 12n
Seconds between L2ARC writing
.sp
Default value: \fB1\fR.
.RE

.sp
.ne 2
.na
\fBl2arc_headroom\fR (ulong)
.ad
.RS 12n
How far through the ARC lists to search for L2ARC cacheable content, expressed
as a multiplier of \fBl2arc_write_max\fR
.sp
Default value: \fB2\fR.
.RE

.sp
.ne 2
.na
\fBl2arc_headroom_boost\fR (ulong)
.ad
.RS 12n
Scales \fBl2arc_headroom\fR by this percentage when L2ARC contents are being
successfully compressed before writing. A value of 100 disables this feature.
.sp
Default value: \fB200\fR%.
.RE

.sp
.ne 2
.na
\fBl2arc_noprefetch\fR (int)
.ad
.RS 12n
Do not write buffers to L2ARC if they were prefetched but not used by
applications
.sp
Use \fB1\fR for yes (default) and \fB0\fR to disable.
.RE

.sp
.ne 2
.na
\fBl2arc_norw\fR (int)
.ad
.RS 12n
No reads during writes
.sp
Use \fB1\fR for yes and \fB0\fR for no (default).
.RE

.sp
.ne 2
.na
\fBl2arc_write_boost\fR (ulong)
.ad
.RS 12n
Cold L2ARC devices will have \fBl2arc_write_max\fR increased by this amount
while they remain cold.
.sp
Default value: \fB8,388,608\fR.
.RE

.sp
.ne 2
.na
\fBl2arc_write_max\fR (ulong)
.ad
.RS 12n
Max write bytes per interval
.sp
Default value: \fB8,388,608\fR.
.RE

.sp
.ne 2
.na
\fBmetaslab_aliquot\fR (ulong)
.ad
.RS 12n
Metaslab granularity, in bytes. This is roughly similar to what would be
referred to as the "stripe size" in traditional RAID arrays. In normal
operation, ZFS will try to write this amount of data to a top-level vdev
before moving on to the next one.
.sp
Default value: \fB524,288\fR.
.RE

.sp
.ne 2
.na
\fBmetaslab_bias_enabled\fR (int)
.ad
.RS 12n
Enable metaslab group biasing based on its vdev's over- or under-utilization
relative to the pool.
.sp
Use \fB1\fR for yes (default) and \fB0\fR for no.
.RE

.sp
.ne 2
.na
\fBmetaslab_force_ganging\fR (ulong)
.ad
.RS 12n
Make some blocks above a certain size be gang blocks. This option is used
by the test suite to facilitate testing.
.sp
Default value: \fB16,777,217\fR.
.RE

.sp
.ne 2
.na
\fBzfs_metaslab_segment_weight_enabled\fR (int)
.ad
.RS 12n
Enable/disable segment-based metaslab selection.
.sp
Use \fB1\fR for yes (default) and \fB0\fR for no.
.RE

.sp
.ne 2
.na
\fBzfs_metaslab_switch_threshold\fR (int)
.ad
.RS 12n
When using segment-based metaslab selection, continue allocating
from the active metaslab until \fBzfs_metaslab_switch_threshold\fR
worth of buckets have been exhausted.
.sp
Default value: \fB2\fR.
.RE

.sp
.ne 2
.na
\fBmetaslab_debug_load\fR (int)
.ad
.RS 12n
Load all metaslabs during pool import.
.sp
Use \fB1\fR for yes and \fB0\fR for no (default).
.RE

.sp
.ne 2
.na
\fBmetaslab_debug_unload\fR (int)
.ad
.RS 12n
Prevent metaslabs from being unloaded.
.sp
Use \fB1\fR for yes and \fB0\fR for no (default).
.RE

.sp
.ne 2
.na
\fBmetaslab_fragmentation_factor_enabled\fR (int)
.ad
.RS 12n
Enable use of the fragmentation metric in computing metaslab weights.
.sp
Use \fB1\fR for yes (default) and \fB0\fR for no.
.RE

.sp
.ne 2
.na
\fBzfs_vdev_default_ms_count\fR (int)
.ad
.RS 12n
When a vdev is added, target this number of metaslabs per top-level vdev.
.sp
Default value: \fB200\fR.
.RE

.sp
.ne 2
.na
\fBzfs_vdev_min_ms_count\fR (int)
.ad
.RS 12n
Minimum number of metaslabs to create in a top-level vdev.
.sp
Default value: \fB16\fR.
.RE

.sp
.ne 2
.na
\fBvdev_ms_count_limit\fR (int)
.ad
.RS 12n
Practical upper limit of total metaslabs per top-level vdev.
.sp
Default value: \fB131,072\fR.
.RE

.sp
.ne 2
.na
\fBmetaslab_preload_enabled\fR (int)
.ad
.RS 12n
Enable metaslab group preloading.
.sp
Use \fB1\fR for yes (default) and \fB0\fR for no.
.RE

.sp
.ne 2
.na
\fBmetaslab_lba_weighting_enabled\fR (int)
.ad
.RS 12n
Give more weight to metaslabs with lower LBAs, assuming they have
greater bandwidth as is typically the case on a modern constant
angular velocity disk drive.
.sp
Use \fB1\fR for yes (default) and \fB0\fR for no.
.RE

.sp
.ne 2
.na
\fBspa_config_path\fR (charp)
.ad
.RS 12n
SPA config file
.sp
Default value: \fB/etc/zfs/zpool.cache\fR.
.RE

.sp
.ne 2
.na
\fBspa_asize_inflation\fR (int)
.ad
.RS 12n
Multiplication factor used to estimate actual disk consumption from the
size of data being written. The default value is a worst case estimate,
but lower values may be valid for a given pool depending on its
configuration. Pool administrators who understand the factors involved
may wish to specify a more realistic inflation factor, particularly if
they operate close to quota or capacity limits.
.sp
Default value: \fB24\fR.
.RE

.sp
.ne 2
.na
\fBspa_load_print_vdev_tree\fR (int)
.ad
.RS 12n
Whether to print the vdev tree in the debugging message buffer during pool import.
Use 0 to disable and 1 to enable.
.sp
Default value: \fB0\fR.
.RE

.sp
.ne 2
.na
\fBspa_load_verify_data\fR (int)
.ad
.RS 12n
Whether to traverse data blocks during an "extreme rewind" (\fB-X\fR)
import. Use 0 to disable and 1 to enable.

An extreme rewind import normally performs a full traversal of all
blocks in the pool for verification. If this parameter is set to 0,
the traversal skips non-metadata blocks. It can be toggled once the
import has started to stop or start the traversal of non-metadata blocks.
.sp
Default value: \fB1\fR.
.RE

.sp
.ne 2
.na
\fBspa_load_verify_metadata\fR (int)
.ad
.RS 12n
Whether to traverse blocks during an "extreme rewind" (\fB-X\fR)
pool import. Use 0 to disable and 1 to enable.

An extreme rewind import normally performs a full traversal of all
blocks in the pool for verification. If this parameter is set to 0,
the traversal is not performed. It can be toggled once the import has
started to stop or start the traversal.
.sp
Default value: \fB1\fR.
.RE

.sp
.ne 2
.na
\fBspa_load_verify_maxinflight\fR (int)
.ad
.RS 12n
Maximum concurrent I/Os during the traversal performed during an "extreme
rewind" (\fB-X\fR) pool import.
.sp
Default value: \fB10000\fR.
.RE

.sp
.ne 2
.na
\fBspa_slop_shift\fR (int)
.ad
.RS 12n
Normally, we don't allow the last 3.125% (1/(2^spa_slop_shift)) of space
in the pool to be consumed. This ensures that we don't run the pool
completely out of space, due to unaccounted changes (e.g. to the MOS).
It also limits the worst-case time to allocate space. If we have
less than this amount of free space, most ZPL operations (e.g. write,
create) will return ENOSPC.
.sp
Default value: \fB5\fR.
.RE

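.sp
.LP
As a worked example of the slop calculation (the pool size is assumed): with
the default \fBspa_slop_shift\fR of 5, a 10 TiB pool reserves 1/32 of its
space, about 320 GiB, which ZPL operations may not consume:
.sp
.nf
pool_bytes=$((10 * 1024 * 1024 * 1024 * 1024))    # assumed 10 TiB pool
spa_slop_shift=5                                  # default
echo $((pool_bytes >> spa_slop_shift))            # 343597383680 (~320 GiB)
.fi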
.sp
.ne 2
.na
\fBvdev_removal_max_span\fR (int)
.ad
.RS 12n
During top-level vdev removal, chunks of data are copied from the vdev
which may include free space in order to trade bandwidth for IOPS.
This parameter determines the maximum span of free space (in bytes)
which will be included as "unnecessary" data in a chunk of copied data.

The default value here was chosen to align with
\fBzfs_vdev_read_gap_limit\fR, which is a similar concept when doing
regular reads (but there's no reason it has to be the same).
.sp
Default value: \fB32,768\fR.
.RE

.sp
.ne 2
.na
\fBzfetch_array_rd_sz\fR (ulong)
.ad
.RS 12n
If prefetching is enabled, disable prefetching for reads larger than this size.
.sp
Default value: \fB1,048,576\fR.
.RE

.sp
.ne 2
.na
\fBzfetch_max_distance\fR (uint)
.ad
.RS 12n
Max bytes to prefetch per stream (default 8MB).
.sp
Default value: \fB8,388,608\fR.
.RE

.sp
.ne 2
.na
\fBzfetch_max_streams\fR (uint)
.ad
.RS 12n
Max number of streams per zfetch (prefetch streams per file).
.sp
Default value: \fB8\fR.
.RE

.sp
.ne 2
.na
\fBzfetch_min_sec_reap\fR (uint)
.ad
.RS 12n
Min time before an active prefetch stream can be reclaimed
.sp
Default value: \fB2\fR.
.RE

.sp
.ne 2
.na
\fBzfs_arc_dnode_limit\fR (ulong)
.ad
.RS 12n
When the number of bytes consumed by dnodes in the ARC exceeds this number of
bytes, try to unpin some of it in response to demand for non-metadata. This
value acts as a ceiling to the amount of dnode metadata, and defaults to 0,
which indicates that a percentage based on \fBzfs_arc_dnode_limit_percent\fR of
the ARC meta buffers may be used for dnodes.

See also \fBzfs_arc_meta_prune\fR which serves a similar purpose but is used
when the amount of metadata in the ARC exceeds \fBzfs_arc_meta_limit\fR rather
than in response to overall demand for non-metadata.

.sp
Default value: \fB0\fR.
.RE

.sp
.ne 2
.na
\fBzfs_arc_dnode_limit_percent\fR (ulong)
.ad
.RS 12n
Percentage that can be consumed by dnodes of ARC meta buffers.
.sp
See also \fBzfs_arc_dnode_limit\fR which serves a similar purpose but has a
higher priority if set to a nonzero value.
.sp
Default value: \fB10\fR%.
.RE

.sp
.ne 2
.na
\fBzfs_arc_dnode_reduce_percent\fR (ulong)
.ad
.RS 12n
Percentage of ARC dnodes to try to scan in response to demand for non-metadata
when the number of bytes consumed by dnodes exceeds \fBzfs_arc_dnode_limit\fR.

.sp
Default value: \fB10\fR% of the number of dnodes in the ARC.
.RE

.sp
.ne 2
.na
\fBzfs_arc_average_blocksize\fR (int)
.ad
.RS 12n
The ARC's buffer hash table is sized based on the assumption of an average
block size of \fBzfs_arc_average_blocksize\fR (default 8K). This works out
to roughly 1MB of hash table per 1GB of physical memory with 8-byte pointers.
For configurations with a known larger average block size this value can be
increased to reduce the memory footprint.

.sp
Default value: \fB8192\fR.
.RE

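.sp
.LP
A worked example of the hash table sizing described above (the memory size is
assumed): with 16 GiB of physical memory, the default 8K average block size
and 8-byte pointers give roughly a 16 MiB hash table:
.sp
.nf
physmem=$((16 * 1024 * 1024 * 1024))    # assumed 16 GiB of physical memory
avg_blocksize=8192                      # zfs_arc_average_blocksize default
echo $((physmem / avg_blocksize * 8))   # 16777216 bytes (16 MiB)
.fi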
.sp
.ne 2
.na
\fBzfs_arc_evict_batch_limit\fR (int)
.ad
.RS 12n
Number of ARC headers to evict per sub-list before proceeding to another
sub-list. This batch-style operation prevents entire sub-lists from being
evicted at once but comes at a cost of additional unlocking and locking.
.sp
Default value: \fB10\fR.
.RE

.sp
.ne 2
.na
\fBzfs_arc_grow_retry\fR (int)
.ad
.RS 12n
If set to a non-zero value, it will replace the arc_grow_retry value with this value.
The arc_grow_retry value (default 5) is the number of seconds the ARC will wait before
trying to resume growth after a memory pressure event.
.sp
Default value: \fB0\fR.
.RE

.sp
.ne 2
.na
\fBzfs_arc_lotsfree_percent\fR (int)
.ad
.RS 12n
Throttle I/O when free system memory drops below this percentage of total
system memory. Setting this value to 0 will disable the throttle.
.sp
Default value: \fB10\fR%.
.RE

.sp
.ne 2
.na
\fBzfs_arc_max\fR (ulong)
.ad
.RS 12n
Maximum size of the ARC in bytes. If set to 0 then it will consume 1/2 of
system RAM. This value must be at least 67108864 (64 megabytes).
.sp
This value can be changed dynamically with some caveats. It cannot be set back
to 0 while running, and reducing it below the current ARC size will not cause
the ARC to shrink without memory pressure to induce shrinking.
.sp
Default value: \fB0\fR.
.RE

.sp
.ne 2
.na
\fBzfs_arc_meta_adjust_restarts\fR (ulong)
.ad
.RS 12n
The number of restart passes to make while scanning the ARC attempting
to free buffers in order to stay below the \fBzfs_arc_meta_limit\fR.
This value should not need to be tuned but is available to facilitate
performance analysis.
.sp
Default value: \fB4096\fR.
.RE

.sp
.ne 2
.na
\fBzfs_arc_meta_limit\fR (ulong)
.ad
.RS 12n
The maximum allowed size in bytes that meta data buffers are allowed to
consume in the ARC. When this limit is reached meta data buffers will
be reclaimed even if the overall arc_c_max has not been reached. This
value defaults to 0, which indicates that a percentage based on
\fBzfs_arc_meta_limit_percent\fR of the ARC may be used for meta data.
.sp
This value may be changed dynamically except that it cannot be set back to 0
for a specific percent of the ARC; it must be set to an explicit value.
.sp
Default value: \fB0\fR.
.RE

.sp
.ne 2
.na
\fBzfs_arc_meta_limit_percent\fR (ulong)
.ad
.RS 12n
Percentage of ARC buffers that can be used for meta data.

See also \fBzfs_arc_meta_limit\fR which serves a similar purpose but has a
higher priority if set to a nonzero value.

.sp
Default value: \fB75\fR%.
.RE

.sp
.ne 2
.na
\fBzfs_arc_meta_min\fR (ulong)
.ad
.RS 12n
The minimum allowed size in bytes that meta data buffers may consume in
the ARC. This value defaults to 0 which disables a floor on the amount
of the ARC devoted to meta data.
.sp
Default value: \fB0\fR.
.RE

.sp
.ne 2
.na
\fBzfs_arc_meta_prune\fR (int)
.ad
.RS 12n
The number of dentries and inodes to be scanned looking for entries
which can be dropped. This may be required when the ARC reaches the
\fBzfs_arc_meta_limit\fR because dentries and inodes can pin buffers
in the ARC. Increasing this value will cause the dentry and inode caches
to be pruned more aggressively. Setting this value to 0 will disable
pruning the inode and dentry caches.
.sp
Default value: \fB10,000\fR.
.RE

.sp
.ne 2
.na
\fBzfs_arc_meta_strategy\fR (int)
.ad
.RS 12n
Define the strategy for ARC meta data buffer eviction (meta reclaim strategy).
A value of 0 (META_ONLY) will evict only the ARC meta data buffers.
A value of 1 (BALANCED) indicates that additional data buffers may be evicted
if that is required in order to evict the required number of meta data buffers.
.sp
Default value: \fB1\fR.
.RE

.sp
.ne 2
.na
\fBzfs_arc_min\fR (ulong)
.ad
.RS 12n
Minimum size of the ARC in bytes. If set to 0 then arc_c_min will default to
consuming the larger of 32M or 1/32 of total system memory.
.sp
Default value: \fB0\fR.
.RE

.sp
.ne 2
.na
\fBzfs_arc_min_prefetch_ms\fR (int)
.ad
.RS 12n
Minimum time prefetched blocks are locked in the ARC, specified in ms.
A value of \fB0\fR will default to 1000 ms.
.sp
Default value: \fB0\fR.
.RE

.sp
.ne 2
.na
\fBzfs_arc_min_prescient_prefetch_ms\fR (int)
.ad
.RS 12n
Minimum time "prescient prefetched" blocks are locked in the ARC, specified
in ms. These blocks are meant to be prefetched fairly aggressively ahead of
the code that may use them. A value of \fB0\fR will default to 6000 ms.
.sp
Default value: \fB0\fR.
.RE

.sp
.ne 2
.na
\fBzfs_max_missing_tvds\fR (int)
.ad
.RS 12n
Number of missing top-level vdevs which will be allowed during
pool import (only in read-only mode).
.sp
Default value: \fB0\fR.
.RE

.sp
.ne 2
.na
\fBzfs_multilist_num_sublists\fR (int)
.ad
.RS 12n
To allow more fine-grained locking, each ARC state contains a series
of lists for both data and meta data objects. Locking is performed at
the level of these "sub-lists". This parameter controls the number of
sub-lists per ARC state, and also applies to other uses of the
multilist data structure.
.sp
Default value: \fB4\fR or the number of online CPUs, whichever is greater.
.RE

.sp
.ne 2
.na
\fBzfs_arc_overflow_shift\fR (int)
.ad
.RS 12n
The ARC size is considered to be overflowing if it exceeds the current
ARC target size (arc_c) by a threshold determined by this parameter.
The threshold is calculated as a fraction of arc_c using the formula
"arc_c >> \fBzfs_arc_overflow_shift\fR".

The default value of 8 causes the ARC to be considered to be overflowing
if it exceeds the target size by 1/256th (0.39%) of the target size.

When the ARC is overflowing, new buffer allocations are stalled until
the reclaim thread catches up and the overflow condition no longer exists.
.sp
Default value: \fB8\fR.
.RE

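.sp
.LP
A worked example of the overflow formula (the ARC target is assumed): with an
8 GiB arc_c and the default shift of 8, the ARC is considered overflowing once
it exceeds the target by more than 32 MiB:
.sp
.nf
arc_c=$((8 * 1024 * 1024 * 1024))           # assumed 8 GiB ARC target
zfs_arc_overflow_shift=8                    # default
echo $((arc_c >> zfs_arc_overflow_shift))   # 33554432 bytes (32 MiB)
.fi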
.sp
.ne 2
.na
\fBzfs_arc_p_min_shift\fR (int)
.ad
.RS 12n
If set to a non-zero value, this will update arc_p_min_shift (default 4)
with the new value.
arc_p_min_shift is used as a shift of arc_c for calculating both the min
and max arc_p.
.sp
Default value: \fB0\fR.
.RE

.sp
.ne 2
.na
\fBzfs_arc_p_dampener_disable\fR (int)
.ad
.RS 12n
Disable arc_p adapt dampener
.sp
Use \fB1\fR for yes (default) and \fB0\fR to disable.
.RE

.sp
.ne 2
.na
\fBzfs_arc_shrink_shift\fR (int)
.ad
.RS 12n
If set to a non-zero value, this will update arc_shrink_shift (default 7)
with the new value.
.sp
Default value: \fB0\fR.
.RE

.sp
.ne 2
.na
\fBzfs_arc_pc_percent\fR (uint)
.ad
.RS 12n
Percent of pagecache to reclaim arc to

This tunable allows ZFS arc to play more nicely with the kernel's LRU
pagecache. It can guarantee that the arc size won't collapse under scanning
pressure on the pagecache, yet still allows arc to be reclaimed down to
zfs_arc_min if necessary. This value is specified as percent of pagecache
size (as measured by NR_FILE_PAGES) where that percent may exceed 100. This
only operates during memory pressure/reclaim.
.sp
Default value: \fB0\fR% (disabled).
.RE

.sp
.ne 2
.na
\fBzfs_arc_sys_free\fR (ulong)
.ad
.RS 12n
The target number of bytes the ARC should leave as free memory on the system.
Defaults to the larger of 1/64 of physical memory or 512K. Setting this
option to a non-zero value will override the default.
.sp
Default value: \fB0\fR.
.RE

.sp
.ne 2
.na
\fBzfs_autoimport_disable\fR (int)
.ad
.RS 12n
Disable pool import at module load by ignoring the cache file (typically \fB/etc/zfs/zpool.cache\fR).
.sp
Use \fB1\fR for yes (default) and \fB0\fR for no.
.RE

.sp
.ne 2
.na
\fBzfs_checksums_per_second\fR (int)
.ad
.RS 12n
Rate limit checksum events to this many per second. Note that this should
not be set below the zed thresholds (currently 10 checksums over 10 sec)
or else zed may not trigger any action.
.sp
Default value: \fB20\fR.
.RE

.sp
.ne 2
.na
\fBzfs_commit_timeout_pct\fR (int)
.ad
.RS 12n
This controls the amount of time that a ZIL block (lwb) will remain "open"
when it isn't "full", and it has a thread waiting for it to be committed to
stable storage. The timeout is scaled based on a percentage of the last lwb
latency to avoid significantly impacting the latency of each individual
transaction record (itx).
.sp
Default value: \fB5\fR%.
.RE

.sp
.ne 2
.na
\fBzfs_condense_indirect_vdevs_enable\fR (int)
.ad
.RS 12n
Enable condensing indirect vdev mappings. When set to a non-zero value,
attempt to condense indirect vdev mappings if the mapping uses more than
\fBzfs_condense_min_mapping_bytes\fR bytes of memory and if the obsolete
space map object uses more than \fBzfs_condense_max_obsolete_bytes\fR
bytes on-disk. The condensing process is an attempt to save memory by
removing obsolete mappings.
.sp
Default value: \fB1\fR.
.RE

.sp
.ne 2
.na
\fBzfs_condense_max_obsolete_bytes\fR (ulong)
.ad
.RS 12n
Only attempt to condense indirect vdev mappings if the on-disk size
of the obsolete space map object is greater than this number of bytes
(see \fBzfs_condense_indirect_vdevs_enable\fR).
.sp
Default value: \fB1,073,741,824\fR.
.RE

.sp
.ne 2
.na
\fBzfs_condense_min_mapping_bytes\fR (ulong)
.ad
.RS 12n
Minimum size vdev mapping to attempt to condense (see
\fBzfs_condense_indirect_vdevs_enable\fR).
.sp
Default value: \fB131,072\fR.
.RE

.sp
.ne 2
.na
\fBzfs_dbgmsg_enable\fR (int)
.ad
.RS 12n
Internally ZFS keeps a small log to facilitate debugging. By default the log
is disabled; to enable it set this option to 1. The contents of the log can
be accessed by reading the /proc/spl/kstat/zfs/dbgmsg file. Writing 0 to
this proc file clears the log.
.sp
Default value: \fB0\fR.
.RE

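.sp
.LP
A short usage sketch of the debug log interfaces named above:
.sp
.nf
echo 1 > /sys/module/zfs/parameters/zfs_dbgmsg_enable   # enable logging
cat /proc/spl/kstat/zfs/dbgmsg                          # read the log
echo 0 > /proc/spl/kstat/zfs/dbgmsg                     # clear the log
.fi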
.sp
.ne 2
.na
\fBzfs_dbgmsg_maxsize\fR (int)
.ad
.RS 12n
The maximum size in bytes of the internal ZFS debug log.
.sp
Default value: \fB4M\fR.
.RE

.sp
.ne 2
.na
\fBzfs_dbuf_state_index\fR (int)
.ad
.RS 12n
This feature is currently unused. It is normally used for controlling what
reporting is available under /proc/spl/kstat/zfs.
.sp
Default value: \fB0\fR.
.RE

.sp
.ne 2
.na
\fBzfs_deadman_enabled\fR (int)
.ad
.RS 12n
When a pool sync operation takes longer than \fBzfs_deadman_synctime_ms\fR
milliseconds, or when an individual I/O takes longer than
\fBzfs_deadman_ziotime_ms\fR milliseconds, then the operation is considered to
be "hung". If \fBzfs_deadman_enabled\fR is set then the deadman behavior is
invoked as described by the \fBzfs_deadman_failmode\fR module option.
By default the deadman is enabled and configured to \fBwait\fR which results
in "hung" I/Os only being logged. The deadman is automatically disabled
when a pool gets suspended.
.sp
Default value: \fB1\fR.
.RE

.sp
.ne 2
.na
\fBzfs_deadman_failmode\fR (charp)
.ad
.RS 12n
Controls the failure behavior when the deadman detects a "hung" I/O. Valid
values are \fBwait\fR, \fBcontinue\fR, and \fBpanic\fR.
.sp
\fBwait\fR - Wait for a "hung" I/O to complete. For each "hung" I/O a
"deadman" event will be posted describing that I/O.
.sp
\fBcontinue\fR - Attempt to recover from a "hung" I/O by re-dispatching it
to the I/O pipeline if possible.
.sp
\fBpanic\fR - Panic the system. This can be used to facilitate an automatic
fail-over to a properly configured fail-over partner.
.sp
Default value: \fBwait\fR.
.RE

.sp
.ne 2
.na
\fBzfs_deadman_checktime_ms\fR (int)
.ad
.RS 12n
Check time in milliseconds. This defines the frequency at which we check
for hung I/O and potentially invoke the \fBzfs_deadman_failmode\fR behavior.
.sp
Default value: \fB60,000\fR.
.RE

.sp
.ne 2
.na
\fBzfs_deadman_synctime_ms\fR (ulong)
.ad
.RS 12n
Interval in milliseconds after which the deadman is triggered and also
the interval after which a pool sync operation is considered to be "hung".
Once this limit is exceeded the deadman will be invoked every
\fBzfs_deadman_checktime_ms\fR milliseconds until the pool sync completes.
.sp
Default value: \fB600,000\fR.
.RE

.sp
.ne 2
.na
\fBzfs_deadman_ziotime_ms\fR (ulong)
.ad
.RS 12n
Interval in milliseconds after which the deadman is triggered and an
individual I/O operation is considered to be "hung". As long as the I/O
remains "hung" the deadman will be invoked every \fBzfs_deadman_checktime_ms\fR
milliseconds until the I/O completes.
.sp
Default value: \fB300,000\fR.
.RE

.sp
.ne 2
.na
\fBzfs_dedup_prefetch\fR (int)
.ad
.RS 12n
Enable prefetching dedup-ed blocks
.sp
Use \fB1\fR for yes and \fB0\fR to disable (default).
.RE

.sp
.ne 2
.na
\fBzfs_delay_min_dirty_percent\fR (int)
.ad
.RS 12n
Start to delay each transaction once there is this amount of dirty data,
expressed as a percentage of \fBzfs_dirty_data_max\fR.
This value should be >= zfs_vdev_async_write_active_max_dirty_percent.
See the section "ZFS TRANSACTION DELAY".
.sp
Default value: \fB60\fR%.
.RE

.sp
.ne 2
.na
\fBzfs_delay_scale\fR (int)
.ad
.RS 12n
This controls how quickly the transaction delay approaches infinity.
Larger values cause longer delays for a given amount of dirty data.
.sp
For the smoothest delay, this value should be about 1 billion divided
by the maximum number of operations per second. This will smoothly
handle between 10x and 1/10th this number.
.sp
See the section "ZFS TRANSACTION DELAY".
.sp
Note: \fBzfs_delay_scale\fR * \fBzfs_dirty_data_max\fR must be < 2^64.
.sp
Default value: \fB500,000\fR.
.RE

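.sp
.LP
A worked example of the sizing rule above (the throughput figure is assumed):
for a pool that sustains roughly 2,000 write operations per second, the
suggested scale is 1,000,000,000 / 2,000, which happens to equal the default:
.sp
.nf
max_ops_per_sec=2000                    # assumed workload
echo $((1000000000 / max_ops_per_sec))  # 500000 (the default)
.fi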
.sp
.ne 2
.na
\fBzfs_slow_io_events_per_second\fR (int)
.ad
.RS 12n
Rate limit delay zevents (which report slow I/Os) to this many per second.
.sp
Default value: \fB20\fR.
.RE

.sp
.ne 2
.na
\fBzfs_delete_blocks\fR (ulong)
.ad
.RS 12n
This is used to define a large file for the purposes of delete. Files
containing more than \fBzfs_delete_blocks\fR blocks will be deleted
asynchronously, while smaller files are deleted synchronously. Decreasing this
value will reduce the time spent in an unlink(2) system call at the expense of
a longer delay before the freed space is available.
.sp
Default value: \fB20,480\fR.
.RE

.sp
.ne 2
.na
\fBzfs_dirty_data_max\fR (int)
.ad
.RS 12n
Determines the dirty space limit in bytes. Once this limit is exceeded, new
writes are halted until space frees up. This parameter takes precedence
over \fBzfs_dirty_data_max_percent\fR.
See the section "ZFS TRANSACTION DELAY".
.sp
Default value: \fB10\fR% of physical RAM, capped at \fBzfs_dirty_data_max_max\fR.
.RE

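.sp
.LP
As a worked example of the default sizing (the memory size is assumed): on a
machine with 32 GiB of RAM, the 10% default and the 25% cap work out as
follows:
.sp
.nf
ram=$((32 * 1024 * 1024 * 1024))   # assumed 32 GiB of physical RAM
echo $((ram * 10 / 100))           # zfs_dirty_data_max, ~3.2 GiB
echo $((ram * 25 / 100))           # zfs_dirty_data_max_max, 8 GiB
.fi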
.sp
.ne 2
.na
\fBzfs_dirty_data_max_max\fR (int)
.ad
.RS 12n
Maximum allowable value of \fBzfs_dirty_data_max\fR, expressed in bytes.
This limit is only enforced at module load time, and will be ignored if
\fBzfs_dirty_data_max\fR is later changed. This parameter takes
precedence over \fBzfs_dirty_data_max_max_percent\fR. See the section
"ZFS TRANSACTION DELAY".
.sp
Default value: \fB25\fR% of physical RAM.
.RE

.sp
.ne 2
.na
\fBzfs_dirty_data_max_max_percent\fR (int)
.ad
.RS 12n
Maximum allowable value of \fBzfs_dirty_data_max\fR, expressed as a
percentage of physical RAM. This limit is only enforced at module load
time, and will be ignored if \fBzfs_dirty_data_max\fR is later changed.
The parameter \fBzfs_dirty_data_max_max\fR takes precedence over this
one. See the section "ZFS TRANSACTION DELAY".
.sp
Default value: \fB25\fR%.
.RE

.sp
.ne 2
.na
\fBzfs_dirty_data_max_percent\fR (int)
.ad
.RS 12n
Determines the dirty space limit, expressed as a percentage of all
memory. Once this limit is exceeded, new writes are halted until space frees
up. The parameter \fBzfs_dirty_data_max\fR takes precedence over this
one. See the section "ZFS TRANSACTION DELAY".
.sp
Default value: \fB10\fR%, subject to \fBzfs_dirty_data_max_max\fR.
.RE

.sp
.ne 2
.na
\fBzfs_dirty_data_sync_percent\fR (int)
.ad
.RS 12n
Start syncing out a transaction group if there's at least this much dirty data
as a percentage of \fBzfs_dirty_data_max\fR. This should be less than
\fBzfs_vdev_async_write_active_min_dirty_percent\fR.
.sp
Default value: \fB20\fR% of \fBzfs_dirty_data_max\fR.
.RE

.sp
.ne 2
.na
\fBzfs_fletcher_4_impl\fR (string)
.ad
.RS 12n
Select a fletcher 4 implementation.
.sp
Supported selectors are: \fBfastest\fR, \fBscalar\fR, \fBsse2\fR, \fBssse3\fR,
\fBavx2\fR, \fBavx512f\fR, and \fBaarch64_neon\fR.
All of the selectors except \fBfastest\fR and \fBscalar\fR require instruction
set extensions to be available and will only appear if ZFS detects that they are
present at runtime. If multiple implementations of fletcher 4 are available,
the \fBfastest\fR will be chosen using a micro benchmark. Selecting \fBscalar\fR
results in the original, CPU based calculation, being used. Selecting any option
other than \fBfastest\fR and \fBscalar\fR results in vector instructions from
the respective CPU instruction set being used.
.sp
Default value: \fBfastest\fR.
.RE

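.sp
.LP
A brief usage sketch: reading the parameter lists the selectors detected on
this system (the active one is typically shown in brackets), and writing one
of the listed names selects it:
.sp
.nf
cat /sys/module/zfs/parameters/zfs_fletcher_4_impl
echo scalar > /sys/module/zfs/parameters/zfs_fletcher_4_impl
.fi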
.sp
.ne 2
.na
\fBzfs_free_bpobj_enabled\fR (int)
.ad
.RS 12n
Enable/disable the processing of the free_bpobj object.
.sp
Default value: \fB1\fR.
.RE

.sp
.ne 2
.na
\fBzfs_async_block_max_blocks\fR (ulong)
.ad
.RS 12n
Maximum number of blocks freed in a single txg.
.sp
Default value: \fB100,000\fR.
.RE

.sp
.ne 2
.na
\fBzfs_override_estimate_recordsize\fR (ulong)
.ad
.RS 12n
Record size calculation override for zfs send estimates.
.sp
Default value: \fB0\fR.
.RE

.sp
.ne 2
.na
\fBzfs_vdev_async_read_max_active\fR (int)
.ad
.RS 12n
Maximum asynchronous read I/Os active to each device.
See the section "ZFS I/O SCHEDULER".
.sp
Default value: \fB3\fR.
.RE

.sp
.ne 2
.na
\fBzfs_vdev_async_read_min_active\fR (int)
.ad
.RS 12n
Minimum asynchronous read I/Os active to each device.
See the section "ZFS I/O SCHEDULER".
.sp
Default value: \fB1\fR.
.RE

.sp
.ne 2
.na
\fBzfs_vdev_async_write_active_max_dirty_percent\fR (int)
.ad
.RS 12n
When the pool has more than
\fBzfs_vdev_async_write_active_max_dirty_percent\fR dirty data, use
\fBzfs_vdev_async_write_max_active\fR to limit active async writes. If
the dirty data is between min and max, the active I/O limit is linearly
interpolated. See the section "ZFS I/O SCHEDULER".
.sp
Default value: \fB60\fR%.
.RE

.sp
.ne 2
.na
\fBzfs_vdev_async_write_active_min_dirty_percent\fR (int)
.ad
.RS 12n
When the pool has less than
\fBzfs_vdev_async_write_active_min_dirty_percent\fR dirty data, use
\fBzfs_vdev_async_write_min_active\fR to limit active async writes. If
the dirty data is between min and max, the active I/O limit is linearly
interpolated. See the section "ZFS I/O SCHEDULER".
.sp
Default value: \fB30\fR%.
.RE

.sp
.ne 2
.na
\fBzfs_vdev_async_write_max_active\fR (int)
.ad
.RS 12n
Maximum asynchronous write I/Os active to each device.
See the section "ZFS I/O SCHEDULER".
.sp
Default value: \fB10\fR.
.RE

.sp
.ne 2
.na
\fBzfs_vdev_async_write_min_active\fR (int)
.ad
.RS 12n
Minimum asynchronous write I/Os active to each device.
See the section "ZFS I/O SCHEDULER".
.sp
Lower values are associated with better latency on rotational media but poorer
resilver performance. The default value of 2 was chosen as a compromise. A
value of 3 has been shown to improve resilver performance further at a cost of
further increasing latency.
.sp
Default value: \fB2\fR.
.RE

.sp
.ne 2
.na
\fBzfs_vdev_initializing_max_active\fR (int)
.ad
.RS 12n
Maximum initializing I/Os active to each device.
See the section "ZFS I/O SCHEDULER".
.sp
Default value: \fB1\fR.
.RE

.sp
.ne 2
.na
\fBzfs_vdev_initializing_min_active\fR (int)
.ad
.RS 12n
Minimum initializing I/Os active to each device.
See the section "ZFS I/O SCHEDULER".
.sp
Default value: \fB1\fR.
.RE

.sp
.ne 2
.na
\fBzfs_vdev_max_active\fR (int)
.ad
.RS 12n
The maximum number of I/Os active to each device. Ideally, this will be >=
the sum of each queue's max_active. It must be at least the sum of each
queue's min_active. See the section "ZFS I/O SCHEDULER".
.sp
Default value: \fB1,000\fR.
.RE

.sp
.ne 2
.na
\fBzfs_vdev_removal_max_active\fR (int)
.ad
.RS 12n
Maximum removal I/Os active to each device.
See the section "ZFS I/O SCHEDULER".
.sp
Default value: \fB2\fR.
.RE

.sp
.ne 2
.na
\fBzfs_vdev_removal_min_active\fR (int)
.ad
.RS 12n
Minimum removal I/Os active to each device.
See the section "ZFS I/O SCHEDULER".
.sp
Default value: \fB1\fR.
.RE

.sp
.ne 2
.na
\fBzfs_vdev_scrub_max_active\fR (int)
.ad
.RS 12n
Maximum scrub I/Os active to each device.
See the section "ZFS I/O SCHEDULER".
.sp
Default value: \fB2\fR.
.RE

.sp
.ne 2
.na
\fBzfs_vdev_scrub_min_active\fR (int)
.ad
.RS 12n
Minimum scrub I/Os active to each device.
See the section "ZFS I/O SCHEDULER".
.sp
Default value: \fB1\fR.
.RE

.sp
.ne 2
.na
\fBzfs_vdev_sync_read_max_active\fR (int)
.ad
.RS 12n
Maximum synchronous read I/Os active to each device.
See the section "ZFS I/O SCHEDULER".
.sp
Default value: \fB10\fR.
.RE

.sp
.ne 2
.na
\fBzfs_vdev_sync_read_min_active\fR (int)
.ad
.RS 12n
Minimum synchronous read I/Os active to each device.
See the section "ZFS I/O SCHEDULER".
.sp
Default value: \fB10\fR.
.RE

.sp
.ne 2
.na
\fBzfs_vdev_sync_write_max_active\fR (int)
.ad
.RS 12n
Maximum synchronous write I/Os active to each device.
See the section "ZFS I/O SCHEDULER".
.sp
Default value: \fB10\fR.
.RE

.sp
.ne 2
.na
\fBzfs_vdev_sync_write_min_active\fR (int)
.ad
.RS 12n
Minimum synchronous write I/Os active to each device.
See the section "ZFS I/O SCHEDULER".
.sp
Default value: \fB10\fR.
.RE

.sp
.ne 2
.na
\fBzfs_vdev_queue_depth_pct\fR (int)
.ad
.RS 12n
Maximum number of queued allocations per top-level vdev expressed as
a percentage of \fBzfs_vdev_async_write_max_active\fR. This allows the
system to detect devices that are more capable of handling allocations
and to allocate more blocks to those devices. It allows for dynamic
allocation distribution when devices are imbalanced, as fuller devices
will tend to be slower than empty devices.

See also \fBzio_dva_throttle_enabled\fR.
.sp
Default value: \fB1000\fR%.
.RE

.sp
.ne 2
.na
\fBzfs_expire_snapshot\fR (int)
.ad
.RS 12n
Seconds to expire .zfs/snapshot
.sp
Default value: \fB300\fR.
.RE

.sp
.ne 2
.na
\fBzfs_admin_snapshot\fR (int)
.ad
.RS 12n
Allow the creation, removal, or renaming of entries in the .zfs/snapshot
directory to cause the creation, destruction, or renaming of snapshots.
When enabled this functionality works both locally and over NFS exports
which have the 'no_root_squash' option set. This functionality is disabled
by default.
.sp
Use \fB1\fR for yes and \fB0\fR for no (default).
.RE

.sp
.ne 2
.na
\fBzfs_flags\fR (int)
.ad
.RS 12n
Set additional debugging flags. The following flags may be bitwise-or'd
together.
.sp
.TS
box;
rB lB
lB lB
r l.
Value	Symbolic Name
	Description
_
1	ZFS_DEBUG_DPRINTF
	Enable dprintf entries in the debug log.
_
2	ZFS_DEBUG_DBUF_VERIFY *
	Enable extra dbuf verifications.
_
4	ZFS_DEBUG_DNODE_VERIFY *
	Enable extra dnode verifications.
_
8	ZFS_DEBUG_SNAPNAMES
	Enable snapshot name verification.
_
16	ZFS_DEBUG_MODIFY
	Check for illegally modified ARC buffers.
_
64	ZFS_DEBUG_ZIO_FREE
	Enable verification of block frees.
_
128	ZFS_DEBUG_HISTOGRAM_VERIFY
	Enable extra spacemap histogram verifications.
_
256	ZFS_DEBUG_METASLAB_VERIFY
	Verify space accounting on disk matches in-core range_trees.
_
512	ZFS_DEBUG_SET_ERROR
	Enable SET_ERROR and dprintf entries in the debug log.
.TE
.sp
* Requires debug build.
.sp
Default value: \fB0\fR.
.RE

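.sp
.LP
For example, to combine flags, bitwise-or their values; enabling both
ZFS_DEBUG_DPRINTF (1) and ZFS_DEBUG_SET_ERROR (512) means writing 513:
.sp
.nf
echo $((1 | 512)) > /sys/module/zfs/parameters/zfs_flags   # writes 513
.fi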
.sp
.ne 2
.na
\fBzfs_free_leak_on_eio\fR (int)
.ad
.RS 12n
If destroy encounters an EIO while reading metadata (e.g. indirect
blocks), space referenced by the missing metadata can not be freed.
Normally this causes the background destroy to become "stalled", as
it is unable to make forward progress. While in this stalled state,
all remaining space to free from the error-encountering filesystem is
"temporarily leaked". Set this flag to cause it to ignore the EIO,
permanently leak the space from indirect blocks that can not be read,
and continue to free everything else that it can.

The default, "stalling" behavior is useful if the storage partially
fails (i.e. some but not all i/os fail), and then later recovers. In
this case, we will be able to continue pool operations while it is
partially failed, and when it recovers, we can continue to free the
space, with no leaks. However, note that this case is actually
fairly rare.

Typically pools either (a) fail completely (but perhaps temporarily,
e.g. a top-level vdev going offline), or (b) have localized,
permanent errors (e.g. disk returns the wrong data due to bit flip or
firmware bug). In case (a), this setting does not matter because the
pool will be suspended and the sync thread will not be able to make
forward progress regardless. In case (b), because the error is
permanent, the best we can do is leak the minimum amount of space,
which is what setting this flag will do. Therefore, it is reasonable
for this flag to normally be set, but we chose the more conservative
approach of not setting it, so that there is no possibility of
leaking space in the "partial temporary" failure case.
.sp
Default value: \fB0\fR.
.RE

.sp
.ne 2
.na
\fBzfs_free_min_time_ms\fR (int)
.ad
.RS 12n
During a \fBzfs destroy\fR operation using \fBfeature@async_destroy\fR a minimum
of this much time will be spent working on freeing blocks per txg.
.sp
Default value: \fB1,000\fR.
.RE

.sp
.ne 2
.na
\fBzfs_immediate_write_sz\fR (long)
.ad
.RS 12n
Largest data block to write to zil. Larger blocks will be treated as if the
dataset being written to had the property setting \fBlogbias=throughput\fR.
.sp
Default value: \fB32,768\fR.
.RE

.sp
.ne 2
.na
\fBzfs_initialize_value\fR (ulong)
.ad
.RS 12n
Pattern written to vdev free space by \fBzpool initialize\fR.
.sp
Default value: \fB16,045,690,984,833,335,022\fR (0xdeadbeefdeadbeee).
.RE

.sp
.ne 2
.na
\fBzfs_lua_max_instrlimit\fR (ulong)
.ad
.RS 12n
The maximum execution time limit that can be set for a ZFS channel program,
specified as a number of Lua instructions.
.sp
Default value: \fB100,000,000\fR.
.RE

.sp
.ne 2
.na
\fBzfs_lua_max_memlimit\fR (ulong)
.ad
.RS 12n
The maximum memory limit that can be set for a ZFS channel program, specified
in bytes.
.sp
Default value: \fB104,857,600\fR.
.RE

.sp
.ne 2
.na
\fBzfs_max_dataset_nesting\fR (int)
.ad
.RS 12n
The maximum depth of nested datasets. This value can be tuned temporarily to
fix existing datasets that exceed the predefined limit.
.sp
Default value: \fB50\fR.
.RE

.sp
.ne 2
.na
\fBzfs_max_recordsize\fR (int)
.ad
.RS 12n
We currently support block sizes from 512 bytes to 16MB. The benefits of
larger blocks, and thus larger I/O, need to be weighed against the cost of
COWing a giant block to modify one byte. Additionally, very large blocks
can have an impact on i/o latency, and also potentially on the memory
allocator. Therefore, we do not allow the recordsize to be set larger than
zfs_max_recordsize (default 1MB). Larger blocks can be created by changing
this tunable, and pools with larger blocks can always be imported and used,
regardless of this setting.
.sp
Default value: \fB1,048,576\fR.
.RE

.sp
.ne 2
.na
\fBzfs_metaslab_fragmentation_threshold\fR (int)
.ad
.RS 12n
Allow metaslabs to keep their active state as long as their fragmentation
percentage is less than or equal to this value. An active metaslab that
exceeds this threshold will no longer keep its active status allowing
better metaslabs to be selected.
.sp
Default value: \fB70\fR.
.RE

.sp
.ne 2
.na
\fBzfs_mg_fragmentation_threshold\fR (int)
.ad
.RS 12n
Metaslab groups are considered eligible for allocations if their
fragmentation metric (measured as a percentage) is less than or equal to
this value. If a metaslab group exceeds this threshold then it will be
skipped unless all metaslab groups within the metaslab class have also
crossed this threshold.
.sp
Default value: \fB85\fR.
.RE

.sp
.ne 2
.na
\fBzfs_mg_noalloc_threshold\fR (int)
.ad
.RS 12n
Defines a threshold at which metaslab groups should be eligible for
allocations. The value is expressed as a percentage of free space
beyond which a metaslab group is always eligible for allocations.
If a metaslab group's free space is less than or equal to the
threshold, the allocator will avoid allocating to that group
unless all groups in the pool have reached the threshold. Once all
groups have reached the threshold, all groups are allowed to accept
allocations. The default value of 0 disables the feature and causes
all metaslab groups to be eligible for allocations.

This parameter allows one to deal with pools having heavily imbalanced
vdevs such as would be the case when a new vdev has been added.
Setting the threshold to a non-zero percentage will stop allocations
from being made to vdevs that aren't filled to the specified percentage
and allow lesser filled vdevs to acquire more allocations than they
otherwise would under the old \fBzfs_mg_alloc_failures\fR facility.
.sp
Default value: \fB0\fR.
.RE

.sp
.ne 2
.na
\fBzfs_ddt_data_is_special\fR (int)
.ad
.RS 12n
If enabled, ZFS will place DDT data into the special allocation class.
.sp
Default value: \fB1\fR.
.RE

.sp
.ne 2
.na
\fBzfs_user_indirect_is_special\fR (int)
.ad
.RS 12n
If enabled, ZFS will place user data (both file and zvol) indirect blocks
into the special allocation class.
.sp
Default value: \fB1\fR.
.RE

.sp
.ne 2
.na
\fBzfs_multihost_history\fR (int)
.ad
.RS 12n
Historical statistics for the last N multihost updates will be available in
\fB/proc/spl/kstat/zfs/<pool>/multihost\fR
.sp
Default value: \fB0\fR.
.RE

.sp
.ne 2
.na
\fBzfs_multihost_interval\fR (ulong)
.ad
.RS 12n
Used to control the frequency of multihost writes which are performed when the
\fBmultihost\fR pool property is on. This is one factor used to determine
the length of the activity check during import.
.sp
The multihost write period is \fBzfs_multihost_interval / leaf-vdevs\fR milliseconds.
This means that on average a multihost write will be issued for each leaf vdev every
\fBzfs_multihost_interval\fR milliseconds. In practice, the observed period can
vary with the I/O load and this observed value is the delay which is stored in
the uberblock.
.sp
On import the activity check waits a minimum amount of time determined by
\fBzfs_multihost_interval * zfs_multihost_import_intervals\fR. The activity
check time may be further extended if the value of mmp delay found in the best
uberblock indicates actual multihost updates happened at longer intervals than
\fBzfs_multihost_interval\fR. A minimum value of \fB100ms\fR is enforced.
.sp
Default value: \fB1000\fR.
.RE

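.sp
.LP
A worked example of the timing formulas above (the vdev count is assumed):
with the default interval of 1000 ms and a pool of 8 leaf vdevs, one multihost
write is issued roughly every 125 ms, and the minimum activity check on import
is 10 seconds:
.sp
.nf
zfs_multihost_interval=1000                    # ms, default
leaf_vdevs=8                                   # assumed pool layout
echo $((zfs_multihost_interval / leaf_vdevs))  # 125 ms between writes
zfs_multihost_import_intervals=10              # default
echo $((zfs_multihost_interval * zfs_multihost_import_intervals))  # 10000 ms
.fi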
.sp
.ne 2
.na
\fBzfs_multihost_import_intervals\fR (uint)
.ad
.RS 12n
Used to control the duration of the activity test on import. Smaller values of
\fBzfs_multihost_import_intervals\fR will reduce the import time but increase
the risk of failing to detect an active pool. The total activity check time is
never allowed to drop below one second. A value of 0 is ignored and treated as
if it was set to 1.
.sp
Default value: \fB10\fR.
.RE

.sp
.ne 2
.na
\fBzfs_multihost_fail_intervals\fR (uint)
.ad
.RS 12n
Controls the behavior of the pool when multihost write failures are detected.
.sp
When \fBzfs_multihost_fail_intervals = 0\fR then multihost write failures are
ignored. The failures will still be reported to the ZED which, depending on its
configuration, may take action such as suspending the pool or offlining a
device.
.sp
When \fBzfs_multihost_fail_intervals > 0\fR then sequential multihost write
failures will cause the pool to be suspended. This occurs when
\fBzfs_multihost_fail_intervals * zfs_multihost_interval\fR milliseconds have
passed since the last successful multihost write. This guarantees the activity
test will see multihost writes if the pool is imported.
.sp
Default value: \fB5\fR.
.RE

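.sp
.LP
As a worked example with the defaults: the pool is suspended once no multihost
write has succeeded for \fBzfs_multihost_fail_intervals * zfs_multihost_interval\fR
milliseconds:
.sp
.nf
zfs_multihost_fail_intervals=5   # default
zfs_multihost_interval=1000      # ms, default
echo $((zfs_multihost_fail_intervals * zfs_multihost_interval))   # 5000 ms
.fi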
.sp
.ne 2
.na
\fBzfs_no_scrub_io\fR (int)
.ad
.RS 12n
Set for no scrub I/O. This results in scrubs not actually scrubbing data and
simply doing a metadata crawl of the pool instead.
.sp
Use \fB1\fR for yes and \fB0\fR for no (default).
.RE

.sp
.ne 2
.na
\fBzfs_no_scrub_prefetch\fR (int)
.ad
.RS 12n
Set to disable block prefetching for scrubs.
.sp
Use \fB1\fR for yes and \fB0\fR for no (default).
.RE

.sp
.ne 2
.na
\fBzfs_nocacheflush\fR (int)
.ad
.RS 12n
Disable cache flush operations on disks when writing. Setting this will
cause pool corruption on power loss if a volatile out-of-order write cache
is enabled.
.sp
Use \fB1\fR for yes and \fB0\fR for no (default).
.RE

.sp
.ne 2
.na
\fBzfs_nopwrite_enabled\fR (int)
.ad
.RS 12n
Enable NOP writes
.sp
Use \fB1\fR for yes (default) and \fB0\fR to disable.
.RE

.sp
.ne 2
.na
\fBzfs_dmu_offset_next_sync\fR (int)
.ad
.RS 12n
Enable forcing txg sync to find holes. When enabled, this forces ZFS to act
like prior versions when SEEK_HOLE or SEEK_DATA flags are used: when a dnode
is dirty, txgs are synced so that the hole information can be found.
.sp
Use \fB1\fR for yes and \fB0\fR to disable (default).
.RE

.sp
.ne 2
.na
\fBzfs_pd_bytes_max\fR (int)
.ad
.RS 12n
The number of bytes which should be prefetched during a pool traversal
(eg: \fBzfs send\fR or other data crawling operations)
.sp
Default value: \fB52,428,800\fR.
.RE

.sp
.ne 2
.na
\fBzfs_per_txg_dirty_frees_percent\fR (ulong)
.ad
.RS 12n
Tunable to control percentage of dirtied blocks from frees in one TXG.
After this threshold is crossed, additional dirty blocks from frees
wait until the next TXG.
A value of zero will disable this throttle.
.sp
Default value: \fB30\fR (\fB0\fR to disable).
.RE

.sp
.ne 2
.na
\fBzfs_prefetch_disable\fR (int)
.ad
.RS 12n
This tunable disables predictive prefetch. Note that it leaves "prescient"
prefetch (e.g. prefetch for zfs send) intact. Unlike predictive prefetch,
prescient prefetch never issues i/os that end up not being needed, so it
can't hurt performance.
.sp
Use \fB1\fR for yes and \fB0\fR for no (default).
.RE

.sp
.ne 2
.na
\fBzfs_read_chunk_size\fR (long)
.ad
.RS 12n
Bytes to read per chunk
.sp
Default value: \fB1,048,576\fR.
.RE

.sp
.ne 2
.na
\fBzfs_read_history\fR (int)
.ad
.RS 12n
Historical statistics for the last N reads will be available in
\fB/proc/spl/kstat/zfs/<pool>/reads\fR
.sp
Default value: \fB0\fR (no data is kept).
.RE

.sp
.ne 2
.na
\fBzfs_read_history_hits\fR (int)
.ad
.RS 12n
Include cache hits in read history
.sp
Use \fB1\fR for yes and \fB0\fR for no (default).
.RE

.sp
.ne 2
.na
\fBzfs_reconstruct_indirect_combinations_max\fR (int)
.ad
.RS 12n
If an indirect split block contains more than this many possible unique
combinations when being reconstructed, consider it too computationally
expensive to check them all. Instead, try at most
\fBzfs_reconstruct_indirect_combinations_max\fR randomly-selected
combinations each time the block is accessed. This allows all segment
copies to participate fairly in the reconstruction when all combinations
cannot be checked and prevents repeated use of one bad copy.
.sp
Default value: \fB4096\fR.
.RE

.sp
.ne 2
.na
\fBzfs_recover\fR (int)
.ad
.RS 12n
Set to attempt to recover from fatal errors. This should only be used as a
last resort, as it typically results in leaked space, or worse.
.sp
Use \fB1\fR for yes and \fB0\fR for no (default).
.RE

.sp
.ne 2
.na
\fBzfs_removal_ignore_errors\fR (int)
.ad
.RS 12n
Ignore hard IO errors during device removal. When set, if a device encounters
a hard IO error during the removal process the removal will not be cancelled.
This can result in a normally recoverable block becoming permanently damaged
and is not recommended. This should only be used as a last resort when the
pool cannot be returned to a healthy state prior to removing the device.
.sp
Default value: \fB0\fR.
.RE

2059 .sp
2060 .ne 2
2061 .na
2062 \fBzfs_resilver_min_time_ms\fR (int)
2063 .ad
2064 .RS 12n
2065 Resilvers are processed by the sync thread. While resilvering, it will spend
2066 at least this much time (in milliseconds) working on a resilver between txg flushes.
2067 .sp
2068 Default value: \fB3,000\fR.
2069 .RE
2070
2071 .sp
2072 .ne 2
2073 .na
2074 \fBzfs_scan_ignore_errors\fR (int)
2075 .ad
2076 .RS 12n
2077 If set to a nonzero value, remove the DTL (dirty time list) upon
2078 completion of a pool scan (scrub) even if there were unrepairable
2079 errors. It is intended to be used during pool repair or recovery to
2080 stop resilvering when the pool is next imported.
2081 .sp
2082 Default value: \fB0\fR.
2083 .RE
2084
2085 .sp
2086 .ne 2
2087 .na
2088 \fBzfs_scrub_min_time_ms\fR (int)
2089 .ad
2090 .RS 12n
2091 Scrubs are processed by the sync thread. While scrubbing, it will spend
2092 at least this much time (in milliseconds) working on a scrub between txg flushes.
2093 .sp
2094 Default value: \fB1,000\fR.
2095 .RE
2096
2097 .sp
2098 .ne 2
2099 .na
2100 \fBzfs_scan_checkpoint_intval\fR (int)
2101 .ad
2102 .RS 12n
2103 To preserve progress across reboots, the sequential scan algorithm periodically
2104 needs to stop metadata scanning and issue all the verification I/Os to disk.
2105 The frequency of this flushing is determined by the
2106 \fBzfs_scan_checkpoint_intval\fR tunable.
2107 .sp
2108 Default value: \fB7200\fR seconds (every 2 hours).
2109 .RE
2110
2111 .sp
2112 .ne 2
2113 .na
2114 \fBzfs_scan_fill_weight\fR (int)
2115 .ad
2116 .RS 12n
2117 This tunable affects how scrub and resilver I/O segments are ordered. A higher
2118 number indicates that we care more about how densely filled a segment is, while a
2119 lower number indicates we care more about the size of the extent without
2120 considering the gaps within a segment. This value is only tunable upon module
2121 insertion. Changing the value afterwards will have no effect on scrub or
2122 resilver performance.
2123 .sp
2124 Default value: \fB3\fR.
2125 .RE
2126
2127 .sp
2128 .ne 2
2129 .na
2130 \fBzfs_scan_issue_strategy\fR (int)
2131 .ad
2132 .RS 12n
2133 Determines the order that data will be verified while scrubbing or resilvering.
2134 If set to \fB1\fR, data will be verified as sequentially as possible, given the
2135 amount of memory reserved for scrubbing (see \fBzfs_scan_mem_lim_fact\fR). This
2136 may improve scrub performance if the pool's data is very fragmented. If set to
2137 \fB2\fR, the largest mostly-contiguous chunk of found data will be verified
2138 first. By deferring scrubbing of small segments, we may later find adjacent data
2139 to coalesce and increase the segment size. If set to \fB0\fR, zfs will use
2140 strategy \fB1\fR during normal verification and strategy \fB2\fR while taking a
2141 checkpoint.
2142 .sp
2143 Default value: \fB0\fR.
2144 .RE
2145
2146 .sp
2147 .ne 2
2148 .na
2149 \fBzfs_scan_legacy\fR (int)
2150 .ad
2151 .RS 12n
2152 A value of 0 indicates that scrubs and resilvers will gather metadata in
2153 memory before issuing sequential I/O. A value of 1 indicates that the legacy
2154 algorithm will be used, where I/O is initiated as soon as it is discovered.
2155 Changing this value to 0 will not affect scrubs or resilvers that are already
2156 in progress.
2157 .sp
2158 Default value: \fB0\fR.
2159 .RE
2160
2161 .sp
2162 .ne 2
2163 .na
2164 \fBzfs_scan_max_ext_gap\fR (int)
2165 .ad
2166 .RS 12n
2167 Indicates the largest gap in bytes between scrub / resilver I/Os that will still
2168 be considered sequential for sorting purposes. Changing this value will not
2169 affect scrubs or resilvers that are already in progress.
2170 .sp
2171 Default value: \fB2,097,152\fR (2MB).
2172 .RE
2173
2174 .sp
2175 .ne 2
2176 .na
2177 \fBzfs_scan_mem_lim_fact\fR (int)
2178 .ad
2179 .RS 12n
2180 Maximum fraction of RAM used for I/O sorting by the sequential scan algorithm.
2181 This tunable determines the hard limit for I/O sorting memory usage.
2182 When the hard limit is reached we stop scanning metadata and start issuing
2183 data verification I/O. This is done until we get below the soft limit.
2184 .sp
2185 Default value: \fB20\fR, which is 5% of RAM (1/20).
2186 .RE
2187
2188 .sp
2189 .ne 2
2190 .na
2191 \fBzfs_scan_mem_lim_soft_fact\fR (int)
2192 .ad
2193 .RS 12n
2194 The fraction of the hard limit used to determine the soft limit for I/O sorting
2195 by the sequential scan algorithm. When we cross this limit from below no action
2196 is taken. When we cross this limit from above it is because we are issuing
2197 verification I/O. In this case (unless the metadata scan is done) we stop
2198 issuing verification I/O and start scanning metadata again until we get to the
2199 hard limit.
2200 .sp
2201 Default value: \fB20\fR, which is 5% of the hard limit (1/20).
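.sp
As a worked example, with both tunables at their default of \fB20\fR on a
system with 32GiB of RAM (an illustrative figure):
.nf
	hard limit = 32GiB / 20 = ~1.6GiB
	soft limit = 1.6GiB / 20 = ~82MiB
.fi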
2202 .RE
2203
2204 .sp
2205 .ne 2
2206 .na
2207 \fBzfs_scan_vdev_limit\fR (int)
2208 .ad
2209 .RS 12n
2210 Maximum amount of data that can be concurrently issued for scrubs and
2211 resilvers per leaf device, given in bytes.
2212 .sp
2213 Default value: \fB41943040\fR (40MB).
2214 .RE
2215
2216 .sp
2217 .ne 2
2218 .na
2219 \fBzfs_send_corrupt_data\fR (int)
2220 .ad
2221 .RS 12n
2222 Allow sending of corrupt data (ignore read/checksum errors when sending data)
2223 .sp
2224 Use \fB1\fR for yes and \fB0\fR for no (default).
2225 .RE
2226
2227 .sp
2228 .ne 2
2229 .na
2230 \fBzfs_send_queue_length\fR (int)
2231 .ad
2232 .RS 12n
2233 The maximum number of bytes allowed in the \fBzfs send\fR queue. This value
2234 must be at least twice the maximum block size in use.
2235 .sp
2236 Default value: \fB16,777,216\fR.
2237 .RE
2238
2239 .sp
2240 .ne 2
2241 .na
2242 \fBzfs_recv_queue_length\fR (int)
2243 .ad
2244 .RS 12n
2246 The maximum number of bytes allowed in the \fBzfs receive\fR queue. This value
2247 must be at least twice the maximum block size in use.
2248 .sp
2249 Default value: \fB16,777,216\fR.
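.sp
For example, if the maximum block size in use is 16MiB (the largest
supported), the queue must be at least 33,554,432 bytes, which could be
applied with:
.nf
	# echo 33554432 > /sys/module/zfs/parameters/zfs_recv_queue_length
.fi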
2250 .RE
2251
2252 .sp
2253 .ne 2
2254 .na
2255 \fBzfs_sync_pass_deferred_free\fR (int)
2256 .ad
2257 .RS 12n
2258 Flushing of data to disk is done in passes. Defer frees starting in this pass
2259 .sp
2260 Default value: \fB2\fR.
2261 .RE
2262
2263 .sp
2264 .ne 2
2265 .na
2266 \fBzfs_spa_discard_memory_limit\fR (int)
2267 .ad
2268 .RS 12n
2269 Maximum memory used for prefetching a checkpoint's space map on each
2270 vdev while discarding the checkpoint.
2271 .sp
2272 Default value: \fB16,777,216\fR.
2273 .RE
2274
2275 .sp
2276 .ne 2
2277 .na
2278 \fBzfs_sync_pass_dont_compress\fR (int)
2279 .ad
2280 .RS 12n
2281 Don't compress starting in this pass
2282 .sp
2283 Default value: \fB5\fR.
2284 .RE
2285
2286 .sp
2287 .ne 2
2288 .na
2289 \fBzfs_sync_pass_rewrite\fR (int)
2290 .ad
2291 .RS 12n
2292 Rewrite new block pointers starting in this pass
2293 .sp
2294 Default value: \fB2\fR.
2295 .RE
2296
2297 .sp
2298 .ne 2
2299 .na
2300 \fBzfs_sync_taskq_batch_pct\fR (int)
2301 .ad
2302 .RS 12n
2303 This controls the number of threads used by the dp_sync_taskq. The default
2304 value of 75% will create a maximum of one thread per cpu.
2305 .sp
2306 Default value: \fB75\fR%.
2307 .RE
2308
2309 .sp
2310 .ne 2
2311 .na
2312 \fBzfs_txg_history\fR (int)
2313 .ad
2314 .RS 12n
2315 Historical statistics for the last N txgs will be available in
2316 \fB/proc/spl/kstat/zfs/<pool>/txgs\fR
2317 .sp
2318 Default value: \fB0\fR.
2319 .RE
2320
2321 .sp
2322 .ne 2
2323 .na
2324 \fBzfs_txg_timeout\fR (int)
2325 .ad
2326 .RS 12n
2327 Flush dirty data to disk at least every N seconds (maximum txg duration)
2328 .sp
2329 Default value: \fB5\fR.
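.sp
Like other module parameters on this page, this can be set persistently via a
modprobe configuration file; the value below is purely illustrative:
.nf
	# cat /etc/modprobe.d/zfs.conf
	options zfs zfs_txg_timeout=10
.fi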
2330 .RE
2331
2332 .sp
2333 .ne 2
2334 .na
2335 \fBzfs_vdev_aggregation_limit\fR (int)
2336 .ad
2337 .RS 12n
2338 Max vdev I/O aggregation size
2339 .sp
2340 Default value: \fB131,072\fR.
2341 .RE
2342
2343 .sp
2344 .ne 2
2345 .na
2346 \fBzfs_vdev_cache_bshift\fR (int)
2347 .ad
2348 .RS 12n
2349 Shift size to inflate reads to.
2350 .sp
2351 Default value: \fB16\fR (effectively 65536).
2352 .RE
2353
2354 .sp
2355 .ne 2
2356 .na
2357 \fBzfs_vdev_cache_max\fR (int)
2358 .ad
2359 .RS 12n
2360 Inflate reads smaller than this value to meet the \fBzfs_vdev_cache_bshift\fR
2361 size (default 64k).
2362 .sp
2363 Default value: \fB16384\fR.
2364 .RE
2365
2366 .sp
2367 .ne 2
2368 .na
2369 \fBzfs_vdev_cache_size\fR (int)
2370 .ad
2371 .RS 12n
2372 Total size of the per-disk cache in bytes.
2373 .sp
2374 Currently this feature is disabled as it has been found to not be helpful
2375 for performance and in some cases harmful.
2376 .sp
2377 Default value: \fB0\fR.
2378 .RE
2379
2380 .sp
2381 .ne 2
2382 .na
2383 \fBzfs_vdev_mirror_rotating_inc\fR (int)
2384 .ad
2385 .RS 12n
2386 A number by which the balancing algorithm increments the load calculation, for
2387 the purpose of selecting the least busy mirror member, when an I/O immediately
2388 follows its predecessor on rotational vdevs.
2390 .sp
2391 Default value: \fB0\fR.
2392 .RE
2393
2394 .sp
2395 .ne 2
2396 .na
2397 \fBzfs_vdev_mirror_rotating_seek_inc\fR (int)
2398 .ad
2399 .RS 12n
2400 A number by which the balancing algorithm increments the load calculation for
2401 the purpose of selecting the least busy mirror member when an I/O lacks
2402 locality as defined by \fBzfs_vdev_mirror_rotating_seek_offset\fR. I/Os within
2403 this window that do not immediately follow the previous I/O are incremented by
2404 half of this value.
2405 .sp
2406 Default value: \fB5\fR.
2407 .RE
2408
2409 .sp
2410 .ne 2
2411 .na
2412 \fBzfs_vdev_mirror_rotating_seek_offset\fR (int)
2413 .ad
2414 .RS 12n
2415 The maximum distance from the last queued I/O within which the balancing
2416 algorithm considers an I/O to have locality.
2417 See the section "ZFS I/O SCHEDULER".
2418 .sp
2419 Default value: \fB1048576\fR.
2420 .RE
2421
2422 .sp
2423 .ne 2
2424 .na
2425 \fBzfs_vdev_mirror_non_rotating_inc\fR (int)
2426 .ad
2427 .RS 12n
2428 A number by which the balancing algorithm increments the load calculation for
2429 the purpose of selecting the least busy mirror member on non-rotational vdevs
2430 when I/Os do not immediately follow one another.
2431 .sp
2432 Default value: \fB0\fR.
2433 .RE
2434
2435 .sp
2436 .ne 2
2437 .na
2438 \fBzfs_vdev_mirror_non_rotating_seek_inc\fR (int)
2439 .ad
2440 .RS 12n
2441 A number by which the balancing algorithm increments the load calculation for
2442 the purpose of selecting the least busy mirror member when an I/O lacks
2443 locality as defined by \fBzfs_vdev_mirror_rotating_seek_offset\fR. I/Os within
2444 this window that do not immediately follow the previous I/O are incremented by
2445 half of this value.
2446 .sp
2447 Default value: \fB1\fR.
2448 .RE
2449
2450 .sp
2451 .ne 2
2452 .na
2453 \fBzfs_vdev_read_gap_limit\fR (int)
2454 .ad
2455 .RS 12n
2456 Aggregate read I/O operations if the gap on-disk between them is within this
2457 threshold.
2458 .sp
2459 Default value: \fB32,768\fR.
2460 .RE
2461
2462 .sp
2463 .ne 2
2464 .na
2465 \fBzfs_vdev_scheduler\fR (charp)
2466 .ad
2467 .RS 12n
2468 Set the Linux I/O scheduler on whole disk vdevs to this scheduler. Valid options
2469 are \fBnoop\fR, \fBcfq\fR, \fBbfq\fR and \fBdeadline\fR.
2470 .sp
2471 Default value: \fBnoop\fR.
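.sp
For example, to use the deadline scheduler on whole disk vdevs, the module
could be loaded with:
.nf
	# modprobe zfs zfs_vdev_scheduler=deadline
.fi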
2472 .RE
2473
2474 .sp
2475 .ne 2
2476 .na
2477 \fBzfs_vdev_write_gap_limit\fR (int)
2478 .ad
2479 .RS 12n
2480 Aggregate write I/O operations if the on-disk gap between them is within this threshold.
2481 .sp
2482 Default value: \fB4,096\fR.
2483 .RE
2484
2485 .sp
2486 .ne 2
2487 .na
2488 \fBzfs_vdev_raidz_impl\fR (string)
2489 .ad
2490 .RS 12n
2491 Parameter for selecting raidz parity implementation to use.
2492
2493 Options marked (always) below may be selected on module load as they are
2494 supported on all systems.
2495 The remaining options may only be set after the module is loaded, as they
2496 are available only if the implementations are compiled in and supported
2497 on the running system.
2498
2499 Once the module is loaded, the content of
2500 /sys/module/zfs/parameters/zfs_vdev_raidz_impl will show available options
2501 with the currently selected one enclosed in [].
2502 Possible options are:
2503 fastest - (always) implementation selected using built-in benchmark
2504 original - (always) original raidz implementation
2505 scalar - (always) scalar raidz implementation
2506 sse2 - implementation using SSE2 instruction set (64bit x86 only)
2507 ssse3 - implementation using SSSE3 instruction set (64bit x86 only)
2508 avx2 - implementation using AVX2 instruction set (64bit x86 only)
2509 avx512f - implementation using AVX512F instruction set (64bit x86 only)
2510 avx512bw - implementation using AVX512F & AVX512BW instruction sets (64bit x86 only)
2511 aarch64_neon - implementation using NEON (Aarch64/64 bit ARMv8 only)
2512 aarch64_neonx2 - implementation using NEON with more unrolling (Aarch64/64 bit ARMv8 only)
2513 .sp
2514 Default value: \fBfastest\fR.
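.sp
For example, the available and currently selected implementations can be
inspected, and a supported one selected, at runtime:
.nf
	# cat /sys/module/zfs/parameters/zfs_vdev_raidz_impl
	# echo avx2 > /sys/module/zfs/parameters/zfs_vdev_raidz_impl
.fi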
2515 .RE
2516
2517 .sp
2518 .ne 2
2519 .na
2520 \fBzfs_zevent_cols\fR (int)
2521 .ad
2522 .RS 12n
2523 When zevents are logged to the console, use this as the word wrap width.
2524 .sp
2525 Default value: \fB80\fR.
2526 .RE
2527
2528 .sp
2529 .ne 2
2530 .na
2531 \fBzfs_zevent_console\fR (int)
2532 .ad
2533 .RS 12n
2534 Log events to the console
2535 .sp
2536 Use \fB1\fR for yes and \fB0\fR for no (default).
2537 .RE
2538
2539 .sp
2540 .ne 2
2541 .na
2542 \fBzfs_zevent_len_max\fR (int)
2543 .ad
2544 .RS 12n
2545 Max event queue length. A value of 0 will result in a calculated value which
2546 increases with the number of CPUs in the system (minimum 64 events). Events
2547 in the queue can be viewed with the \fBzpool events\fR command.
2548 .sp
2549 Default value: \fB0\fR.
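.sp
For example, queued events can be viewed in detail, then cleared, with:
.nf
	# zpool events -v
	# zpool events -c
.fi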
2550 .RE
2551
2552 .sp
2553 .ne 2
2554 .na
2555 \fBzfs_zil_clean_taskq_maxalloc\fR (int)
2556 .ad
2557 .RS 12n
2558 The maximum number of taskq entries that are allowed to be cached. When this
2559 limit is exceeded transaction records (itxs) will be cleaned synchronously.
2560 .sp
2561 Default value: \fB1048576\fR.
2562 .RE
2563
2564 .sp
2565 .ne 2
2566 .na
2567 \fBzfs_zil_clean_taskq_minalloc\fR (int)
2568 .ad
2569 .RS 12n
2570 The number of taskq entries that are pre-populated when the taskq is first
2571 created and are immediately available for use.
2572 .sp
2573 Default value: \fB1024\fR.
2574 .RE
2575
2576 .sp
2577 .ne 2
2578 .na
2579 \fBzfs_zil_clean_taskq_nthr_pct\fR (int)
2580 .ad
2581 .RS 12n
2582 This controls the number of threads used by the dp_zil_clean_taskq. The default
2583 value of 100% will create a maximum of one thread per cpu.
2584 .sp
2585 Default value: \fB100\fR%.
2586 .RE
2587
2588 .sp
2589 .ne 2
2590 .na
2591 \fBzil_nocacheflush\fR (int)
2592 .ad
2593 .RS 12n
2594 Disable the cache flush commands that are normally sent to the disk(s) by
2595 the ZIL after an LWB write has completed. Setting this will cause ZIL
2596 corruption on power loss if a volatile out-of-order write cache is enabled.
2597 .sp
2598 Use \fB1\fR for yes and \fB0\fR for no (default).
2599 .RE
2600
2601 .sp
2602 .ne 2
2603 .na
2604 \fBzil_replay_disable\fR (int)
2605 .ad
2606 .RS 12n
2607 Disable intent logging replay. Replay can be disabled to aid recovery from a
2608 corrupted ZIL.
2609 .sp
2610 Use \fB1\fR for yes and \fB0\fR for no (default).
2611 .RE
2612
2613 .sp
2614 .ne 2
2615 .na
2616 \fBzil_slog_bulk\fR (ulong)
2617 .ad
2618 .RS 12n
2619 Limit SLOG write size per commit executed with synchronous priority.
2620 Any writes above that will be executed with lower (asynchronous) priority
2621 to limit potential SLOG device abuse by a single active ZIL writer.
2622 .sp
2623 Default value: \fB786,432\fR.
2624 .RE
2625
2626 .sp
2627 .ne 2
2628 .na
2629 \fBzio_decompress_fail_fraction\fR (int)
2630 .ad
2631 .RS 12n
2632 If non-zero, this value represents the denominator of the probability that zfs
2633 should induce a decompression failure. For instance, for a 5% decompression
2634 failure rate, this value should be set to 20.
2635 .sp
2636 Default value: \fB0\fR.
2637 .RE
2638
2639 .sp
2640 .ne 2
2641 .na
2642 \fBzio_slow_io_ms\fR (int)
2643 .ad
2644 .RS 12n
2645 When an I/O operation takes more than \fBzio_slow_io_ms\fR milliseconds to
2646 complete, it is marked as a slow I/O. Each slow I/O causes a delay zevent. Slow
2647 I/O counters can be seen with \fBzpool status -s\fR.
2649 .sp
2650 Default value: \fB30,000\fR.
2651 .RE
2652
2653 .sp
2654 .ne 2
2655 .na
2656 \fBzio_dva_throttle_enabled\fR (int)
2657 .ad
2658 .RS 12n
2659 Throttle block allocations in the I/O pipeline. This allows for
2660 dynamic allocation distribution when devices are imbalanced.
2661 When enabled, the maximum number of pending allocations per top-level vdev
2662 is limited by \fBzfs_vdev_queue_depth_pct\fR.
2663 .sp
2664 Default value: \fB1\fR.
2665 .RE
2666
2667 .sp
2668 .ne 2
2669 .na
2670 \fBzio_requeue_io_start_cut_in_line\fR (int)
2671 .ad
2672 .RS 12n
2673 Prioritize requeued I/O
2674 .sp
2675 Default value: \fB0\fR.
2676 .RE
2677
2678 .sp
2679 .ne 2
2680 .na
2681 \fBzio_taskq_batch_pct\fR (uint)
2682 .ad
2683 .RS 12n
2684 Percentage of online CPUs (or CPU cores, etc) which will run a worker thread
2685 for I/O. These workers are responsible for I/O work such as compression and
2686 checksum calculations. A fractional number of CPUs will be rounded down.
2687 .sp
2688 The default value of 75 was chosen to avoid using all CPUs which can result in
2689 latency issues and inconsistent application performance, especially when high
2690 compression is enabled.
2691 .sp
2692 Default value: \fB75\fR.
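.sp
As a worked example, on a system with 8 online CPUs the default yields:
.nf
	8 CPUs x 75% = 6 I/O worker threads
.fi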
2693 .RE
2694
2695 .sp
2696 .ne 2
2697 .na
2698 \fBzvol_inhibit_dev\fR (uint)
2699 .ad
2700 .RS 12n
2701 Do not create zvol device nodes. This may slightly improve startup time on
2702 systems with a very large number of zvols.
2703 .sp
2704 Use \fB1\fR for yes and \fB0\fR for no (default).
2705 .RE
2706
2707 .sp
2708 .ne 2
2709 .na
2710 \fBzvol_major\fR (uint)
2711 .ad
2712 .RS 12n
2713 Major number for zvol block devices
2714 .sp
2715 Default value: \fB230\fR.
2716 .RE
2717
2718 .sp
2719 .ne 2
2720 .na
2721 \fBzvol_max_discard_blocks\fR (ulong)
2722 .ad
2723 .RS 12n
2724 Discard (aka TRIM) operations done on zvols will be done in batches of this
2725 many blocks, where block size is determined by the \fBvolblocksize\fR property
2726 of a zvol.
2727 .sp
2728 Default value: \fB16,384\fR.
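.sp
As a worked example, for a zvol with the default \fB8K\fR \fBvolblocksize\fR:
.nf
	16,384 blocks x 8KiB = 128MiB discarded per batch
.fi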
2729 .RE
2730
2731 .sp
2732 .ne 2
2733 .na
2734 \fBzvol_prefetch_bytes\fR (uint)
2735 .ad
2736 .RS 12n
2737 When adding a zvol to the system prefetch \fBzvol_prefetch_bytes\fR
2738 from the start and end of the volume. Prefetching these regions
2739 of the volume is desirable because they are likely to be accessed
2740 immediately by \fBblkid(8)\fR or by the kernel scanning for a partition
2741 table.
2742 .sp
2743 Default value: \fB131,072\fR.
2744 .RE
2745
2746 .sp
2747 .ne 2
2748 .na
2749 \fBzvol_request_sync\fR (uint)
2750 .ad
2751 .RS 12n
2752 When processing I/O requests for a zvol submit them synchronously. This
2753 effectively limits the queue depth to 1 for each I/O submitter. When set
2754 to 0 requests are handled asynchronously by a thread pool. The number of
2755 requests which can be handled concurrently is controlled by \fBzvol_threads\fR.
2756 .sp
2757 Default value: \fB0\fR.
2758 .RE
2759
2760 .sp
2761 .ne 2
2762 .na
2763 \fBzvol_threads\fR (uint)
2764 .ad
2765 .RS 12n
2766 Max number of threads which can handle zvol I/O requests concurrently.
2767 .sp
2768 Default value: \fB32\fR.
2769 .RE
2770
2771 .sp
2772 .ne 2
2773 .na
2774 \fBzvol_volmode\fR (uint)
2775 .ad
2776 .RS 12n
2777 Defines zvol block devices behaviour when \fBvolmode\fR is set to \fBdefault\fR.
2778 Valid values are \fB1\fR (full), \fB2\fR (dev) and \fB3\fR (none).
2779 .sp
2780 Default value: \fB1\fR.
2781 .RE
2782
2783 .sp
2784 .ne 2
2785 .na
2786 \fBzfs_qat_disable\fR (int)
2787 .ad
2788 .RS 12n
2789 This tunable disables qat hardware acceleration for gzip compression and
2790 AES-GCM encryption. It is available only if qat acceleration is compiled in
2791 and the qat driver is present.
2792 .sp
2793 Use \fB1\fR for yes and \fB0\fR for no (default).
2794 .RE
2795
2796 .SH ZFS I/O SCHEDULER
2797 ZFS issues I/O operations to leaf vdevs to satisfy and complete I/Os.
2798 The I/O scheduler determines when and in what order those operations are
2799 issued. The I/O scheduler divides operations into five I/O classes
2800 prioritized in the following order: sync read, sync write, async read,
2801 async write, and scrub/resilver. Each queue defines the minimum and
2802 maximum number of concurrent operations that may be issued to the
2803 device. In addition, the device has an aggregate maximum,
2804 \fBzfs_vdev_max_active\fR. Note that the sum of the per-queue minimums
2805 must not exceed the aggregate maximum. If the sum of the per-queue
2806 maximums exceeds the aggregate maximum, then the number of active I/Os
2807 may reach \fBzfs_vdev_max_active\fR, in which case no further I/Os will
2808 be issued regardless of whether all per-queue minimums have been met.
2809 .sp
2810 For many physical devices, throughput increases with the number of
2811 concurrent operations, but latency typically suffers. Further, physical
2812 devices typically have a limit at which more concurrent operations have no
2813 effect on throughput or can actually cause it to decrease.
2814 .sp
2815 The scheduler selects the next operation to issue by first looking for an
2816 I/O class whose minimum has not been satisfied. Once all are satisfied and
2817 the aggregate maximum has not been hit, the scheduler looks for classes
2818 whose maximum has not been satisfied. Iteration through the I/O classes is
2819 done in the order specified above. No further operations are issued if the
2820 aggregate maximum number of concurrent operations has been hit or if there
2821 are no operations queued for an I/O class that has not hit its maximum.
2822 Every time an I/O is queued or an operation completes, the I/O scheduler
2823 looks for new operations to issue.
2824 .sp
2825 In general, smaller values of max_active will lead to lower latency of
2826 synchronous operations. Larger values of max_active may lead to higher overall
2827 throughput, depending on underlying storage.
2828 .sp
2829 The ratio of the queues' max_actives determines the balance of performance
2830 between reads, writes, and scrubs. E.g., increasing
2831 \fBzfs_vdev_scrub_max_active\fR will cause the scrub or resilver to complete
2832 more quickly, but reads and writes will have higher latency and lower throughput.
2833 .sp
2834 All I/O classes have a fixed maximum number of outstanding operations
2835 except for the async write class. Asynchronous writes represent the data
2836 that is committed to stable storage during the syncing stage for
2837 transaction groups. Transaction groups enter the syncing state
2838 periodically so the number of queued async writes will quickly burst up
2839 and then bleed down to zero. Rather than servicing them as quickly as
2840 possible, the I/O scheduler changes the maximum number of active async
2841 write I/Os according to the amount of dirty data in the pool. Since
2842 both throughput and latency typically increase with the number of
2843 concurrent operations issued to physical devices, reducing the
2844 burstiness in the number of concurrent operations also stabilizes the
2845 response time of operations from other -- and in particular synchronous
2846 -- queues. In broad strokes, the I/O scheduler will issue more
2847 concurrent operations from the async write queue as there's more dirty
2848 data in the pool.
2849 .sp
2850 Async Writes
2851 .sp
2852 The number of concurrent operations issued for the async write I/O class
2853 follows a piece-wise linear function defined by a few adjustable points.
2854 .nf
2855
2856 | o---------| <-- zfs_vdev_async_write_max_active
2857 ^ | /^ |
2858 | | / | |
2859 active | / | |
2860 I/O | / | |
2861 count | / | |
2862 | / | |
2863 |-------o | | <-- zfs_vdev_async_write_min_active
2864 0|_______^______|_________|
2865 0% | | 100% of zfs_dirty_data_max
2866 | |
2867 | `-- zfs_vdev_async_write_active_max_dirty_percent
2868 `--------- zfs_vdev_async_write_active_min_dirty_percent
2869
2870 .fi
2871 Until the amount of dirty data exceeds a minimum percentage of the dirty
2872 data allowed in the pool, the I/O scheduler will limit the number of
2873 concurrent operations to the minimum. As that threshold is crossed, the
2874 number of concurrent operations issued increases linearly to the maximum at
2875 the specified maximum percentage of the dirty data allowed in the pool.
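.sp
As an illustration, assume hypothetical settings of
\fBzfs_vdev_async_write_min_active\fR=1, \fBzfs_vdev_async_write_max_active\fR=10,
and minimum/maximum dirty percentages of 30% and 60%. With dirty data at 45% of
\fBzfs_dirty_data_max\fR, halfway up the slope:
.nf
	active = 1 + (45 - 30) / (60 - 30) * (10 - 1) = 5.5
.fi
i.e. roughly 5 concurrent async write operations will be issued.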
2876 .sp
2877 Ideally, the amount of dirty data on a busy pool will stay in the sloped
2878 part of the function between \fBzfs_vdev_async_write_active_min_dirty_percent\fR
2879 and \fBzfs_vdev_async_write_active_max_dirty_percent\fR. If it exceeds the
2880 maximum percentage, this indicates that the rate of incoming data is
2881 greater than the rate that the backend storage can handle. In this case, we
2882 must further throttle incoming writes, as described in the next section.
2883
2884 .SH ZFS TRANSACTION DELAY
2885 We delay transactions when we've determined that the backend storage
2886 isn't able to accommodate the rate of incoming writes.
2887 .sp
2888 If there is already a transaction waiting, we delay relative to when
2889 that transaction will finish waiting. This way the calculated delay time
2890 is independent of the number of threads concurrently executing
2891 transactions.
2892 .sp
2893 If we are the only waiter, wait relative to when the transaction
2894 started, rather than the current time. This credits the transaction for
2895 "time already served", e.g. reading indirect blocks.
2896 .sp
2897 The minimum time for a transaction to take is calculated as:
2898 .nf
2899 min_time = zfs_delay_scale * (dirty - min) / (max - dirty)
2900 min_time is then capped at 100 milliseconds.
2901 .fi
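.sp
As a worked example, take the default \fBzfs_delay_scale\fR of 500,000
(nanoseconds) and the default \fBzfs_delay_min_dirty_percent\fR of 60, with a
hypothetical \fBzfs_dirty_data_max\fR of 4GiB (so min = 2.4GiB). A transaction
arriving when 3.2GiB is dirty sees:
.nf
	min_time = 500,000 * (3.2GiB - 2.4GiB) / (4GiB - 3.2GiB)
	         = 500,000 ns = 500us
.fi
which is the midpoint of the curve shown below.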
2902 .sp
2903 The delay has two degrees of freedom that can be adjusted via tunables. The
2904 percentage of dirty data at which we start to delay is defined by
2905 \fBzfs_delay_min_dirty_percent\fR. This should typically be at or above
2906 \fBzfs_vdev_async_write_active_max_dirty_percent\fR so that we only start to
2907 delay after writing at full speed has failed to keep up with the incoming write
2908 rate. The scale of the curve is defined by \fBzfs_delay_scale\fR. Roughly speaking,
2909 this variable determines the amount of delay at the midpoint of the curve.
2910 .sp
2911 .nf
2912 delay
2913 10ms +-------------------------------------------------------------*+
2914 | *|
2915 9ms + *+
2916 | *|
2917 8ms + *+
2918 | * |
2919 7ms + * +
2920 | * |
2921 6ms + * +
2922 | * |
2923 5ms + * +
2924 | * |
2925 4ms + * +
2926 | * |
2927 3ms + * +
2928 | * |
2929 2ms + (midpoint) * +
2930 | | ** |
2931 1ms + v *** +
2932 | zfs_delay_scale ----------> ******** |
2933 0 +-------------------------------------*********----------------+
2934 0% <- zfs_dirty_data_max -> 100%
2935 .fi
2936 .sp
2937 Note that since the delay is added to the outstanding time remaining on the
2938 most recent transaction, the delay is effectively the inverse of IOPS.
2939 Here the midpoint of 500us translates to 2000 IOPS. The shape of the curve
2940 was chosen such that small changes in the amount of accumulated dirty data
2941 in the first 3/4 of the curve yield relatively small differences in the
2942 amount of delay.
2943 .sp
2944 The effects can be easier to understand when the amount of delay is
2945 represented on a log scale:
2946 .sp
2947 .nf
2948 delay
2949 100ms +-------------------------------------------------------------++
2950 + +
2951 | |
2952 + *+
2953 10ms + *+
2954 + ** +
2955 | (midpoint) ** |
2956 + | ** +
2957 1ms + v **** +
2958 + zfs_delay_scale ----------> ***** +
2959 | **** |
2960 + **** +
2961 100us + ** +
2962 + * +
2963 | * |
2964 + * +
2965 10us + * +
2966 + +
2967 | |
2968 + +
2969 +--------------------------------------------------------------+
2970 0% <- zfs_dirty_data_max -> 100%
2971 .fi
2972 .sp
2973 Note here that only as the amount of dirty data approaches its limit does
2974 the delay start to increase rapidly. The goal of a properly tuned system
2975 should be to keep the amount of dirty data out of that range by first
2976 ensuring that the appropriate limits are set for the I/O scheduler to reach
2977 optimal throughput on the backend storage, and then by changing the value
2978 of \fBzfs_delay_scale\fR to increase the steepness of the curve.