1 '\" te
2 .\" Copyright (c) 2013 by Turbo Fredriksson <turbo@bayour.com>. All rights reserved.
3 .\" Copyright (c) 2019 by Delphix. All rights reserved.
4 .\" Copyright (c) 2019 Datto Inc.
5 .\" The contents of this file are subject to the terms of the Common Development
6 .\" and Distribution License (the "License"). You may not use this file except
7 .\" in compliance with the License. You can obtain a copy of the license at
8 .\" usr/src/OPENSOLARIS.LICENSE or http://www.opensolaris.org/os/licensing.
9 .\"
10 .\" See the License for the specific language governing permissions and
11 .\" limitations under the License. When distributing Covered Code, include this
12 .\" CDDL HEADER in each file and include the License file at
13 .\" usr/src/OPENSOLARIS.LICENSE. If applicable, add the following below this
14 .\" CDDL HEADER, with the fields enclosed by brackets "[]" replaced with your
15 .\" own identifying information:
16 .\" Portions Copyright [yyyy] [name of copyright owner]
17 .TH ZFS-MODULE-PARAMETERS 5 "Feb 15, 2019"
18 .SH NAME
19 zfs\-module\-parameters \- ZFS module parameters
20 .SH DESCRIPTION
21 .sp
22 .LP
23 Description of the different parameters to the ZFS module.
24
25 .SS "Module parameters"
26 .sp
27 .LP
28
29 .sp
30 .ne 2
31 .na
32 \fBdbuf_cache_max_bytes\fR (ulong)
33 .ad
34 .RS 12n
35 Maximum size in bytes of the dbuf cache. When \fB0\fR this value will default
36 to \fB1/2^dbuf_cache_shift\fR (1/32) of the target ARC size, otherwise the
37 provided value in bytes will be used. The behavior of the dbuf cache and its
38 associated settings can be observed via the \fB/proc/spl/kstat/zfs/dbufstats\fR
39 kstat.
40 .sp
41 Default value: \fB0\fR.
42 .RE
43
44 .sp
45 .ne 2
46 .na
47 \fBdbuf_metadata_cache_max_bytes\fR (ulong)
48 .ad
49 .RS 12n
50 Maximum size in bytes of the metadata dbuf cache. When \fB0\fR this value will
51 default to \fB1/2^dbuf_metadata_cache_shift\fR (1/64) of the target ARC size, otherwise
52 the provided value in bytes will be used. The behavior of the metadata dbuf
53 cache and its associated settings can be observed via the
54 \fB/proc/spl/kstat/zfs/dbufstats\fR kstat.
55 .sp
56 Default value: \fB0\fR.
57 .RE
58
59 .sp
60 .ne 2
61 .na
62 \fBdbuf_cache_hiwater_pct\fR (uint)
63 .ad
64 .RS 12n
65 The percentage over \fBdbuf_cache_max_bytes\fR at which dbufs must be evicted
66 directly.
67 .sp
68 Default value: \fB10\fR%.
69 .RE
70
71 .sp
72 .ne 2
73 .na
74 \fBdbuf_cache_lowater_pct\fR (uint)
75 .ad
76 .RS 12n
77 The percentage below \fBdbuf_cache_max_bytes\fR at which the evict thread stops
78 evicting dbufs.
79 .sp
80 Default value: \fB10\fR%.
81 .RE
82
83 .sp
84 .ne 2
85 .na
86 \fBdbuf_cache_shift\fR (int)
87 .ad
88 .RS 12n
89 Set the size of the dbuf cache, \fBdbuf_cache_max_bytes\fR, to a log2 fraction
90 of the target arc size.
91 .sp
92 Default value: \fB5\fR.
93 .RE
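.sp
.LP
As an illustration, the following is a minimal Python sketch (not ZFS
source) of how the default dbuf cache size and its eviction watermarks
follow from the tunables above; the ARC target size is an assumed example
value:
.sp
.nf
arc_c = 16 * 2**30            # assumed target ARC size: 16 GiB
dbuf_cache_shift = 5          # default
dbuf_cache_hiwater_pct = 10   # default
dbuf_cache_lowater_pct = 10   # default

dbuf_cache_max_bytes = arc_c >> dbuf_cache_shift   # 1/32 of the ARC target
hiwater = dbuf_cache_max_bytes * (100 + dbuf_cache_hiwater_pct) // 100
lowater = dbuf_cache_max_bytes * (100 - dbuf_cache_lowater_pct) // 100

# 536870912 590558003 483183820: dbufs are evicted directly above hiwater;
# the evict thread stops once the cache falls below lowater.
print(dbuf_cache_max_bytes, hiwater, lowater)
.fi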
94
95 .sp
96 .ne 2
97 .na
98 \fBdbuf_metadata_cache_shift\fR (int)
99 .ad
100 .RS 12n
101 Set the size of the dbuf metadata cache, \fBdbuf_metadata_cache_max_bytes\fR,
102 to a log2 fraction of the target arc size.
103 .sp
104 Default value: \fB6\fR.
105 .RE
106
107 .sp
108 .ne 2
109 .na
110 \fBignore_hole_birth\fR (int)
111 .ad
112 .RS 12n
113 This is an alias for \fBsend_holes_without_birth_time\fR.
114 .RE
115
116 .sp
117 .ne 2
118 .na
119 \fBl2arc_feed_again\fR (int)
120 .ad
121 .RS 12n
122 Turbo L2ARC warm-up. When the L2ARC is cold the fill interval will be made
123 as short as possible.
124 .sp
125 Use \fB1\fR for yes (default) and \fB0\fR to disable.
126 .RE
127
128 .sp
129 .ne 2
130 .na
131 \fBl2arc_feed_min_ms\fR (ulong)
132 .ad
133 .RS 12n
134 Minimum feed interval in milliseconds. Only takes effect when
135 \fBl2arc_feed_again=1\fR.
136 .sp
137 Default value: \fB200\fR.
138 .RE
139
140 .sp
141 .ne 2
142 .na
143 \fBl2arc_feed_secs\fR (ulong)
144 .ad
145 .RS 12n
146 Seconds between L2ARC writing
147 .sp
148 Default value: \fB1\fR.
149 .RE
150
151 .sp
152 .ne 2
153 .na
154 \fBl2arc_headroom\fR (ulong)
155 .ad
156 .RS 12n
157 How far through the ARC lists to search for L2ARC cacheable content, expressed
158 as a multiplier of \fBl2arc_write_max\fR
159 .sp
160 Default value: \fB2\fR.
161 .RE
162
163 .sp
164 .ne 2
165 .na
166 \fBl2arc_headroom_boost\fR (ulong)
167 .ad
168 .RS 12n
169 Scales \fBl2arc_headroom\fR by this percentage when L2ARC contents are being
170 successfully compressed before writing. A value of 100 disables this feature.
171 .sp
172 Default value: \fB200\fR%.
173 .RE
174
175 .sp
176 .ne 2
177 .na
178 \fBl2arc_noprefetch\fR (int)
179 .ad
180 .RS 12n
181 Do not write buffers to L2ARC if they were prefetched but not used by
182 applications
183 .sp
184 Use \fB1\fR for yes (default) and \fB0\fR to disable.
185 .RE
186
187 .sp
188 .ne 2
189 .na
190 \fBl2arc_norw\fR (int)
191 .ad
192 .RS 12n
193 No reads during writes
194 .sp
195 Use \fB1\fR for yes and \fB0\fR for no (default).
196 .RE
197
198 .sp
199 .ne 2
200 .na
201 \fBl2arc_write_boost\fR (ulong)
202 .ad
203 .RS 12n
204 Cold L2ARC devices will have \fBl2arc_write_max\fR increased by this amount
205 while they remain cold.
206 .sp
207 Default value: \fB8,388,608\fR.
208 .RE
209
210 .sp
211 .ne 2
212 .na
213 \fBl2arc_write_max\fR (ulong)
214 .ad
215 .RS 12n
216 Max write bytes per interval
217 .sp
218 Default value: \fB8,388,608\fR.
219 .RE
220
221 .sp
222 .ne 2
223 .na
224 \fBmetaslab_aliquot\fR (ulong)
225 .ad
226 .RS 12n
227 Metaslab granularity, in bytes. This is roughly similar to what would be
228 referred to as the "stripe size" in traditional RAID arrays. In normal
229 operation, ZFS will try to write this amount of data to a top-level vdev
230 before moving on to the next one.
231 .sp
232 Default value: \fB524,288\fR.
233 .RE
234
235 .sp
236 .ne 2
237 .na
238 \fBmetaslab_bias_enabled\fR (int)
239 .ad
240 .RS 12n
241 Enable metaslab group biasing based on its vdev's over- or under-utilization
242 relative to the pool.
243 .sp
244 Use \fB1\fR for yes (default) and \fB0\fR for no.
245 .RE
246
247 .sp
248 .ne 2
249 .na
250 \fBmetaslab_force_ganging\fR (ulong)
251 .ad
252 .RS 12n
253 Force blocks above this size, in bytes, to be gang blocks. This option is used
254 by the test suite to facilitate testing.
255 .sp
256 Default value: \fB16,777,217\fR.
257 .RE
258
259 .sp
260 .ne 2
261 .na
262 \fBzfs_metaslab_segment_weight_enabled\fR (int)
263 .ad
264 .RS 12n
265 Enable/disable segment-based metaslab selection.
266 .sp
267 Use \fB1\fR for yes (default) and \fB0\fR for no.
268 .RE
269
270 .sp
271 .ne 2
272 .na
273 \fBzfs_metaslab_switch_threshold\fR (int)
274 .ad
275 .RS 12n
276 When using segment-based metaslab selection, continue allocating
277 from the active metaslab until \fBzfs_metaslab_switch_threshold\fR
278 worth of buckets have been exhausted.
279 .sp
280 Default value: \fB2\fR.
281 .RE
282
283 .sp
284 .ne 2
285 .na
286 \fBmetaslab_debug_load\fR (int)
287 .ad
288 .RS 12n
289 Load all metaslabs during pool import.
290 .sp
291 Use \fB1\fR for yes and \fB0\fR for no (default).
292 .RE
293
294 .sp
295 .ne 2
296 .na
297 \fBmetaslab_debug_unload\fR (int)
298 .ad
299 .RS 12n
300 Prevent metaslabs from being unloaded.
301 .sp
302 Use \fB1\fR for yes and \fB0\fR for no (default).
303 .RE
304
305 .sp
306 .ne 2
307 .na
308 \fBmetaslab_fragmentation_factor_enabled\fR (int)
309 .ad
310 .RS 12n
311 Enable use of the fragmentation metric in computing metaslab weights.
312 .sp
313 Use \fB1\fR for yes (default) and \fB0\fR for no.
314 .RE
315
316 .sp
317 .ne 2
318 .na
319 \fBzfs_vdev_default_ms_count\fR (int)
320 .ad
321 .RS 12n
322 When a vdev is added, target this number of metaslabs per top-level vdev.
323 .sp
324 Default value: \fB200\fR.
325 .RE
326
327 .sp
328 .ne 2
329 .na
330 \fBzfs_vdev_min_ms_count\fR (int)
331 .ad
332 .RS 12n
333 Minimum number of metaslabs to create in a top-level vdev.
334 .sp
335 Default value: \fB16\fR.
336 .RE
337
338 .sp
339 .ne 2
340 .na
341 \fBzfs_vdev_ms_count_limit\fR (int)
342 .ad
343 .RS 12n
344 Practical upper limit of total metaslabs per top-level vdev.
345 .sp
346 Default value: \fB131,072\fR.
347 .RE
348
349 .sp
350 .ne 2
351 .na
352 \fBmetaslab_preload_enabled\fR (int)
353 .ad
354 .RS 12n
355 Enable metaslab group preloading.
356 .sp
357 Use \fB1\fR for yes (default) and \fB0\fR for no.
358 .RE
359
360 .sp
361 .ne 2
362 .na
363 \fBmetaslab_lba_weighting_enabled\fR (int)
364 .ad
365 .RS 12n
366 Give more weight to metaslabs with lower LBAs, assuming they have
367 greater bandwidth as is typically the case on a modern constant
368 angular velocity disk drive.
369 .sp
370 Use \fB1\fR for yes (default) and \fB0\fR for no.
371 .RE
372
373 .sp
374 .ne 2
375 .na
376 \fBsend_holes_without_birth_time\fR (int)
377 .ad
378 .RS 12n
379 When set, the hole_birth optimization will not be used, and all holes will
380 always be sent on zfs send. This is useful if you suspect your datasets are
381 affected by a bug in hole_birth.
382 .sp
383 Use \fB1\fR for on (default) and \fB0\fR for off.
384 .RE
385
386 .sp
387 .ne 2
388 .na
389 \fBspa_config_path\fR (charp)
390 .ad
391 .RS 12n
392 SPA config file
393 .sp
394 Default value: \fB/etc/zfs/zpool.cache\fR.
395 .RE
396
397 .sp
398 .ne 2
399 .na
400 \fBspa_asize_inflation\fR (int)
401 .ad
402 .RS 12n
403 Multiplication factor used to estimate actual disk consumption from the
404 size of data being written. The default value is a worst case estimate,
405 but lower values may be valid for a given pool depending on its
406 configuration. Pool administrators who understand the factors involved
407 may wish to specify a more realistic inflation factor, particularly if
408 they operate close to quota or capacity limits.
409 .sp
410 Default value: \fB24\fR.
411 .RE
412
413 .sp
414 .ne 2
415 .na
416 \fBspa_load_print_vdev_tree\fR (int)
417 .ad
418 .RS 12n
419 Whether to print the vdev tree in the debugging message buffer during pool import.
420 Use 0 to disable and 1 to enable.
421 .sp
422 Default value: \fB0\fR.
423 .RE
424
425 .sp
426 .ne 2
427 .na
428 \fBspa_load_verify_data\fR (int)
429 .ad
430 .RS 12n
431 Whether to traverse data blocks during an "extreme rewind" (\fB-X\fR)
432 import. Use 0 to disable and 1 to enable.
433
434 An extreme rewind import normally performs a full traversal of all
435 blocks in the pool for verification. If this parameter is set to 0,
436 the traversal skips non-metadata blocks. Once the import has started, it
437 can be toggled to stop or start the traversal of non-metadata blocks.
438 .sp
439 Default value: \fB1\fR.
440 .RE
441
442 .sp
443 .ne 2
444 .na
445 \fBspa_load_verify_metadata\fR (int)
446 .ad
447 .RS 12n
448 Whether to traverse blocks during an "extreme rewind" (\fB-X\fR)
449 pool import. Use 0 to disable and 1 to enable.
450
451 An extreme rewind import normally performs a full traversal of all
452 blocks in the pool for verification. If this parameter is set to 0,
453 the traversal is not performed. Once the import has started, it can be
454 toggled to stop or start the traversal.
455 .sp
456 Default value: \fB1\fR.
457 .RE
458
459 .sp
460 .ne 2
461 .na
462 \fBspa_load_verify_maxinflight\fR (int)
463 .ad
464 .RS 12n
465 Maximum concurrent I/Os during the traversal performed during an "extreme
466 rewind" (\fB-X\fR) pool import.
467 .sp
468 Default value: \fB10000\fR.
469 .RE
470
471 .sp
472 .ne 2
473 .na
474 \fBspa_slop_shift\fR (int)
475 .ad
476 .RS 12n
477 Normally, we don't allow the last 3.2% (1/(2^spa_slop_shift)) of space
478 in the pool to be consumed. This ensures that we don't run the pool
479 completely out of space, due to unaccounted changes (e.g. to the MOS).
480 It also limits the worst-case time to allocate space. If we have
481 less than this amount of free space, most ZPL operations (e.g. write,
482 create) will return ENOSPC.
483 .sp
484 Default value: \fB5\fR.
485 .RE
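.sp
.LP
A short Python sketch of the withheld fraction, using an assumed pool size
(the implementation may additionally enforce a minimum amount of slop
space):
.sp
.nf
pool_size = 10 * 2**40                  # assumed pool size: 10 TiB
spa_slop_shift = 5                      # default
slop = pool_size >> spa_slop_shift      # 1/32 of the pool
print(slop, 100.0 * slop / pool_size)   # 343597383680 3.125
.fi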
486
487 .sp
488 .ne 2
489 .na
490 \fBvdev_removal_max_span\fR (int)
491 .ad
492 .RS 12n
493 During top-level vdev removal, chunks of data are copied from the vdev
494 which may include free space in order to trade bandwidth for IOPS.
495 This parameter determines the maximum span of free space (in bytes)
496 which will be included as "unnecessary" data in a chunk of copied data.
497
498 The default value here was chosen to align with
499 \fBzfs_vdev_read_gap_limit\fR, which is a similar concept when doing
500 regular reads (but there's no reason it has to be the same).
501 .sp
502 Default value: \fB32,768\fR.
503 .RE
504
505 .sp
506 .ne 2
507 .na
508 \fBzfetch_array_rd_sz\fR (ulong)
509 .ad
510 .RS 12n
511 If prefetching is enabled, disable prefetching for reads larger than this size.
512 .sp
513 Default value: \fB1,048,576\fR.
514 .RE
515
516 .sp
517 .ne 2
518 .na
519 \fBzfetch_max_distance\fR (uint)
520 .ad
521 .RS 12n
522 Max bytes to prefetch per stream (default 8MB).
523 .sp
524 Default value: \fB8,388,608\fR.
525 .RE
526
527 .sp
528 .ne 2
529 .na
530 \fBzfetch_max_streams\fR (uint)
531 .ad
532 .RS 12n
533 Max number of streams per zfetch (prefetch streams per file).
534 .sp
535 Default value: \fB8\fR.
536 .RE
537
538 .sp
539 .ne 2
540 .na
541 \fBzfetch_min_sec_reap\fR (uint)
542 .ad
543 .RS 12n
544 Min time before an active prefetch stream can be reclaimed
545 .sp
546 Default value: \fB2\fR.
547 .RE
548
549 .sp
550 .ne 2
551 .na
552 \fBzfs_abd_scatter_min_size\fR (uint)
553 .ad
554 .RS 12n
555 This is the minimum allocation size that will use scatter (page-based)
556 ABDs. Smaller allocations will use linear ABDs.
557 .sp
558 Default value: \fB1536\fR (512B and 1KB allocations will be linear).
559 .RE
560
561 .sp
562 .ne 2
563 .na
564 \fBzfs_arc_dnode_limit\fR (ulong)
565 .ad
566 .RS 12n
567 When the number of bytes consumed by dnodes in the ARC exceeds this number of
568 bytes, try to unpin some of it in response to demand for non-metadata. This
569 value acts as a ceiling to the amount of dnode metadata, and defaults to 0,
570 which indicates that the \fBzfs_arc_dnode_limit_percent\fR percentage of
571 the ARC meta buffers may be used for dnodes.
572
573 See also \fBzfs_arc_meta_prune\fR which serves a similar purpose but is used
574 when the amount of metadata in the ARC exceeds \fBzfs_arc_meta_limit\fR rather
575 than in response to overall demand for non-metadata.
576
577 .sp
578 Default value: \fB0\fR.
579 .RE
580
581 .sp
582 .ne 2
583 .na
584 \fBzfs_arc_dnode_limit_percent\fR (ulong)
585 .ad
586 .RS 12n
587 Percentage of ARC meta buffers that can be consumed by dnodes.
588 .sp
589 See also \fBzfs_arc_dnode_limit\fR which serves a similar purpose but has a
590 higher priority if set to nonzero value.
591 .sp
592 Default value: \fB10\fR%.
593 .RE
594
595 .sp
596 .ne 2
597 .na
598 \fBzfs_arc_dnode_reduce_percent\fR (ulong)
599 .ad
600 .RS 12n
601 Percentage of ARC dnodes to try to scan in response to demand for non-metadata
602 when the number of bytes consumed by dnodes exceeds \fBzfs_arc_dnode_limit\fR.
603
604 .sp
605 Default value: \fB10\fR% of the number of dnodes in the ARC.
606 .RE
607
608 .sp
609 .ne 2
610 .na
611 \fBzfs_arc_average_blocksize\fR (int)
612 .ad
613 .RS 12n
614 The ARC's buffer hash table is sized based on the assumption of an average
615 block size of \fBzfs_arc_average_blocksize\fR (default 8K). This works out
616 to roughly 1MB of hash table per 1GB of physical memory with 8-byte pointers.
617 For configurations with a known larger average block size this value can be
618 increased to reduce the memory footprint.
619
620 .sp
621 Default value: \fB8192\fR.
622 .RE
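.sp
.LP
A Python sketch of the rule of thumb quoted above (one hash table pointer
per average-sized block of physical memory); the memory size is an assumed
example:
.sp
.nf
physmem = 1 * 2**30                  # assumed physical memory: 1 GiB
zfs_arc_average_blocksize = 8192     # default
pointer_size = 8                     # 8-byte pointers

ht_bytes = physmem // zfs_arc_average_blocksize * pointer_size
print(ht_bytes)                      # 1048576, i.e. ~1MB per 1GB of RAM
.fi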
623
624 .sp
625 .ne 2
626 .na
627 \fBzfs_arc_evict_batch_limit\fR (int)
628 .ad
629 .RS 12n
630 Number of ARC headers to evict per sub-list before proceeding to another sub-list.
631 This batch-style operation prevents entire sub-lists from being evicted at once
632 but comes at a cost of additional unlocking and locking.
633 .sp
634 Default value: \fB10\fR.
635 .RE
636
637 .sp
638 .ne 2
639 .na
640 \fBzfs_arc_grow_retry\fR (int)
641 .ad
642 .RS 12n
643 If set to a non-zero value, this will replace the internal arc_grow_retry
644 value with this value. arc_grow_retry (default 5) is the number of seconds
645 the ARC will wait before trying to resume growth after a memory pressure event.
646 .sp
647 Default value: \fB0\fR.
648 .RE
649
650 .sp
651 .ne 2
652 .na
653 \fBzfs_arc_lotsfree_percent\fR (int)
654 .ad
655 .RS 12n
656 Throttle I/O when free system memory drops below this percentage of total
657 system memory. Setting this value to 0 will disable the throttle.
658 .sp
659 Default value: \fB10\fR%.
660 .RE
661
662 .sp
663 .ne 2
664 .na
665 \fBzfs_arc_max\fR (ulong)
666 .ad
667 .RS 12n
668 Maximum size of the ARC in bytes. If set to 0 then it will consume 1/2 of
669 system RAM. This value must be at least 67108864 (64 megabytes).
670 .sp
671 This value can be changed dynamically, with some caveats. It cannot be set
672 back to 0 while running, and reducing it below the current ARC size will not
673 cause the ARC to shrink without memory pressure to induce shrinking.
674 .sp
675 Default value: \fB0\fR.
676 .RE
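.sp
.LP
On Linux, module parameters such as this one are exposed under
/sys/module/zfs/parameters. A Python sketch of reading and (as root)
changing \fBzfs_arc_max\fR at runtime; the 4 GiB figure is an arbitrary
example:
.sp
.nf
base = "/sys/module/zfs/parameters/"

with open(base + "zfs_arc_max") as f:
    print("current zfs_arc_max:", f.read().strip())

# Cap the ARC at an example 4 GiB; the value must be at least 67108864
# and, per the caveats above, cannot be set back to 0 at runtime.
with open(base + "zfs_arc_max", "w") as f:
    f.write(str(4 * 2**30))
.fi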
677
678 .sp
679 .ne 2
680 .na
681 \fBzfs_arc_meta_adjust_restarts\fR (ulong)
682 .ad
683 .RS 12n
684 The number of restart passes to make while scanning the ARC attempting
685 to free buffers in order to stay below the \fBzfs_arc_meta_limit\fR.
686 This value should not need to be tuned but is available to facilitate
687 performance analysis.
688 .sp
689 Default value: \fB4096\fR.
690 .RE
691
692 .sp
693 .ne 2
694 .na
695 \fBzfs_arc_meta_limit\fR (ulong)
696 .ad
697 .RS 12n
698 The maximum size in bytes that meta data buffers are allowed to
699 consume in the ARC. When this limit is reached meta data buffers will
700 be reclaimed even if the overall arc_c_max has not been reached. This
701 value defaults to 0, which indicates that the
702 \fBzfs_arc_meta_limit_percent\fR percentage of the ARC may be used for meta data.
703 .sp
704 This value may be changed dynamically, except that it cannot be set back to 0
705 for a specific percent of the ARC; it must be set to an explicit value.
706 .sp
707 Default value: \fB0\fR.
708 .RE
709
710 .sp
711 .ne 2
712 .na
713 \fBzfs_arc_meta_limit_percent\fR (ulong)
714 .ad
715 .RS 12n
716 Percentage of ARC buffers that can be used for meta data.
717
718 See also \fBzfs_arc_meta_limit\fR which serves a similar purpose but has a
719 higher priority if set to nonzero value.
720
721 .sp
722 Default value: \fB75\fR%.
723 .RE
724
725 .sp
726 .ne 2
727 .na
728 \fBzfs_arc_meta_min\fR (ulong)
729 .ad
730 .RS 12n
731 The minimum allowed size in bytes that meta data buffers may consume in
732 the ARC. This value defaults to 0 which disables a floor on the amount
733 of the ARC devoted to meta data.
734 .sp
735 Default value: \fB0\fR.
736 .RE
737
738 .sp
739 .ne 2
740 .na
741 \fBzfs_arc_meta_prune\fR (int)
742 .ad
743 .RS 12n
744 The number of dentries and inodes to be scanned looking for entries
745 which can be dropped. This may be required when the ARC reaches the
746 \fBzfs_arc_meta_limit\fR because dentries and inodes can pin buffers
747 in the ARC. Increasing this value will cause the dentry and inode caches
748 to be pruned more aggressively. Setting this value to 0 will disable
749 pruning the inode and dentry caches.
750 .sp
751 Default value: \fB10,000\fR.
752 .RE
753
754 .sp
755 .ne 2
756 .na
757 \fBzfs_arc_meta_strategy\fR (int)
758 .ad
759 .RS 12n
760 Define the strategy for ARC meta data buffer eviction (meta reclaim strategy).
761 A value of 0 (META_ONLY) will evict only the ARC meta data buffers.
762 A value of 1 (BALANCED) indicates that additional data buffers may be evicted if
763 that is required in order to evict the required number of meta data buffers.
764 .sp
765 Default value: \fB1\fR.
766 .RE
767
768 .sp
769 .ne 2
770 .na
771 \fBzfs_arc_min\fR (ulong)
772 .ad
773 .RS 12n
774 Minimum size of the ARC in bytes. If set to 0 then arc_c_min will default to
775 consuming the larger of 32M or 1/32 of total system memory.
776 .sp
777 Default value: \fB0\fR.
778 .RE
779
780 .sp
781 .ne 2
782 .na
783 \fBzfs_arc_min_prefetch_ms\fR (int)
784 .ad
785 .RS 12n
786 Minimum time prefetched blocks are locked in the ARC, specified in ms.
787 A value of \fB0\fR will default to 1000 ms.
788 .sp
789 Default value: \fB0\fR.
790 .RE
791
792 .sp
793 .ne 2
794 .na
795 \fBzfs_arc_min_prescient_prefetch_ms\fR (int)
796 .ad
797 .RS 12n
798 Minimum time "prescient prefetched" blocks are locked in the ARC, specified
799 in ms. These blocks are meant to be prefetched fairly aggressively ahead of
800 the code that may use them. A value of \fB0\fR will default to 6000 ms.
801 .sp
802 Default value: \fB0\fR.
803 .RE
804
805 .sp
806 .ne 2
807 .na
808 \fBzfs_max_missing_tvds\fR (int)
809 .ad
810 .RS 12n
811 Number of missing top-level vdevs which will be allowed during
812 pool import (only in read-only mode).
813 .sp
814 Default value: \fB0\fR.
815 .RE
816
817 .sp
818 .ne 2
819 .na
820 \fBzfs_multilist_num_sublists\fR (int)
821 .ad
822 .RS 12n
823 To allow more fine-grained locking, each ARC state contains a series
824 of lists for both data and meta data objects. Locking is performed at
825 the level of these "sub-lists". This parameter controls the number of
826 sub-lists per ARC state, and also applies to other uses of the
827 multilist data structure.
828 .sp
829 Default value: \fB4\fR or the number of online CPUs, whichever is greater.
830 .RE
831
832 .sp
833 .ne 2
834 .na
835 \fBzfs_arc_overflow_shift\fR (int)
836 .ad
837 .RS 12n
838 The ARC size is considered to be overflowing if it exceeds the current
839 ARC target size (arc_c) by a threshold determined by this parameter.
840 The threshold is calculated as a fraction of arc_c using the formula
841 "arc_c >> \fBzfs_arc_overflow_shift\fR".
842
843 The default value of 8 causes the ARC to be considered to be overflowing
844 if it exceeds the target size by 1/256th (about 0.4%) of the target size.
845
846 When the ARC is overflowing, new buffer allocations are stalled until
847 the reclaim thread catches up and the overflow condition no longer exists.
848 .sp
849 Default value: \fB8\fR.
850 .RE
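.sp
.LP
A Python sketch of the overflow test given by the formula above, with
assumed sizes:
.sp
.nf
arc_c = 16 * 2**30                 # assumed ARC target size: 16 GiB
zfs_arc_overflow_shift = 8         # default

overflow_threshold = arc_c >> zfs_arc_overflow_shift   # 1/256 of arc_c
arc_size = arc_c + 100 * 2**20     # assumed current ARC size

# 67108864 True: allocations would stall until the reclaim thread
# catches up.
print(overflow_threshold, arc_size > arc_c + overflow_threshold)
.fi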
851
852 .sp
853 .ne 2
854 .na
856 \fBzfs_arc_p_min_shift\fR (int)
857 .ad
858 .RS 12n
859 If set to a non zero value, this will update arc_p_min_shift (default 4)
860 with the new value.
861 arc_p_min_shift is used as a shift of arc_c for calculating both the minimum
862 and maximum arc_p.
863 .sp
864 Default value: \fB0\fR.
865 .RE
866
867 .sp
868 .ne 2
869 .na
870 \fBzfs_arc_p_dampener_disable\fR (int)
871 .ad
872 .RS 12n
873 Disable arc_p adapt dampener
874 .sp
875 Use \fB1\fR for yes (default) and \fB0\fR to disable.
876 .RE
877
878 .sp
879 .ne 2
880 .na
881 \fBzfs_arc_shrink_shift\fR (int)
882 .ad
883 .RS 12n
884 If set to a non zero value, this will update arc_shrink_shift (default 7)
885 with the new value.
886 .sp
887 Default value: \fB0\fR.
888 .RE
889
890 .sp
891 .ne 2
892 .na
893 \fBzfs_arc_pc_percent\fR (uint)
894 .ad
895 .RS 12n
896 Percent of pagecache to reclaim the ARC to.
897
898 This tunable allows ZFS arc to play more nicely with the kernel's LRU
899 pagecache. It can guarantee that the arc size won't collapse under scanning
900 pressure on the pagecache, yet still allows arc to be reclaimed down to
901 zfs_arc_min if necessary. This value is specified as percent of pagecache
902 size (as measured by NR_FILE_PAGES) where that percent may exceed 100. This
903 only operates during memory pressure/reclaim.
904 .sp
905 Default value: \fB0\fR% (disabled).
906 .RE
907
908 .sp
909 .ne 2
910 .na
911 \fBzfs_arc_sys_free\fR (ulong)
912 .ad
913 .RS 12n
914 The target number of bytes the ARC should leave as free memory on the system.
915 Defaults to the larger of 1/64 of physical memory or 512K. Setting this
916 option to a non-zero value will override the default.
917 .sp
918 Default value: \fB0\fR.
919 .RE
920
921 .sp
922 .ne 2
923 .na
924 \fBzfs_autoimport_disable\fR (int)
925 .ad
926 .RS 12n
927 Disable pool import at module load by ignoring the cache file (typically \fB/etc/zfs/zpool.cache\fR).
928 .sp
929 Use \fB1\fR for yes (default) and \fB0\fR for no.
930 .RE
931
932 .sp
933 .ne 2
934 .na
935 \fBzfs_checksums_per_second\fR (int)
936 .ad
937 .RS 12n
938 Rate limit checksum events to this many per second. Note that this should
939 not be set below the zed thresholds (currently 10 checksums over 10 sec)
940 or else zed may not trigger any action.
941 .sp
942 Default value: \fB20\fR.
943 .RE
944
945 .sp
946 .ne 2
947 .na
948 \fBzfs_commit_timeout_pct\fR (int)
949 .ad
950 .RS 12n
951 This controls the amount of time that a ZIL block (lwb) will remain "open"
952 when it isn't "full", and it has a thread waiting for it to be committed to
953 stable storage. The timeout is scaled based on a percentage of the last lwb
954 latency to avoid significantly impacting the latency of each individual
955 transaction record (itx).
956 .sp
957 Default value: \fB5\fR%.
958 .RE
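.sp
.LP
A Python sketch of that scaling under an assumed last-lwb latency (any
clamping done by the ZIL code is omitted here):
.sp
.nf
zfs_commit_timeout_pct = 5      # default
last_lwb_latency_us = 400       # assumed latency of the last lwb

timeout_us = last_lwb_latency_us * zfs_commit_timeout_pct // 100
print(timeout_us)               # 20: an open, non-full lwb waits at
                                # most about this long before committing
.fi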
959
960 .sp
961 .ne 2
962 .na
963 \fBzfs_condense_indirect_vdevs_enable\fR (int)
964 .ad
965 .RS 12n
966 Enable condensing indirect vdev mappings. When set to a non-zero value,
967 attempt to condense indirect vdev mappings if the mapping uses more than
968 \fBzfs_condense_min_mapping_bytes\fR bytes of memory and if the obsolete
969 space map object uses more than \fBzfs_condense_max_obsolete_bytes\fR
970 bytes on-disk. The condensing process is an attempt to save memory by
971 removing obsolete mappings.
972 .sp
973 Default value: \fB1\fR.
974 .RE
975
976 .sp
977 .ne 2
978 .na
979 \fBzfs_condense_max_obsolete_bytes\fR (ulong)
980 .ad
981 .RS 12n
982 Only attempt to condense indirect vdev mappings if the on-disk size
983 of the obsolete space map object is greater than this number of bytes
984 (see \fBzfs_condense_indirect_vdevs_enable\fR).
985 .sp
986 Default value: \fB1,073,741,824\fR.
987 .RE
988
989 .sp
990 .ne 2
991 .na
992 \fBzfs_condense_min_mapping_bytes\fR (ulong)
993 .ad
994 .RS 12n
995 Minimum size vdev mapping to attempt to condense (see
996 \fBzfs_condense_indirect_vdevs_enable\fR).
997 .sp
998 Default value: \fB131,072\fR.
999 .RE
1000
1001 .sp
1002 .ne 2
1003 .na
1004 \fBzfs_dbgmsg_enable\fR (int)
1005 .ad
1006 .RS 12n
1007 Internally ZFS keeps a small log to facilitate debugging. By default the log
1008 is disabled, to enable it set this option to 1. The contents of the log can
1009 be accessed by reading the /proc/spl/kstat/zfs/dbgmsg file. Writing 0 to
1010 this proc file clears the log.
1011 .sp
1012 Default value: \fB0\fR.
1013 .RE
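.sp
.LP
For example, a Python sketch (root required) that enables the log via the
standard module parameter path, dumps it, and then clears it:
.sp
.nf
with open("/sys/module/zfs/parameters/zfs_dbgmsg_enable", "w") as f:
    f.write("1")                 # enable the internal debug log

with open("/proc/spl/kstat/zfs/dbgmsg") as f:
    print(f.read())              # dump the accumulated messages

with open("/proc/spl/kstat/zfs/dbgmsg", "w") as f:
    f.write("0")                 # writing 0 clears the log
.fi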
1014
1015 .sp
1016 .ne 2
1017 .na
1018 \fBzfs_dbgmsg_maxsize\fR (int)
1019 .ad
1020 .RS 12n
1021 The maximum size in bytes of the internal ZFS debug log.
1022 .sp
1023 Default value: \fB4M\fR.
1024 .RE
1025
1026 .sp
1027 .ne 2
1028 .na
1029 \fBzfs_dbuf_state_index\fR (int)
1030 .ad
1031 .RS 12n
1032 This feature is currently unused. It is normally used for controlling what
1033 reporting is available under /proc/spl/kstat/zfs.
1034 .sp
1035 Default value: \fB0\fR.
1036 .RE
1037
1038 .sp
1039 .ne 2
1040 .na
1041 \fBzfs_deadman_enabled\fR (int)
1042 .ad
1043 .RS 12n
1044 When a pool sync operation takes longer than \fBzfs_deadman_synctime_ms\fR
1045 milliseconds, or when an individual I/O takes longer than
1046 \fBzfs_deadman_ziotime_ms\fR milliseconds, then the operation is considered to
1047 be "hung". If \fBzfs_deadman_enabled\fR is set then the deadman behavior is
1048 invoked as described by the \fBzfs_deadman_failmode\fR module option.
1049 By default the deadman is enabled and configured to \fBwait\fR which results
1050 in "hung" I/Os only being logged. The deadman is automatically disabled
1051 when a pool gets suspended.
1052 .sp
1053 Default value: \fB1\fR.
1054 .RE
1055
1056 .sp
1057 .ne 2
1058 .na
1059 \fBzfs_deadman_failmode\fR (charp)
1060 .ad
1061 .RS 12n
1062 Controls the failure behavior when the deadman detects a "hung" I/O. Valid
1063 values are \fBwait\fR, \fBcontinue\fR, and \fBpanic\fR.
1064 .sp
1065 \fBwait\fR - Wait for a "hung" I/O to complete. For each "hung" I/O a
1066 "deadman" event will be posted describing that I/O.
1067 .sp
1068 \fBcontinue\fR - Attempt to recover from a "hung" I/O by re-dispatching it
1069 to the I/O pipeline if possible.
1070 .sp
1071 \fBpanic\fR - Panic the system. This can be used to facilitate an automatic
1072 fail-over to a properly configured fail-over partner.
1073 .sp
1074 Default value: \fBwait\fR.
1075 .RE
1076
1077 .sp
1078 .ne 2
1079 .na
1080 \fBzfs_deadman_checktime_ms\fR (int)
1081 .ad
1082 .RS 12n
1083 Check time in milliseconds. This defines the frequency at which we check
1084 for hung I/O and potentially invoke the \fBzfs_deadman_failmode\fR behavior.
1085 .sp
1086 Default value: \fB60,000\fR.
1087 .RE
1088
1089 .sp
1090 .ne 2
1091 .na
1092 \fBzfs_deadman_synctime_ms\fR (ulong)
1093 .ad
1094 .RS 12n
1095 Interval in milliseconds after which the deadman is triggered and also
1096 the interval after which a pool sync operation is considered to be "hung".
1097 Once this limit is exceeded the deadman will be invoked every
1098 \fBzfs_deadman_checktime_ms\fR milliseconds until the pool sync completes.
1099 .sp
1100 Default value: \fB600,000\fR.
1101 .RE
1102
1103 .sp
1104 .ne 2
1105 .na
1106 \fBzfs_deadman_ziotime_ms\fR (ulong)
1107 .ad
1108 .RS 12n
1109 Interval in milliseconds after which the deadman is triggered and an
1110 individual I/O operation is considered to be "hung". As long as the I/O
1111 remains "hung" the deadman will be invoked every \fBzfs_deadman_checktime_ms\fR
1112 milliseconds until the I/O completes.
1113 .sp
1114 Default value: \fB300,000\fR.
1115 .RE
1116
1117 .sp
1118 .ne 2
1119 .na
1120 \fBzfs_dedup_prefetch\fR (int)
1121 .ad
1122 .RS 12n
1123 Enable prefetching of deduplicated blocks.
1124 .sp
1125 Use \fB1\fR for yes and \fB0\fR to disable (default).
1126 .RE
1127
1128 .sp
1129 .ne 2
1130 .na
1131 \fBzfs_delay_min_dirty_percent\fR (int)
1132 .ad
1133 .RS 12n
1134 Start to delay each transaction once there is this amount of dirty data,
1135 expressed as a percentage of \fBzfs_dirty_data_max\fR.
1136 This value should be >= zfs_vdev_async_write_active_max_dirty_percent.
1137 See the section "ZFS TRANSACTION DELAY".
1138 .sp
1139 Default value: \fB60\fR%.
1140 .RE
1141
1142 .sp
1143 .ne 2
1144 .na
1145 \fBzfs_delay_scale\fR (int)
1146 .ad
1147 .RS 12n
1148 This controls how quickly the transaction delay approaches infinity.
1149 Larger values cause longer delays for a given amount of dirty data.
1150 .sp
1151 For the smoothest delay, this value should be about 1 billion divided
1152 by the maximum number of operations per second. This will smoothly
1153 handle between 10x and 1/10th this number.
1154 .sp
1155 See the section "ZFS TRANSACTION DELAY".
1156 .sp
1157 Note: \fBzfs_delay_scale\fR * \fBzfs_dirty_data_max\fR must be < 2^64.
1158 .sp
1159 Default value: \fB500,000\fR.
1160 .RE
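.sp
.LP
A worked Python sketch of the sizing rule above; the operations-per-second
figure is an assumed example:
.sp
.nf
max_ops_per_second = 2000                     # assumed pool capability
zfs_delay_scale = 10**9 // max_ops_per_second
print(zfs_delay_scale)                        # 500000, i.e. the default
                                              # suits ~2000 ops per second

zfs_dirty_data_max = 4 * 2**30                # assumed dirty limit
assert zfs_delay_scale * zfs_dirty_data_max < 2**64   # note above
.fi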
1161
1162 .sp
1163 .ne 2
1164 .na
1165 \fBzfs_slow_io_events_per_second\fR (int)
1166 .ad
1167 .RS 12n
1168 Rate limit delay zevents (which report slow I/Os) to this many per second.
1169 .sp
1170 Default value: \fB20\fR.
1171 .RE
1172
1173 .sp
1174 .ne 2
1175 .na
1176 \fBzfs_unlink_suspend_progress\fR (uint)
1177 .ad
1178 .RS 12n
1179 When enabled, files will not be asynchronously removed from the list of pending
1180 unlinks and the space they consume will be leaked. Once this option has been
1181 disabled and the dataset is remounted, the pending unlinks will be processed
1182 and the freed space returned to the pool.
1183 This option is used by the test suite to facilitate testing.
1184 .sp
1185 Use \fB0\fR (default) to allow progress and \fB1\fR to pause progress.
1186 .RE
1187
1188 .sp
1189 .ne 2
1190 .na
1191 \fBzfs_delete_blocks\fR (ulong)
1192 .ad
1193 .RS 12n
1194 This is used to define a large file for the purposes of delete. Files
1195 containing more than \fBzfs_delete_blocks\fR blocks will be deleted asynchronously
1196 while smaller files are deleted synchronously. Decreasing this value will
1197 reduce the time spent in an unlink(2) system call at the expense of a longer
1198 delay before the freed space is available.
1199 .sp
1200 Default value: \fB20,480\fR.
1201 .RE
1202
1203 .sp
1204 .ne 2
1205 .na
1206 \fBzfs_dirty_data_max\fR (int)
1207 .ad
1208 .RS 12n
1209 Determines the dirty space limit in bytes. Once this limit is exceeded, new
1210 writes are halted until space frees up. This parameter takes precedence
1211 over \fBzfs_dirty_data_max_percent\fR.
1212 See the section "ZFS TRANSACTION DELAY".
1213 .sp
1214 Default value: \fB10\fR% of physical RAM, capped at \fBzfs_dirty_data_max_max\fR.
1215 .RE
1216
1217 .sp
1218 .ne 2
1219 .na
1220 \fBzfs_dirty_data_max_max\fR (int)
1221 .ad
1222 .RS 12n
1223 Maximum allowable value of \fBzfs_dirty_data_max\fR, expressed in bytes.
1224 This limit is only enforced at module load time, and will be ignored if
1225 \fBzfs_dirty_data_max\fR is later changed. This parameter takes
1226 precedence over \fBzfs_dirty_data_max_max_percent\fR. See the section
1227 "ZFS TRANSACTION DELAY".
1228 .sp
1229 Default value: \fB25\fR% of physical RAM.
1230 .RE
1231
1232 .sp
1233 .ne 2
1234 .na
1235 \fBzfs_dirty_data_max_max_percent\fR (int)
1236 .ad
1237 .RS 12n
1238 Maximum allowable value of \fBzfs_dirty_data_max\fR, expressed as a
1239 percentage of physical RAM. This limit is only enforced at module load
1240 time, and will be ignored if \fBzfs_dirty_data_max\fR is later changed.
1241 The parameter \fBzfs_dirty_data_max_max\fR takes precedence over this
1242 one. See the section "ZFS TRANSACTION DELAY".
1243 .sp
1244 Default value: \fB25\fR%.
1245 .RE
1246
1247 .sp
1248 .ne 2
1249 .na
1250 \fBzfs_dirty_data_max_percent\fR (int)
1251 .ad
1252 .RS 12n
1253 Determines the dirty space limit, expressed as a percentage of all
1254 memory. Once this limit is exceeded, new writes are halted until space frees
1255 up. The parameter \fBzfs_dirty_data_max\fR takes precedence over this
1256 one. See the section "ZFS TRANSACTION DELAY".
1257 .sp
1258 Default value: \fB10\fR%, subject to \fBzfs_dirty_data_max_max\fR.
1259 .RE
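.sp
.LP
A Python sketch of how the dirty data defaults above combine, for an
assumed amount of physical RAM:
.sp
.nf
physmem = 32 * 2**30                  # assumed physical RAM: 32 GiB
zfs_dirty_data_max_percent = 10       # default
zfs_dirty_data_max_max_percent = 25   # default

zfs_dirty_data_max_max = physmem * zfs_dirty_data_max_max_percent // 100
zfs_dirty_data_max = min(physmem * zfs_dirty_data_max_percent // 100,
                         zfs_dirty_data_max_max)

# 3435973836 8589934592: 10% of RAM, capped at 25% of RAM
print(zfs_dirty_data_max, zfs_dirty_data_max_max)
.fi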
1260
1261 .sp
1262 .ne 2
1263 .na
1264 \fBzfs_dirty_data_sync_percent\fR (int)
1265 .ad
1266 .RS 12n
1267 Start syncing out a transaction group if there's at least this much dirty data
1268 as a percentage of \fBzfs_dirty_data_max\fR. This should be less than
1269 \fBzfs_vdev_async_write_active_min_dirty_percent\fR.
1270 .sp
1271 Default value: \fB20\fR% of \fBzfs_dirty_data_max\fR.
1272 .RE
1273
1274 .sp
1275 .ne 2
1276 .na
1277 \fBzfs_fletcher_4_impl\fR (string)
1278 .ad
1279 .RS 12n
1280 Select a fletcher 4 implementation.
1281 .sp
1282 Supported selectors are: \fBfastest\fR, \fBscalar\fR, \fBsse2\fR, \fBssse3\fR,
1283 \fBavx2\fR, \fBavx512f\fR, and \fBaarch64_neon\fR.
1284 All of the selectors except \fBfastest\fR and \fBscalar\fR require instruction
1285 set extensions to be available and will only appear if ZFS detects that they are
1286 present at runtime. If multiple implementations of fletcher 4 are available,
1287 the \fBfastest\fR will be chosen using a micro benchmark. Selecting \fBscalar\fR
1288 results in the original, CPU-based calculation being used. Selecting any option
1289 other than \fBfastest\fR and \fBscalar\fR results in vector instructions from
1290 the respective CPU instruction set being used.
1291 .sp
1292 Default value: \fBfastest\fR.
1293 .RE
1294
1295 .sp
1296 .ne 2
1297 .na
1298 \fBzfs_free_bpobj_enabled\fR (int)
1299 .ad
1300 .RS 12n
1301 Enable/disable the processing of the free_bpobj object.
1302 .sp
1303 Default value: \fB1\fR.
1304 .RE
1305
1306 .sp
1307 .ne 2
1308 .na
1309 \fBzfs_async_block_max_blocks\fR (ulong)
1310 .ad
1311 .RS 12n
1312 Maximum number of blocks freed in a single txg.
1313 .sp
1314 Default value: \fB100,000\fR.
1315 .RE
1316
1317 .sp
1318 .ne 2
1319 .na
1320 \fBzfs_override_estimate_recordsize\fR (ulong)
1321 .ad
1322 .RS 12n
1323 Record size calculation override for zfs send estimates.
1324 .sp
1325 Default value: \fB0\fR.
1326 .RE
1327
1328 .sp
1329 .ne 2
1330 .na
1331 \fBzfs_vdev_async_read_max_active\fR (int)
1332 .ad
1333 .RS 12n
1334 Maximum asynchronous read I/Os active to each device.
1335 See the section "ZFS I/O SCHEDULER".
1336 .sp
1337 Default value: \fB3\fR.
1338 .RE
1339
1340 .sp
1341 .ne 2
1342 .na
1343 \fBzfs_vdev_async_read_min_active\fR (int)
1344 .ad
1345 .RS 12n
1346 Minimum asynchronous read I/Os active to each device.
1347 See the section "ZFS I/O SCHEDULER".
1348 .sp
1349 Default value: \fB1\fR.
1350 .RE
1351
1352 .sp
1353 .ne 2
1354 .na
1355 \fBzfs_vdev_async_write_active_max_dirty_percent\fR (int)
1356 .ad
1357 .RS 12n
1358 When the pool has more than
1359 \fBzfs_vdev_async_write_active_max_dirty_percent\fR dirty data, use
1360 \fBzfs_vdev_async_write_max_active\fR to limit active async writes. If
1361 the dirty data is between min and max, the active I/O limit is linearly
1362 interpolated. See the section "ZFS I/O SCHEDULER".
1363 .sp
1364 Default value: \fB60\fR%.
1365 .RE
1366
1367 .sp
1368 .ne 2
1369 .na
1370 \fBzfs_vdev_async_write_active_min_dirty_percent\fR (int)
1371 .ad
1372 .RS 12n
1373 When the pool has less than
1374 \fBzfs_vdev_async_write_active_min_dirty_percent\fR dirty data, use
1375 \fBzfs_vdev_async_write_min_active\fR to limit active async writes. If
1376 the dirty data is between min and max, the active I/O limit is linearly
1377 interpolated. See the section "ZFS I/O SCHEDULER".
1378 .sp
1379 Default value: \fB30\fR%.
1380 .RE
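.sp
.LP
A Python sketch of the linear interpolation between the async write min and
max active limits described in the two entries above (the integer rounding
here is illustrative; the scheduler's exact arithmetic may differ):
.sp
.nf
zfs_vdev_async_write_min_active = 2   # default
zfs_vdev_async_write_max_active = 10  # default
min_dirty_pct = 30                    # default of the _min_dirty_percent
max_dirty_pct = 60                    # default of the _max_dirty_percent

def async_write_limit(dirty_pct):
    # dirty_pct: dirty data as a percentage of zfs_dirty_data_max
    if dirty_pct <= min_dirty_pct:
        return zfs_vdev_async_write_min_active
    if dirty_pct >= max_dirty_pct:
        return zfs_vdev_async_write_max_active
    return (zfs_vdev_async_write_min_active
            + (dirty_pct - min_dirty_pct)
            * (zfs_vdev_async_write_max_active
               - zfs_vdev_async_write_min_active)
            // (max_dirty_pct - min_dirty_pct))

print([async_write_limit(p) for p in (10, 45, 80)])   # [2, 6, 10]
.fi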
1381
1382 .sp
1383 .ne 2
1384 .na
1385 \fBzfs_vdev_async_write_max_active\fR (int)
1386 .ad
1387 .RS 12n
1388 Maximum asynchronous write I/Os active to each device.
1389 See the section "ZFS I/O SCHEDULER".
1390 .sp
1391 Default value: \fB10\fR.
1392 .RE
1393
1394 .sp
1395 .ne 2
1396 .na
1397 \fBzfs_vdev_async_write_min_active\fR (int)
1398 .ad
1399 .RS 12n
1400 Minimum asynchronous write I/Os active to each device.
1401 See the section "ZFS I/O SCHEDULER".
1402 .sp
1403 Lower values are associated with better latency on rotational media but poorer
1404 resilver performance. The default value of 2 was chosen as a compromise. A
1405 value of 3 has been shown to improve resilver performance further at a cost of
1406 further increasing latency.
1407 .sp
1408 Default value: \fB2\fR.
1409 .RE
1410
1411 .sp
1412 .ne 2
1413 .na
1414 \fBzfs_vdev_initializing_max_active\fR (int)
1415 .ad
1416 .RS 12n
1417 Maximum initializing I/Os active to each device.
1418 See the section "ZFS I/O SCHEDULER".
1419 .sp
1420 Default value: \fB1\fR.
1421 .RE
1422
1423 .sp
1424 .ne 2
1425 .na
1426 \fBzfs_vdev_initializing_min_active\fR (int)
1427 .ad
1428 .RS 12n
1429 Minimum initializing I/Os active to each device.
1430 See the section "ZFS I/O SCHEDULER".
1431 .sp
1432 Default value: \fB1\fR.
1433 .RE
1434
1435 .sp
1436 .ne 2
1437 .na
1438 \fBzfs_vdev_max_active\fR (int)
1439 .ad
1440 .RS 12n
1441 The maximum number of I/Os active to each device. Ideally, this will be >=
1442 the sum of each queue's max_active. It must be at least the sum of each
1443 queue's min_active. See the section "ZFS I/O SCHEDULER".
1444 .sp
1445 Default value: \fB1,000\fR.
1446 .RE
1447
1448 .sp
1449 .ne 2
1450 .na
1451 \fBzfs_vdev_removal_max_active\fR (int)
1452 .ad
1453 .RS 12n
1454 Maximum removal I/Os active to each device.
1455 See the section "ZFS I/O SCHEDULER".
1456 .sp
1457 Default value: \fB2\fR.
1458 .RE
1459
1460 .sp
1461 .ne 2
1462 .na
1463 \fBzfs_vdev_removal_min_active\fR (int)
1464 .ad
1465 .RS 12n
1466 Minimum removal I/Os active to each device.
1467 See the section "ZFS I/O SCHEDULER".
1468 .sp
1469 Default value: \fB1\fR.
1470 .RE
1471
1472 .sp
1473 .ne 2
1474 .na
1475 \fBzfs_vdev_scrub_max_active\fR (int)
1476 .ad
1477 .RS 12n
1478 Maximum scrub I/Os active to each device.
1479 See the section "ZFS I/O SCHEDULER".
1480 .sp
1481 Default value: \fB2\fR.
1482 .RE
1483
1484 .sp
1485 .ne 2
1486 .na
1487 \fBzfs_vdev_scrub_min_active\fR (int)
1488 .ad
1489 .RS 12n
1490 Minimum scrub I/Os active to each device.
1491 See the section "ZFS I/O SCHEDULER".
1492 .sp
1493 Default value: \fB1\fR.
1494 .RE
1495
1496 .sp
1497 .ne 2
1498 .na
1499 \fBzfs_vdev_sync_read_max_active\fR (int)
1500 .ad
1501 .RS 12n
1502 Maximum synchronous read I/Os active to each device.
1503 See the section "ZFS I/O SCHEDULER".
1504 .sp
1505 Default value: \fB10\fR.
1506 .RE
1507
1508 .sp
1509 .ne 2
1510 .na
1511 \fBzfs_vdev_sync_read_min_active\fR (int)
1512 .ad
1513 .RS 12n
1514 Minimum synchronous read I/Os active to each device.
1515 See the section "ZFS I/O SCHEDULER".
1516 .sp
1517 Default value: \fB10\fR.
1518 .RE
1519
1520 .sp
1521 .ne 2
1522 .na
1523 \fBzfs_vdev_sync_write_max_active\fR (int)
1524 .ad
1525 .RS 12n
1526 Maximum synchronous write I/Os active to each device.
1527 See the section "ZFS I/O SCHEDULER".
1528 .sp
1529 Default value: \fB10\fR.
1530 .RE
1531
1532 .sp
1533 .ne 2
1534 .na
1535 \fBzfs_vdev_sync_write_min_active\fR (int)
1536 .ad
1537 .RS 12n
1538 Minimum synchronous write I/Os active to each device.
1539 See the section "ZFS I/O SCHEDULER".
1540 .sp
1541 Default value: \fB10\fR.
1542 .RE
1543
1544 .sp
1545 .ne 2
1546 .na
1547 \fBzfs_vdev_trim_max_active\fR (int)
1548 .ad
1549 .RS 12n
1550 Maximum trim/discard I/Os active to each device.
1551 See the section "ZFS I/O SCHEDULER".
1552 .sp
1553 Default value: \fB2\fR.
1554 .RE
1555
1556 .sp
1557 .ne 2
1558 .na
1559 \fBzfs_vdev_trim_min_active\fR (int)
1560 .ad
1561 .RS 12n
1562 Minimum trim/discard I/Os active to each device.
1563 See the section "ZFS I/O SCHEDULER".
1564 .sp
1565 Default value: \fB1\fR.
1566 .RE
1567
1568 .sp
1569 .ne 2
1570 .na
1571 \fBzfs_vdev_queue_depth_pct\fR (int)
1572 .ad
1573 .RS 12n
1574 Maximum number of queued allocations per top-level vdev expressed as
1575 a percentage of \fBzfs_vdev_async_write_max_active\fR which allows the
1576 system to detect devices that are more capable of handling allocations
1577 and to allocate more blocks to those devices. It allows for dynamic
1578 allocation distribution when devices are imbalanced as fuller devices
1579 will tend to be slower than empty devices.
1580
1581 See also \fBzio_dva_throttle_enabled\fR.
1582 .sp
1583 Default value: \fB1000\fR%.
1584 .RE
1585
1586 .sp
1587 .ne 2
1588 .na
1589 \fBzfs_expire_snapshot\fR (int)
1590 .ad
1591 .RS 12n
1592 Seconds to expire .zfs/snapshot
1593 .sp
1594 Default value: \fB300\fR.
1595 .RE
1596
1597 .sp
1598 .ne 2
1599 .na
1600 \fBzfs_admin_snapshot\fR (int)
1601 .ad
1602 .RS 12n
1603 Allow the creation, removal, or renaming of entries in the .zfs/snapshot
1604 directory to cause the creation, destruction, or renaming of snapshots.
1605 When enabled this functionality works both locally and over NFS exports
1606 which have the 'no_root_squash' option set. This functionality is disabled
1607 by default.
1608 .sp
1609 Use \fB1\fR for yes and \fB0\fR for no (default).
1610 .RE
1611
1612 .sp
1613 .ne 2
1614 .na
1615 \fBzfs_flags\fR (int)
1616 .ad
1617 .RS 12n
1618 Set additional debugging flags. The following flags may be bitwise-or'd
1619 together.
1620 .sp
1621 .TS
1622 box;
1623 rB lB
1624 lB lB
1625 r l.
1626 Value Symbolic Name
1627 Description
1628 _
1629 1 ZFS_DEBUG_DPRINTF
1630 Enable dprintf entries in the debug log.
1631 _
1632 2 ZFS_DEBUG_DBUF_VERIFY *
1633 Enable extra dbuf verifications.
1634 _
1635 4 ZFS_DEBUG_DNODE_VERIFY *
1636 Enable extra dnode verifications.
1637 _
1638 8 ZFS_DEBUG_SNAPNAMES
1639 Enable snapshot name verification.
1640 _
1641 16 ZFS_DEBUG_MODIFY
1642 Check for illegally modified ARC buffers.
1643 _
1644 64 ZFS_DEBUG_ZIO_FREE
1645 Enable verification of block frees.
1646 _
1647 128 ZFS_DEBUG_HISTOGRAM_VERIFY
1648 Enable extra spacemap histogram verifications.
1649 _
1650 256 ZFS_DEBUG_METASLAB_VERIFY
1651 Verify space accounting on disk matches in-core range_trees.
1652 _
1653 512 ZFS_DEBUG_SET_ERROR
1654 Enable SET_ERROR and dprintf entries in the debug log.
1655 _
1656 1024 ZFS_DEBUG_INDIRECT_REMAP
1657 Verify split blocks created by device removal.
1658 _
1659 2048 ZFS_DEBUG_TRIM
1660 Verify TRIM ranges are always within the allocatable range tree.
1661 .TE
1662 .sp
1663 * Requires debug build.
1664 .sp
1665 Default value: \fB0\fR.
1666 .RE
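.sp
.LP
For example, a Python sketch composing a \fBzfs_flags\fR value from the
table by bitwise OR:
.sp
.nf
ZFS_DEBUG_DPRINTF   = 1
ZFS_DEBUG_SNAPNAMES = 8
ZFS_DEBUG_SET_ERROR = 512

zfs_flags = ZFS_DEBUG_DPRINTF | ZFS_DEBUG_SNAPNAMES | ZFS_DEBUG_SET_ERROR
print(zfs_flags)   # 521, the value to write to the zfs_flags parameter
.fi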
1667
1668 .sp
1669 .ne 2
1670 .na
1671 \fBzfs_free_leak_on_eio\fR (int)
1672 .ad
1673 .RS 12n
1674 If destroy encounters an EIO while reading metadata (e.g. indirect
1675 blocks), space referenced by the missing metadata can not be freed.
1676 Normally this causes the background destroy to become "stalled", as
1677 it is unable to make forward progress. While in this stalled state,
1678 all remaining space to free from the error-encountering filesystem is
1679 "temporarily leaked". Set this flag to cause it to ignore the EIO,
1680 permanently leak the space from indirect blocks that can not be read,
1681 and continue to free everything else that it can.
1682
1683 The default, "stalling" behavior is useful if the storage partially
1684 fails (i.e. some but not all i/os fail), and then later recovers. In
1685 this case, we will be able to continue pool operations while it is
1686 partially failed, and when it recovers, we can continue to free the
1687 space, with no leaks. However, note that this case is actually
1688 fairly rare.
1689
1690 Typically pools either (a) fail completely (but perhaps temporarily,
1691 e.g. a top-level vdev going offline), or (b) have localized,
1692 permanent errors (e.g. disk returns the wrong data due to bit flip or
1693 firmware bug). In case (a), this setting does not matter because the
1694 pool will be suspended and the sync thread will not be able to make
1695 forward progress regardless. In case (b), because the error is
1696 permanent, the best we can do is leak the minimum amount of space,
1697 which is what setting this flag will do. Therefore, it is reasonable
1698 for this flag to normally be set, but we chose the more conservative
1699 approach of not setting it, so that there is no possibility of
1700 leaking space in the "partial temporary" failure case.
1701 .sp
1702 Default value: \fB0\fR.
1703 .RE
1704
1705 .sp
1706 .ne 2
1707 .na
1708 \fBzfs_free_min_time_ms\fR (int)
1709 .ad
1710 .RS 12n
1711 During a \fBzfs destroy\fR operation using \fBfeature@async_destroy\fR a minimum
1712 of this much time will be spent working on freeing blocks per txg.
1713 .sp
1714 Default value: \fB1,000\fR.
1715 .RE
1716
1717 .sp
1718 .ne 2
1719 .na
1720 \fBzfs_immediate_write_sz\fR (long)
1721 .ad
1722 .RS 12n
1723 Largest data block to write to zil. Larger blocks will be treated as if the
1724 dataset being written to had the property setting \fBlogbias=throughput\fR.
1725 .sp
1726 Default value: \fB32,768\fR.
1727 .RE
1728
1729 .sp
1730 .ne 2
1731 .na
1732 \fBzfs_initialize_value\fR (ulong)
1733 .ad
1734 .RS 12n
1735 Pattern written to vdev free space by \fBzpool initialize\fR.
1736 .sp
1737 Default value: \fB16,045,690,984,833,335,022\fR (0xdeadbeefdeadbeee).
1738 .RE
1739
1740 .sp
1741 .ne 2
1742 .na
1743 \fBzfs_lua_max_instrlimit\fR (ulong)
1744 .ad
1745 .RS 12n
1746 The maximum execution time limit that can be set for a ZFS channel program,
1747 specified as a number of Lua instructions.
1748 .sp
1749 Default value: \fB100,000,000\fR.
1750 .RE
1751
1752 .sp
1753 .ne 2
1754 .na
1755 \fBzfs_lua_max_memlimit\fR (ulong)
1756 .ad
1757 .RS 12n
1758 The maximum memory limit that can be set for a ZFS channel program, specified
1759 in bytes.
1760 .sp
1761 Default value: \fB104,857,600\fR.
1762 .RE
1763
1764 .sp
1765 .ne 2
1766 .na
1767 \fBzfs_max_dataset_nesting\fR (int)
1768 .ad
1769 .RS 12n
1770 The maximum depth of nested datasets. This value can be tuned temporarily to
1771 fix existing datasets that exceed the predefined limit.
1772 .sp
1773 Default value: \fB50\fR.
1774 .RE
1775
1776 .sp
1777 .ne 2
1778 .na
1779 \fBzfs_max_recordsize\fR (int)
1780 .ad
1781 .RS 12n
1782 We currently support block sizes from 512 bytes to 16MB. The benefits of
1783 larger blocks, and thus larger I/O, need to be weighed against the cost of
1784 COWing a giant block to modify one byte. Additionally, very large blocks
1785 can have an impact on i/o latency, and also potentially on the memory
1786 allocator. Therefore, we do not allow the recordsize to be set larger than
1787 zfs_max_recordsize (default 1MB). Larger blocks can be created by changing
1788 this tunable, and pools with larger blocks can always be imported and used,
1789 regardless of this setting.
1790 .sp
1791 Default value: \fB1,048,576\fR.
1792 .RE
1793
1794 .sp
1795 .ne 2
1796 .na
1797 \fBzfs_metaslab_fragmentation_threshold\fR (int)
1798 .ad
1799 .RS 12n
1800 Allow metaslabs to keep their active state as long as their fragmentation
1801 percentage is less than or equal to this value. An active metaslab that
1802 exceeds this threshold will no longer keep its active status allowing
1803 better metaslabs to be selected.
1804 .sp
1805 Default value: \fB70\fR.
1806 .RE
1807
1808 .sp
1809 .ne 2
1810 .na
1811 \fBzfs_mg_fragmentation_threshold\fR (int)
1812 .ad
1813 .RS 12n
1814 Metaslab groups are considered eligible for allocations if their
1815 fragmentation metric (measured as a percentage) is less than or equal to
1816 this value. If a metaslab group exceeds this threshold then it will be
1817 skipped unless all metaslab groups within the metaslab class have also
1818 crossed this threshold.
1819 .sp
1820 Default value: \fB85\fR.
1821 .RE
1822
1823 .sp
1824 .ne 2
1825 .na
1826 \fBzfs_mg_noalloc_threshold\fR (int)
1827 .ad
1828 .RS 12n
1829 Defines a threshold at which metaslab groups should be eligible for
1830 allocations. The value is expressed as a percentage of free space
1831 beyond which a metaslab group is always eligible for allocations.
1832 If a metaslab group's free space is less than or equal to the
1833 threshold, the allocator will avoid allocating to that group
1834 unless all groups in the pool have reached the threshold. Once all
1835 groups have reached the threshold, all groups are allowed to accept
1836 allocations. The default value of 0 disables the feature and causes
1837 all metaslab groups to be eligible for allocations.
1838
1839 This parameter allows one to deal with pools having heavily imbalanced
1840 vdevs such as would be the case when a new vdev has been added.
1841 Setting the threshold to a non-zero percentage will stop allocations
1842 from being made to vdevs that aren't filled to the specified percentage
1843 and allow lesser filled vdevs to acquire more allocations than they
1844 otherwise would under the old \fBzfs_mg_alloc_failures\fR facility.
1845 .sp
1846 Default value: \fB0\fR.
1847 .RE
1848
1849 .sp
1850 .ne 2
1851 .na
1852 \fBzfs_ddt_data_is_special\fR (int)
1853 .ad
1854 .RS 12n
1855 If enabled, ZFS will place DDT data into the special allocation class.
1856 .sp
1857 Default value: \fB1\fR.
1858 .RE
1859
1860 .sp
1861 .ne 2
1862 .na
1863 \fBzfs_user_indirect_is_special\fR (int)
1864 .ad
1865 .RS 12n
1866 If enabled, ZFS will place user data (both file and zvol) indirect blocks
1867 into the special allocation class.
1868 .sp
1869 Default value: \fB1\fR.
1870 .RE
1871
1872 .sp
1873 .ne 2
1874 .na
1875 \fBzfs_multihost_history\fR (int)
1876 .ad
1877 .RS 12n
1878 Historical statistics for the last N multihost updates will be available in
1879 \fB/proc/spl/kstat/zfs/<pool>/multihost\fR
1880 .sp
1881 Default value: \fB0\fR.
1882 .RE
1883
1884 .sp
1885 .ne 2
1886 .na
1887 \fBzfs_multihost_interval\fR (ulong)
1888 .ad
1889 .RS 12n
1890 Used to control the frequency of multihost writes which are performed when the
1891 \fBmultihost\fR pool property is on. This is one factor used to determine the
1892 length of the activity check during import.
1893 .sp
1894 The multihost write period is \fBzfs_multihost_interval / leaf-vdevs\fR
1895 milliseconds. On average a multihost write will be issued for each leaf vdev
1896 every \fBzfs_multihost_interval\fR milliseconds. In practice, the observed
1897 period can vary with the I/O load and this observed value is the delay which is
1898 stored in the uberblock.
1899 .sp
1900 Default value: \fB1000\fR.
1901 .RE
1902
1903 .sp
1904 .ne 2
1905 .na
1906 \fBzfs_multihost_import_intervals\fR (uint)
1907 .ad
1908 .RS 12n
1909 Used to control the duration of the activity test on import. Smaller values of
1910 \fBzfs_multihost_import_intervals\fR will reduce the import time but increase
1911 the risk of failing to detect an active pool. The total activity check time is
1912 never allowed to drop below one second.
1913 .sp
1914 On import the activity check waits a minimum amount of time determined by
1915 \fBzfs_multihost_interval * zfs_multihost_import_intervals\fR, or the same
1916 product computed on the host which last had the pool imported (whichever is
1917 greater). The activity check time may be further extended if the value of mmp
1918 delay found in the best uberblock indicates actual multihost updates happened
1919 at longer intervals than \fBzfs_multihost_interval\fR. A minimum value of
1920 \fB100ms\fR is enforced.
1921 .sp
1922 A value of 0 is ignored and treated as if it was set to 1.
1923 .sp
1924 Default value: \fB20\fR.
1925 .RE
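.sp
.LP
A Python sketch of the quantities described in the two entries above, using
the defaults and an assumed leaf vdev count:
.sp
.nf
zfs_multihost_interval = 1000           # default, in milliseconds
zfs_multihost_import_intervals = 20     # default
leaf_vdevs = 8                          # assumed pool layout

write_period_ms = zfs_multihost_interval / leaf_vdevs
min_activity_check_ms = (zfs_multihost_interval
                         * zfs_multihost_import_intervals)
print(write_period_ms, min_activity_check_ms)   # 125.0 20000
.fi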
1926
1927 .sp
1928 .ne 2
1929 .na
1930 \fBzfs_multihost_fail_intervals\fR (uint)
1931 .ad
1932 .RS 12n
1933 Controls the behavior of the pool when multihost write failures or delays are
1934 detected.
1935 .sp
1936 When \fBzfs_multihost_fail_intervals = 0\fR, multihost write failures or delays
1937 are ignored. The failures will still be reported to the ZED which depending on
1938 its configuration may take action such as suspending the pool or offlining a
1939 device.
1940
1941 .sp
1942 When \fBzfs_multihost_fail_intervals > 0\fR, the pool will be suspended if
1943 \fBzfs_multihost_fail_intervals * zfs_multihost_interval\fR milliseconds pass
1944 without a successful mmp write. This guarantees the activity test will see
1945 mmp writes if the pool is imported. A value of 1 is ignored and treated as
1946 if it was set to 2. This is necessary to prevent the pool from being suspended
1947 due to normal, small I/O latency variations.
1948
1949 .sp
1950 Default value: \fB10\fR.
1951 .RE
1952
1953 .sp
1954 .ne 2
1955 .na
1956 \fBzfs_no_scrub_io\fR (int)
1957 .ad
1958 .RS 12n
1959 Set to disable scrub I/O. This results in scrubs not actually scrubbing data and
1960 simply doing a metadata crawl of the pool instead.
1961 .sp
1962 Use \fB1\fR for yes and \fB0\fR for no (default).
1963 .RE
1964
1965 .sp
1966 .ne 2
1967 .na
1968 \fBzfs_no_scrub_prefetch\fR (int)
1969 .ad
1970 .RS 12n
1971 Set to disable block prefetching for scrubs.
1972 .sp
1973 Use \fB1\fR for yes and \fB0\fR for no (default).
1974 .RE
1975
1976 .sp
1977 .ne 2
1978 .na
1979 \fBzfs_nocacheflush\fR (int)
1980 .ad
1981 .RS 12n
1982 Disable cache flush operations on disks when writing. Setting this will
1983 cause pool corruption on power loss if a volatile out-of-order write cache
1984 is enabled.
1985 .sp
1986 Use \fB1\fR for yes and \fB0\fR for no (default).
1987 .RE
1988
1989 .sp
1990 .ne 2
1991 .na
1992 \fBzfs_nopwrite_enabled\fR (int)
1993 .ad
1994 .RS 12n
1995 Enable NOP writes
1996 .sp
1997 Use \fB1\fR for yes (default) and \fB0\fR to disable.
1998 .RE
1999
2000 .sp
2001 .ne 2
2002 .na
2003 \fBzfs_dmu_offset_next_sync\fR (int)
2004 .ad
2005 .RS 12n
2006 Enable forcing txg sync to find holes. When enabled, this forces ZFS to act
2007 like prior versions when SEEK_HOLE or SEEK_DATA flags are used: when
2008 a dnode is dirty, txgs are synced so that this data can be
2009 found.
2010 .sp
2011 Use \fB1\fR for yes and \fB0\fR to disable (default).
2012 .RE
2013
2014 .sp
2015 .ne 2
2016 .na
2017 \fBzfs_pd_bytes_max\fR (int)
2018 .ad
2019 .RS 12n
2020 The number of bytes which should be prefetched during a pool traversal
2021 (e.g. \fBzfs send\fR or other data crawling operations).
2022 .sp
2023 Default value: \fB52,428,800\fR.
2024 .RE
2025
2026 .sp
2027 .ne 2
2028 .na
2029 \fBzfs_per_txg_dirty_frees_percent \fR (ulong)
2030 .ad
2031 .RS 12n
2032 Tunable to control percentage of dirtied indirect blocks from frees allowed
2033 into one TXG. After this threshold is crossed, additional frees will wait until
2034 the next TXG.
2035 A value of zero will disable this throttle.
2036 .sp
2037 Default value: \fB5\fR, set to \fB0\fR to disable.
2038 .RE
2039
2040 .sp
2041 .ne 2
2042 .na
2043 \fBzfs_prefetch_disable\fR (int)
2044 .ad
2045 .RS 12n
2046 This tunable disables predictive prefetch. Note that it leaves "prescient"
2047 prefetch (e.g. prefetch for zfs send) intact. Unlike predictive prefetch,
2048 prescient prefetch never issues i/os that end up not being needed, so it
2049 can't hurt performance.
2050 .sp
2051 Use \fB1\fR for yes and \fB0\fR for no (default).
2052 .RE
2053
2054 .sp
2055 .ne 2
2056 .na
2057 \fBzfs_qat_checksum_disable\fR (int)
2058 .ad
2059 .RS 12n
This tunable disables QAT hardware acceleration for SHA256 checksums. It
may be set after the zfs modules have been loaded to initialize the QAT
hardware as long as support is compiled in and the QAT driver is present.
2063 .sp
2064 Use \fB1\fR for yes and \fB0\fR for no (default).
2065 .RE
2066
2067 .sp
2068 .ne 2
2069 .na
2070 \fBzfs_qat_compress_disable\fR (int)
2071 .ad
2072 .RS 12n
This tunable disables QAT hardware acceleration for gzip compression. It
may be set after the zfs modules have been loaded to initialize the QAT
hardware as long as support is compiled in and the QAT driver is present.
2076 .sp
2077 Use \fB1\fR for yes and \fB0\fR for no (default).
2078 .RE
2079
2080 .sp
2081 .ne 2
2082 .na
2083 \fBzfs_qat_encrypt_disable\fR (int)
2084 .ad
2085 .RS 12n
This tunable disables QAT hardware acceleration for AES-GCM encryption. It
may be set after the zfs modules have been loaded to initialize the QAT
hardware as long as support is compiled in and the QAT driver is present.
2089 .sp
2090 Use \fB1\fR for yes and \fB0\fR for no (default).
2091 .RE
2092
2093 .sp
2094 .ne 2
2095 .na
2096 \fBzfs_read_chunk_size\fR (long)
2097 .ad
2098 .RS 12n
Bytes to read per chunk.
2100 .sp
2101 Default value: \fB1,048,576\fR.
2102 .RE
2103
2104 .sp
2105 .ne 2
2106 .na
2107 \fBzfs_read_history\fR (int)
2108 .ad
2109 .RS 12n
2110 Historical statistics for the last N reads will be available in
2111 \fB/proc/spl/kstat/zfs/<pool>/reads\fR
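.sp
For example, to keep the last 100 reads and then inspect them (a sketch; the
pool name \fBtank\fR is an assumption):
.nf

  # echo 100 > /sys/module/zfs/parameters/zfs_read_history
  # cat /proc/spl/kstat/zfs/tank/reads

.fi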
2112 .sp
2113 Default value: \fB0\fR (no data is kept).
2114 .RE
2115
2116 .sp
2117 .ne 2
2118 .na
2119 \fBzfs_read_history_hits\fR (int)
2120 .ad
2121 .RS 12n
Include cache hits in read history.
2123 .sp
2124 Use \fB1\fR for yes and \fB0\fR for no (default).
2125 .RE
2126
2127 .sp
2128 .ne 2
2129 .na
2130 \fBzfs_reconstruct_indirect_combinations_max\fR (int)
2131 .ad
.RS 12n
2133 If an indirect split block contains more than this many possible unique
2134 combinations when being reconstructed, consider it too computationally
2135 expensive to check them all. Instead, try at most
2136 \fBzfs_reconstruct_indirect_combinations_max\fR randomly-selected
2137 combinations each time the block is accessed. This allows all segment
2138 copies to participate fairly in the reconstruction when all combinations
2139 cannot be checked and prevents repeated use of one bad copy.
2140 .sp
2141 Default value: \fB4096\fR.
2142 .RE
2143
2144 .sp
2145 .ne 2
2146 .na
2147 \fBzfs_recover\fR (int)
2148 .ad
2149 .RS 12n
2150 Set to attempt to recover from fatal errors. This should only be used as a
2151 last resort, as it typically results in leaked space, or worse.
2152 .sp
2153 Use \fB1\fR for yes and \fB0\fR for no (default).
2154 .RE
2155
2156 .sp
2157 .ne 2
2158 .na
2159 \fBzfs_removal_ignore_errors\fR (int)
2160 .ad
2161 .RS 12n
Ignore hard I/O errors during device removal. When set, if a device encounters
a hard I/O error during the removal process, the removal will not be cancelled.
2165 This can result in a normally recoverable block becoming permanently damaged
2166 and is not recommended. This should only be used as a last resort when the
2167 pool cannot be returned to a healthy state prior to removing the device.
2168 .sp
2169 Default value: \fB0\fR.
2170 .RE
2171
2172 .sp
2173 .ne 2
2174 .na
2175 \fBzfs_resilver_min_time_ms\fR (int)
2176 .ad
2177 .RS 12n
Resilvers are processed by the sync thread. While resilvering, it will spend
at least this much time working on a resilver between txg flushes.
2180 .sp
2181 Default value: \fB3,000\fR.
2182 .RE
2183
2184 .sp
2185 .ne 2
2186 .na
2187 \fBzfs_scan_ignore_errors\fR (int)
2188 .ad
2189 .RS 12n
2190 If set to a nonzero value, remove the DTL (dirty time list) upon
2191 completion of a pool scan (scrub) even if there were unrepairable
2192 errors. It is intended to be used during pool repair or recovery to
2193 stop resilvering when the pool is next imported.
2194 .sp
2195 Default value: \fB0\fR.
2196 .RE
2197
2198 .sp
2199 .ne 2
2200 .na
2201 \fBzfs_scrub_min_time_ms\fR (int)
2202 .ad
2203 .RS 12n
Scrubs are processed by the sync thread. While scrubbing, it will spend
at least this much time working on a scrub between txg flushes.
2206 .sp
2207 Default value: \fB1,000\fR.
2208 .RE
2209
2210 .sp
2211 .ne 2
2212 .na
2213 \fBzfs_scan_checkpoint_intval\fR (int)
2214 .ad
2215 .RS 12n
To preserve progress across reboots, the sequential scan algorithm periodically
needs to stop metadata scanning and issue all the verification I/Os to disk.
2218 The frequency of this flushing is determined by the
2219 \fBzfs_scan_checkpoint_intval\fR tunable.
2220 .sp
2221 Default value: \fB7200\fR seconds (every 2 hours).
2222 .RE
2223
2224 .sp
2225 .ne 2
2226 .na
2227 \fBzfs_scan_fill_weight\fR (int)
2228 .ad
2229 .RS 12n
2230 This tunable affects how scrub and resilver I/O segments are ordered. A higher
2231 number indicates that we care more about how filled in a segment is, while a
2232 lower number indicates we care more about the size of the extent without
2233 considering the gaps within a segment. This value is only tunable upon module
insertion. Changing the value afterwards will have no effect on scrub or
2235 resilver performance.
2236 .sp
2237 Default value: \fB3\fR.
2238 .RE
2239
2240 .sp
2241 .ne 2
2242 .na
2243 \fBzfs_scan_issue_strategy\fR (int)
2244 .ad
2245 .RS 12n
2246 Determines the order that data will be verified while scrubbing or resilvering.
2247 If set to \fB1\fR, data will be verified as sequentially as possible, given the
2248 amount of memory reserved for scrubbing (see \fBzfs_scan_mem_lim_fact\fR). This
2249 may improve scrub performance if the pool's data is very fragmented. If set to
2250 \fB2\fR, the largest mostly-contiguous chunk of found data will be verified
2251 first. By deferring scrubbing of small segments, we may later find adjacent data
2252 to coalesce and increase the segment size. If set to \fB0\fR, zfs will use
2253 strategy \fB1\fR during normal verification and strategy \fB2\fR while taking a
2254 checkpoint.
2255 .sp
2256 Default value: \fB0\fR.
2257 .RE
2258
2259 .sp
2260 .ne 2
2261 .na
2262 \fBzfs_scan_legacy\fR (int)
2263 .ad
2264 .RS 12n
2265 A value of 0 indicates that scrubs and resilvers will gather metadata in
2266 memory before issuing sequential I/O. A value of 1 indicates that the legacy
2267 algorithm will be used where I/O is initiated as soon as it is discovered.
2268 Changing this value to 0 will not affect scrubs or resilvers that are already
2269 in progress.
2270 .sp
2271 Default value: \fB0\fR.
2272 .RE
2273
2274 .sp
2275 .ne 2
2276 .na
2277 \fBzfs_scan_max_ext_gap\fR (int)
2278 .ad
2279 .RS 12n
2280 Indicates the largest gap in bytes between scrub / resilver I/Os that will still
2281 be considered sequential for sorting purposes. Changing this value will not
2282 affect scrubs or resilvers that are already in progress.
2283 .sp
2284 Default value: \fB2097152 (2 MB)\fR.
2285 .RE
2286
2287 .sp
2288 .ne 2
2289 .na
2290 \fBzfs_scan_mem_lim_fact\fR (int)
2291 .ad
2292 .RS 12n
Maximum fraction of RAM used for I/O sorting by the sequential scan algorithm.
2294 This tunable determines the hard limit for I/O sorting memory usage.
2295 When the hard limit is reached we stop scanning metadata and start issuing
2296 data verification I/O. This is done until we get below the soft limit.
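.sp
For example, on a hypothetical system with 64 GiB of RAM and the default value
of \fB20\fR:
.nf

  hard limit = RAM / zfs_scan_mem_lim_fact = 64 GiB / 20 = 3.2 GiB

.fi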
2297 .sp
2298 Default value: \fB20\fR which is 5% of RAM (1/20).
2299 .RE
2300
2301 .sp
2302 .ne 2
2303 .na
2304 \fBzfs_scan_mem_lim_soft_fact\fR (int)
2305 .ad
2306 .RS 12n
The fraction of the hard limit used to determine the soft limit for I/O sorting
by the sequential scan algorithm. When we cross this limit from below no action
is taken. When we cross this limit from above it is because we are issuing
verification I/O. In this case (unless the metadata scan is done) we stop
issuing verification I/O and start scanning metadata again until we get to the
hard limit.
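.sp
Continuing the hypothetical example above (64 GiB of RAM, both factors at
their defaults of \fB20\fR), the soft limit is 3.2 GiB / 20, or roughly
164 MiB.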
2313 .sp
2314 Default value: \fB20\fR which is 5% of the hard limit (1/20).
2315 .RE
2316
2317 .sp
2318 .ne 2
2319 .na
2320 \fBzfs_scan_vdev_limit\fR (int)
2321 .ad
2322 .RS 12n
Maximum amount of data that can be concurrently issued for scrubs and
resilvers per leaf device, given in bytes.
2325 .sp
2326 Default value: \fB41943040\fR.
2327 .RE
2328
2329 .sp
2330 .ne 2
2331 .na
2332 \fBzfs_send_corrupt_data\fR (int)
2333 .ad
2334 .RS 12n
Allow sending of corrupt data (ignore read/checksum errors when sending data).
2336 .sp
2337 Use \fB1\fR for yes and \fB0\fR for no (default).
2338 .RE
2339
2340 .sp
2341 .ne 2
2342 .na
2343 \fBzfs_send_queue_length\fR (int)
2344 .ad
2345 .RS 12n
2346 The maximum number of bytes allowed in the \fBzfs send\fR queue. This value
2347 must be at least twice the maximum block size in use.
2348 .sp
2349 Default value: \fB16,777,216\fR.
2350 .RE
2351
2352 .sp
2353 .ne 2
2354 .na
2355 \fBzfs_recv_queue_length\fR (int)
2356 .ad
2357 .RS 12n
2359 The maximum number of bytes allowed in the \fBzfs receive\fR queue. This value
2360 must be at least twice the maximum block size in use.
2361 .sp
2362 Default value: \fB16,777,216\fR.
2363 .RE
2364
2365 .sp
2366 .ne 2
2367 .na
2368 \fBzfs_sync_pass_deferred_free\fR (int)
2369 .ad
2370 .RS 12n
Flushing of data to disk is done in passes. Defer frees starting in this pass.
2372 .sp
2373 Default value: \fB2\fR.
2374 .RE
2375
2376 .sp
2377 .ne 2
2378 .na
2379 \fBzfs_spa_discard_memory_limit\fR (int)
2380 .ad
2381 .RS 12n
2382 Maximum memory used for prefetching a checkpoint's space map on each
2383 vdev while discarding the checkpoint.
2384 .sp
2385 Default value: \fB16,777,216\fR.
2386 .RE
2387
2388 .sp
2389 .ne 2
2390 .na
2391 \fBzfs_sync_pass_dont_compress\fR (int)
2392 .ad
2393 .RS 12n
Don't compress starting in this pass.
2395 .sp
2396 Default value: \fB5\fR.
2397 .RE
2398
2399 .sp
2400 .ne 2
2401 .na
2402 \fBzfs_sync_pass_rewrite\fR (int)
2403 .ad
2404 .RS 12n
Rewrite new block pointers starting in this pass.
2406 .sp
2407 Default value: \fB2\fR.
2408 .RE
2409
2410 .sp
2411 .ne 2
2412 .na
2413 \fBzfs_sync_taskq_batch_pct\fR (int)
2414 .ad
2415 .RS 12n
2416 This controls the number of threads used by the dp_sync_taskq. The default
value of 75% will create a maximum of one thread per CPU.
2418 .sp
2419 Default value: \fB75\fR%.
2420 .RE
2421
2422 .sp
2423 .ne 2
2424 .na
2425 \fBzfs_trim_extent_bytes_max\fR (unsigned int)
2426 .ad
2427 .RS 12n
Maximum size of TRIM command. Ranges larger than this will be split into
2429 chunks no larger than \fBzfs_trim_extent_bytes_max\fR bytes before being
2430 issued to the device.
2431 .sp
2432 Default value: \fB134,217,728\fR.
2433 .RE
2434
2435 .sp
2436 .ne 2
2437 .na
2438 \fBzfs_trim_extent_bytes_min\fR (unsigned int)
2439 .ad
2440 .RS 12n
2441 Minimum size of TRIM commands. TRIM ranges smaller than this will be skipped
unless they're part of a larger range which was broken into chunks. This is
2443 done because it's common for these small TRIMs to negatively impact overall
2444 performance. This value can be set to 0 to TRIM all unallocated space.
2445 .sp
2446 Default value: \fB32,768\fR.
2447 .RE
2448
2449 .sp
2450 .ne 2
2451 .na
2452 \fBzfs_trim_metaslab_skip\fR (unsigned int)
2453 .ad
2454 .RS 12n
2455 Skip uninitialized metaslabs during the TRIM process. This option is useful
2456 for pools constructed from large thinly-provisioned devices where TRIM
operations are slow. As a pool ages, an increasing fraction of the pool's
metaslabs will be initialized, progressively degrading the usefulness of
2459 this option. This setting is stored when starting a manual TRIM and will
2460 persist for the duration of the requested TRIM.
2461 .sp
2462 Default value: \fB0\fR.
2463 .RE
2464
2465 .sp
2466 .ne 2
2467 .na
2468 \fBzfs_trim_queue_limit\fR (unsigned int)
2469 .ad
2470 .RS 12n
2471 Maximum number of queued TRIMs outstanding per leaf vdev. The number of
2472 concurrent TRIM commands issued to the device is controlled by the
2473 \fBzfs_vdev_trim_min_active\fR and \fBzfs_vdev_trim_max_active\fR module
2474 options.
2475 .sp
2476 Default value: \fB10\fR.
2477 .RE
2478
2479 .sp
2480 .ne 2
2481 .na
2482 \fBzfs_trim_txg_batch\fR (unsigned int)
2483 .ad
2484 .RS 12n
2485 The number of transaction groups worth of frees which should be aggregated
2486 before TRIM operations are issued to the device. This setting represents a
2487 trade-off between issuing larger, more efficient TRIM operations and the
2488 delay before the recently trimmed space is available for use by the device.
2489 .sp
2490 Increasing this value will allow frees to be aggregated for a longer time.
This will result in larger TRIM operations and potentially increased memory
2492 usage. Decreasing this value will have the opposite effect. The default
2493 value of 32 was determined to be a reasonable compromise.
2494 .sp
2495 Default value: \fB32\fR.
2496 .RE
2497
2498 .sp
2499 .ne 2
2500 .na
2501 \fBzfs_txg_history\fR (int)
2502 .ad
2503 .RS 12n
2504 Historical statistics for the last N txgs will be available in
2505 \fB/proc/spl/kstat/zfs/<pool>/txgs\fR
2506 .sp
2507 Default value: \fB0\fR.
2508 .RE
2509
2510 .sp
2511 .ne 2
2512 .na
2513 \fBzfs_txg_timeout\fR (int)
2514 .ad
2515 .RS 12n
Flush dirty data to disk at least every N seconds (maximum txg duration).
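.sp
For example (a sketch; the value \fB10\fR is an assumption, not a
recommendation), the timeout may be set at module load or adjusted at runtime:
.nf

  # modprobe zfs zfs_txg_timeout=10
  # echo 10 > /sys/module/zfs/parameters/zfs_txg_timeout

.fi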
2517 .sp
2518 Default value: \fB5\fR.
2519 .RE
2520
2521 .sp
2522 .ne 2
2523 .na
2524 \fBzfs_vdev_aggregate_trim\fR (int)
2525 .ad
2526 .RS 12n
2527 Allow TRIM I/Os to be aggregated. This is normally not helpful because
the extents to be trimmed will have already been aggregated by the
2529 metaslab. This option is provided for debugging and performance analysis.
2530 .sp
2531 Default value: \fB0\fR.
2532 .RE
2533
2534 .sp
2535 .ne 2
2536 .na
2537 \fBzfs_vdev_aggregation_limit\fR (int)
2538 .ad
2539 .RS 12n
Max vdev I/O aggregation size.
2541 .sp
2542 Default value: \fB1,048,576\fR.
2543 .RE
2544
2545 .sp
2546 .ne 2
2547 .na
2548 \fBzfs_vdev_aggregation_limit_non_rotating\fR (int)
2549 .ad
2550 .RS 12n
Max vdev I/O aggregation size for non-rotating media.
2552 .sp
2553 Default value: \fB131,072\fR.
2554 .RE
2555
2556 .sp
2557 .ne 2
2558 .na
2559 \fBzfs_vdev_cache_bshift\fR (int)
2560 .ad
2561 .RS 12n
Shift size to inflate reads to.
2563 .sp
2564 Default value: \fB16\fR (effectively 65536).
2565 .RE
2566
2567 .sp
2568 .ne 2
2569 .na
2570 \fBzfs_vdev_cache_max\fR (int)
2571 .ad
2572 .RS 12n
2573 Inflate reads smaller than this value to meet the \fBzfs_vdev_cache_bshift\fR
2574 size (default 64k).
2575 .sp
2576 Default value: \fB16384\fR.
2577 .RE
2578
2579 .sp
2580 .ne 2
2581 .na
2582 \fBzfs_vdev_cache_size\fR (int)
2583 .ad
2584 .RS 12n
2585 Total size of the per-disk cache in bytes.
2586 .sp
2587 Currently this feature is disabled as it has been found to not be helpful
2588 for performance and in some cases harmful.
2589 .sp
2590 Default value: \fB0\fR.
2591 .RE
2592
2593 .sp
2594 .ne 2
2595 .na
2596 \fBzfs_vdev_mirror_rotating_inc\fR (int)
2597 .ad
2598 .RS 12n
A number by which the balancing algorithm increments the load calculation for
the purpose of selecting the least busy mirror member when an I/O immediately
follows its predecessor on rotational vdevs.
2603 .sp
2604 Default value: \fB0\fR.
2605 .RE
2606
2607 .sp
2608 .ne 2
2609 .na
2610 \fBzfs_vdev_mirror_rotating_seek_inc\fR (int)
2611 .ad
2612 .RS 12n
A number by which the balancing algorithm increments the load calculation for
the purpose of selecting the least busy mirror member when an I/O lacks
locality as defined by \fBzfs_vdev_mirror_rotating_seek_offset\fR. I/Os within
this distance that do not immediately follow the previous I/O are incremented
by half of this value.
2618 .sp
2619 Default value: \fB5\fR.
2620 .RE
2621
2622 .sp
2623 .ne 2
2624 .na
2625 \fBzfs_vdev_mirror_rotating_seek_offset\fR (int)
2626 .ad
2627 .RS 12n
The maximum distance from the last queued I/O within which the balancing
algorithm considers an I/O to have locality.
2630 See the section "ZFS I/O SCHEDULER".
2631 .sp
2632 Default value: \fB1048576\fR.
2633 .RE
2634
2635 .sp
2636 .ne 2
2637 .na
2638 \fBzfs_vdev_mirror_non_rotating_inc\fR (int)
2639 .ad
2640 .RS 12n
2641 A number by which the balancing algorithm increments the load calculation for
2642 the purpose of selecting the least busy mirror member on non-rotational vdevs
2643 when I/Os do not immediately follow one another.
2644 .sp
2645 Default value: \fB0\fR.
2646 .RE
2647
2648 .sp
2649 .ne 2
2650 .na
2651 \fBzfs_vdev_mirror_non_rotating_seek_inc\fR (int)
2652 .ad
2653 .RS 12n
A number by which the balancing algorithm increments the load calculation for
the purpose of selecting the least busy mirror member when an I/O lacks
locality as defined by \fBzfs_vdev_mirror_rotating_seek_offset\fR. I/Os within
this distance that do not immediately follow the previous I/O are incremented
by half of this value.
2659 .sp
2660 Default value: \fB1\fR.
2661 .RE
2662
2663 .sp
2664 .ne 2
2665 .na
2666 \fBzfs_vdev_read_gap_limit\fR (int)
2667 .ad
2668 .RS 12n
2669 Aggregate read I/O operations if the gap on-disk between them is within this
2670 threshold.
2671 .sp
2672 Default value: \fB32,768\fR.
2673 .RE
2674
2675 .sp
2676 .ne 2
2677 .na
2678 \fBzfs_vdev_scheduler\fR (charp)
2679 .ad
2680 .RS 12n
Set the Linux I/O scheduler on whole disk vdevs to this scheduler. Valid
options are \fBnoop\fR, \fBcfq\fR, \fBbfq\fR and \fBdeadline\fR.
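.sp
For example, to select the deadline scheduler at module load (a sketch;
scheduler availability depends on the running kernel):
.nf

  # modprobe zfs zfs_vdev_scheduler=deadline

.fi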
2683 .sp
2684 Default value: \fBnoop\fR.
2685 .RE
2686
2687 .sp
2688 .ne 2
2689 .na
2690 \fBzfs_vdev_write_gap_limit\fR (int)
2691 .ad
2692 .RS 12n
Aggregate write I/O operations if the gap on-disk between them is within this
threshold.
2694 .sp
2695 Default value: \fB4,096\fR.
2696 .RE
2697
2698 .sp
2699 .ne 2
2700 .na
2701 \fBzfs_vdev_raidz_impl\fR (string)
2702 .ad
2703 .RS 12n
2704 Parameter for selecting raidz parity implementation to use.
2705
2706 Options marked (always) below may be selected on module load as they are
2707 supported on all systems.
2708 The remaining options may only be set after the module is loaded, as they
2709 are available only if the implementations are compiled in and supported
2710 on the running system.
2711
2712 Once the module is loaded, the content of
2713 /sys/module/zfs/parameters/zfs_vdev_raidz_impl will show available options
2714 with the currently selected one enclosed in [].
Possible options are:
.nf
  fastest        - (always) implementation selected using built-in benchmark
  original       - (always) original raidz implementation
  scalar         - (always) scalar raidz implementation
  sse2           - implementation using SSE2 instruction set (64bit x86 only)
  ssse3          - implementation using SSSE3 instruction set (64bit x86 only)
  avx2           - implementation using AVX2 instruction set (64bit x86 only)
  avx512f        - implementation using AVX512F instruction set
                   (64bit x86 only)
  avx512bw       - implementation using AVX512F & AVX512BW instruction sets
                   (64bit x86 only)
  aarch64_neon   - implementation using NEON (Aarch64/64 bit ARMv8 only)
  aarch64_neonx2 - implementation using NEON with more unrolling
                   (Aarch64/64 bit ARMv8 only)
.fi
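.sp
For example, to inspect the available options and select one (a sketch;
\fBavx2\fR is an assumption and must be supported on the running system):
.nf

  # cat /sys/module/zfs/parameters/zfs_vdev_raidz_impl
  # echo avx2 > /sys/module/zfs/parameters/zfs_vdev_raidz_impl

.fi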
2726 .sp
2727 Default value: \fBfastest\fR.
2728 .RE
2729
2730 .sp
2731 .ne 2
2732 .na
2733 \fBzfs_zevent_cols\fR (int)
2734 .ad
2735 .RS 12n
When zevents are logged to the console, use this as the word wrap width.
2737 .sp
2738 Default value: \fB80\fR.
2739 .RE
2740
2741 .sp
2742 .ne 2
2743 .na
2744 \fBzfs_zevent_console\fR (int)
2745 .ad
2746 .RS 12n
Log events to the console.
2748 .sp
2749 Use \fB1\fR for yes and \fB0\fR for no (default).
2750 .RE
2751
2752 .sp
2753 .ne 2
2754 .na
2755 \fBzfs_zevent_len_max\fR (int)
2756 .ad
2757 .RS 12n
2758 Max event queue length. A value of 0 will result in a calculated value which
2759 increases with the number of CPUs in the system (minimum 64 events). Events
2760 in the queue can be viewed with the \fBzpool events\fR command.
2761 .sp
2762 Default value: \fB0\fR.
2763 .RE
2764
2765 .sp
2766 .ne 2
2767 .na
2768 \fBzfs_zil_clean_taskq_maxalloc\fR (int)
2769 .ad
2770 .RS 12n
2771 The maximum number of taskq entries that are allowed to be cached. When this
2772 limit is exceeded transaction records (itxs) will be cleaned synchronously.
2773 .sp
2774 Default value: \fB1048576\fR.
2775 .RE
2776
2777 .sp
2778 .ne 2
2779 .na
2780 \fBzfs_zil_clean_taskq_minalloc\fR (int)
2781 .ad
2782 .RS 12n
2783 The number of taskq entries that are pre-populated when the taskq is first
2784 created and are immediately available for use.
2785 .sp
2786 Default value: \fB1024\fR.
2787 .RE
2788
2789 .sp
2790 .ne 2
2791 .na
2792 \fBzfs_zil_clean_taskq_nthr_pct\fR (int)
2793 .ad
2794 .RS 12n
2795 This controls the number of threads used by the dp_zil_clean_taskq. The default
value of 100% will create a maximum of one thread per CPU.
2797 .sp
2798 Default value: \fB100\fR%.
2799 .RE
2800
2801 .sp
2802 .ne 2
2803 .na
2804 \fBzil_nocacheflush\fR (int)
2805 .ad
2806 .RS 12n
2807 Disable the cache flush commands that are normally sent to the disk(s) by
2808 the ZIL after an LWB write has completed. Setting this will cause ZIL
2809 corruption on power loss if a volatile out-of-order write cache is enabled.
2810 .sp
2811 Use \fB1\fR for yes and \fB0\fR for no (default).
2812 .RE
2813
2814 .sp
2815 .ne 2
2816 .na
2817 \fBzil_replay_disable\fR (int)
2818 .ad
2819 .RS 12n
Disable intent logging replay. This can be useful for recovery from a
corrupted ZIL.
2822 .sp
2823 Use \fB1\fR for yes and \fB0\fR for no (default).
2824 .RE
2825
2826 .sp
2827 .ne 2
2828 .na
2829 \fBzil_slog_bulk\fR (ulong)
2830 .ad
2831 .RS 12n
2832 Limit SLOG write size per commit executed with synchronous priority.
2833 Any writes above that will be executed with lower (asynchronous) priority
to limit potential SLOG device abuse by a single active ZIL writer.
2835 .sp
2836 Default value: \fB786,432\fR.
2837 .RE
2838
2839 .sp
2840 .ne 2
2841 .na
2842 \fBzio_deadman_log_all\fR (int)
2843 .ad
2844 .RS 12n
2845 If non-zero, the zio deadman will produce debugging messages (see
2846 \fBzfs_dbgmsg_enable\fR) for all zios, rather than only for leaf
2847 zios possessing a vdev. This is meant to be used by developers to gain
2848 diagnostic information for hang conditions which don't involve a mutex
2849 or other locking primitive; typically conditions in which a thread in
2850 the zio pipeline is looping indefinitely.
2851 .sp
2852 Default value: \fB0\fR.
2853 .RE
2854
2855 .sp
2856 .ne 2
2857 .na
2858 \fBzio_decompress_fail_fraction\fR (int)
2859 .ad
2860 .RS 12n
2861 If non-zero, this value represents the denominator of the probability that zfs
2862 should induce a decompression failure. For instance, for a 5% decompression
2863 failure rate, this value should be set to 20.
2864 .sp
2865 Default value: \fB0\fR.
2866 .RE
2867
2868 .sp
2869 .ne 2
2870 .na
2871 \fBzio_slow_io_ms\fR (int)
2872 .ad
2873 .RS 12n
When an I/O operation takes more than \fBzio_slow_io_ms\fR milliseconds to
complete, it is marked as a slow I/O. Each slow I/O causes a delay zevent. Slow
I/O counters can be seen with "zpool status -s".
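.sp
For example, to flag I/Os slower than 10 seconds (an assumed value) and then
inspect the per-vdev slow I/O counters:
.nf

  # echo 10000 > /sys/module/zfs/parameters/zio_slow_io_ms
  # zpool status -s

.fi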
2877
2878 .sp
2879 Default value: \fB30,000\fR.
2880 .RE
2881
2882 .sp
2883 .ne 2
2884 .na
2885 \fBzio_dva_throttle_enabled\fR (int)
2886 .ad
2887 .RS 12n
2888 Throttle block allocations in the I/O pipeline. This allows for
2889 dynamic allocation distribution when devices are imbalanced.
2890 When enabled, the maximum number of pending allocations per top-level vdev
2891 is limited by \fBzfs_vdev_queue_depth_pct\fR.
2892 .sp
2893 Default value: \fB1\fR.
2894 .RE
2895
2896 .sp
2897 .ne 2
2898 .na
2899 \fBzio_requeue_io_start_cut_in_line\fR (int)
2900 .ad
2901 .RS 12n
Prioritize requeued I/O.
2903 .sp
2904 Default value: \fB0\fR.
2905 .RE
2906
2907 .sp
2908 .ne 2
2909 .na
2910 \fBzio_taskq_batch_pct\fR (uint)
2911 .ad
2912 .RS 12n
Percentage of online CPUs (or CPU cores, etc.) which will run a worker thread
for I/O. These workers are responsible for I/O work such as compression and
checksum calculations. A fractional number of CPUs will be rounded down.
2916 .sp
2917 The default value of 75 was chosen to avoid using all CPUs which can result in
2918 latency issues and inconsistent application performance, especially when high
2919 compression is enabled.
2920 .sp
2921 Default value: \fB75\fR.
2922 .RE
2923
2924 .sp
2925 .ne 2
2926 .na
2927 \fBzvol_inhibit_dev\fR (uint)
2928 .ad
2929 .RS 12n
2930 Do not create zvol device nodes. This may slightly improve startup time on
2931 systems with a very large number of zvols.
2932 .sp
2933 Use \fB1\fR for yes and \fB0\fR for no (default).
2934 .RE
2935
2936 .sp
2937 .ne 2
2938 .na
2939 \fBzvol_major\fR (uint)
2940 .ad
2941 .RS 12n
Major number for zvol block devices.
2943 .sp
2944 Default value: \fB230\fR.
2945 .RE
2946
2947 .sp
2948 .ne 2
2949 .na
2950 \fBzvol_max_discard_blocks\fR (ulong)
2951 .ad
2952 .RS 12n
2953 Discard (aka TRIM) operations done on zvols will be done in batches of this
2954 many blocks, where block size is determined by the \fBvolblocksize\fR property
2955 of a zvol.
2956 .sp
2957 Default value: \fB16,384\fR.
2958 .RE
2959
2960 .sp
2961 .ne 2
2962 .na
2963 \fBzvol_prefetch_bytes\fR (uint)
2964 .ad
2965 .RS 12n
When adding a zvol to the system, prefetch \fBzvol_prefetch_bytes\fR
2967 from the start and end of the volume. Prefetching these regions
2968 of the volume is desirable because they are likely to be accessed
2969 immediately by \fBblkid(8)\fR or by the kernel scanning for a partition
2970 table.
2971 .sp
2972 Default value: \fB131,072\fR.
2973 .RE
2974
2975 .sp
2976 .ne 2
2977 .na
2978 \fBzvol_request_sync\fR (uint)
2979 .ad
2980 .RS 12n
When processing I/O requests for a zvol, submit them synchronously. This
effectively limits the queue depth to 1 for each I/O submitter. When set
to 0, requests are handled asynchronously by a thread pool. The number of
requests which can be handled concurrently is controlled by \fBzvol_threads\fR.
2985 .sp
2986 Default value: \fB0\fR.
2987 .RE
2988
2989 .sp
2990 .ne 2
2991 .na
2992 \fBzvol_threads\fR (uint)
2993 .ad
2994 .RS 12n
2995 Max number of threads which can handle zvol I/O requests concurrently.
2996 .sp
2997 Default value: \fB32\fR.
2998 .RE
2999
3000 .sp
3001 .ne 2
3002 .na
3003 \fBzvol_volmode\fR (uint)
3004 .ad
3005 .RS 12n
Defines zvol block device behavior when \fBvolmode\fR is set to \fBdefault\fR.
3007 Valid values are \fB1\fR (full), \fB2\fR (dev) and \fB3\fR (none).
3008 .sp
3009 Default value: \fB1\fR.
3010 .RE
3011
3012 .SH ZFS I/O SCHEDULER
3013 ZFS issues I/O operations to leaf vdevs to satisfy and complete I/Os.
3014 The I/O scheduler determines when and in what order those operations are
3015 issued. The I/O scheduler divides operations into five I/O classes
3016 prioritized in the following order: sync read, sync write, async read,
3017 async write, and scrub/resilver. Each queue defines the minimum and
3018 maximum number of concurrent operations that may be issued to the
3019 device. In addition, the device has an aggregate maximum,
3020 \fBzfs_vdev_max_active\fR. Note that the sum of the per-queue minimums
3021 must not exceed the aggregate maximum. If the sum of the per-queue
3022 maximums exceeds the aggregate maximum, then the number of active I/Os
3023 may reach \fBzfs_vdev_max_active\fR, in which case no further I/Os will
3024 be issued regardless of whether all per-queue minimums have been met.
3025 .sp
3026 For many physical devices, throughput increases with the number of
3027 concurrent operations, but latency typically suffers. Further, physical
3028 devices typically have a limit at which more concurrent operations have no
3029 effect on throughput or can actually cause it to decrease.
3030 .sp
3031 The scheduler selects the next operation to issue by first looking for an
3032 I/O class whose minimum has not been satisfied. Once all are satisfied and
3033 the aggregate maximum has not been hit, the scheduler looks for classes
3034 whose maximum has not been satisfied. Iteration through the I/O classes is
3035 done in the order specified above. No further operations are issued if the
3036 aggregate maximum number of concurrent operations has been hit or if there
3037 are no operations queued for an I/O class that has not hit its maximum.
3038 Every time an I/O is queued or an operation completes, the I/O scheduler
3039 looks for new operations to issue.
3040 .sp
In general, smaller values of max_active will lead to lower latency of
synchronous operations. Larger values of max_active may lead to higher overall
throughput, depending on underlying storage.
3044 .sp
3045 The ratio of the queues' max_actives determines the balance of performance
3046 between reads, writes, and scrubs. E.g., increasing
\fBzfs_vdev_scrub_max_active\fR will cause the scrub or resilver to complete
more quickly, but reads and writes will have higher latency and lower
throughput.
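.sp
For example (a sketch; the value \fB3\fR is an assumption, not a
recommendation), a running scrub can be favored at the cost of other I/O by
raising its per-vdev maximum:
.nf

  # echo 3 > /sys/module/zfs/parameters/zfs_vdev_scrub_max_active

.fi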
3049 .sp
3050 All I/O classes have a fixed maximum number of outstanding operations
3051 except for the async write class. Asynchronous writes represent the data
3052 that is committed to stable storage during the syncing stage for
3053 transaction groups. Transaction groups enter the syncing state
3054 periodically so the number of queued async writes will quickly burst up
3055 and then bleed down to zero. Rather than servicing them as quickly as
3056 possible, the I/O scheduler changes the maximum number of active async
3057 write I/Os according to the amount of dirty data in the pool. Since
3058 both throughput and latency typically increase with the number of
3059 concurrent operations issued to physical devices, reducing the
3060 burstiness in the number of concurrent operations also stabilizes the
3061 response time of operations from other -- and in particular synchronous
3062 -- queues. In broad strokes, the I/O scheduler will issue more
3063 concurrent operations from the async write queue as there's more dirty
3064 data in the pool.
3065 .sp
3066 Async Writes
3067 .sp
3068 The number of concurrent operations issued for the async write I/O class
3069 follows a piece-wise linear function defined by a few adjustable points.
3070 .nf
3071
3072 | o---------| <-- zfs_vdev_async_write_max_active
3073 ^ | /^ |
3074 | | / | |
3075 active | / | |
3076 I/O | / | |
3077 count | / | |
3078 | / | |
3079 |-------o | | <-- zfs_vdev_async_write_min_active
3080 0|_______^______|_________|
3081 0% | | 100% of zfs_dirty_data_max
3082 | |
3083 | `-- zfs_vdev_async_write_active_max_dirty_percent
3084 `--------- zfs_vdev_async_write_active_min_dirty_percent
3085
3086 .fi
3087 Until the amount of dirty data exceeds a minimum percentage of the dirty
3088 data allowed in the pool, the I/O scheduler will limit the number of
3089 concurrent operations to the minimum. As that threshold is crossed, the
3090 number of concurrent operations issued increases linearly to the maximum at
3091 the specified maximum percentage of the dirty data allowed in the pool.
3092 .sp
3093 Ideally, the amount of dirty data on a busy pool will stay in the sloped
3094 part of the function between \fBzfs_vdev_async_write_active_min_dirty_percent\fR
3095 and \fBzfs_vdev_async_write_active_max_dirty_percent\fR. If it exceeds the
3096 maximum percentage, this indicates that the rate of incoming data is
3097 greater than the rate that the backend storage can handle. In this case, we
3098 must further throttle incoming writes, as described in the next section.
3099
3100 .SH ZFS TRANSACTION DELAY
3101 We delay transactions when we've determined that the backend storage
3102 isn't able to accommodate the rate of incoming writes.
3103 .sp
3104 If there is already a transaction waiting, we delay relative to when
3105 that transaction will finish waiting. This way the calculated delay time
3106 is independent of the number of threads concurrently executing
3107 transactions.
3108 .sp
3109 If we are the only waiter, wait relative to when the transaction
3110 started, rather than the current time. This credits the transaction for
3111 "time already served", e.g. reading indirect blocks.
3112 .sp
3113 The minimum time for a transaction to take is calculated as:
3114 .nf
3115 min_time = zfs_delay_scale * (dirty - min) / (max - dirty)
3116 min_time is then capped at 100 milliseconds.
3117 .fi
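.sp
For example, assuming a \fBzfs_delay_scale\fR of 500,000 nanoseconds (the
midpoint noted below) and hypothetical dirty data at 70% of
\fBzfs_dirty_data_max\fR with delays beginning at 60%:
.nf

  min_time = 500,000 ns * (70 - 60) / (100 - 70)
           = 166,667 ns (roughly 167 microseconds)

.fi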
3118 .sp
3119 The delay has two degrees of freedom that can be adjusted via tunables. The
3120 percentage of dirty data at which we start to delay is defined by
3121 \fBzfs_delay_min_dirty_percent\fR. This should typically be at or above
3122 \fBzfs_vdev_async_write_active_max_dirty_percent\fR so that we only start to
3123 delay after writing at full speed has failed to keep up with the incoming write
3124 rate. The scale of the curve is defined by \fBzfs_delay_scale\fR. Roughly speaking,
3125 this variable determines the amount of delay at the midpoint of the curve.
3126 .sp
3127 .nf
3128 delay
3129 10ms +-------------------------------------------------------------*+
3130 | *|
3131 9ms + *+
3132 | *|
3133 8ms + *+
3134 | * |
3135 7ms + * +
3136 | * |
3137 6ms + * +
3138 | * |
3139 5ms + * +
3140 | * |
3141 4ms + * +
3142 | * |
3143 3ms + * +
3144 | * |
3145 2ms + (midpoint) * +
3146 | | ** |
3147 1ms + v *** +
3148 | zfs_delay_scale ----------> ******** |
3149 0 +-------------------------------------*********----------------+
3150 0% <- zfs_dirty_data_max -> 100%
3151 .fi
3152 .sp
3153 Note that since the delay is added to the outstanding time remaining on the
3154 most recent transaction, the delay is effectively the inverse of IOPS.
3155 Here the midpoint of 500us translates to 2000 IOPS. The shape of the curve
3156 was chosen such that small changes in the amount of accumulated dirty data
3157 in the first 3/4 of the curve yield relatively small differences in the
3158 amount of delay.
3159 .sp
3160 The effects can be easier to understand when the amount of delay is
3161 represented on a log scale:
3162 .sp
3163 .nf
3164 delay
3165 100ms +-------------------------------------------------------------++
3166 + +
3167 | |
3168 + *+
3169 10ms + *+
3170 + ** +
3171 | (midpoint) ** |
3172 + | ** +
3173 1ms + v **** +
3174 + zfs_delay_scale ----------> ***** +
3175 | **** |
3176 + **** +
3177 100us + ** +
3178 + * +
3179 | * |
3180 + * +
3181 10us + * +
3182 + +
3183 | |
3184 + +
3185 +--------------------------------------------------------------+
3186 0% <- zfs_dirty_data_max -> 100%
3187 .fi
3188 .sp
3189 Note here that only as the amount of dirty data approaches its limit does
3190 the delay start to increase rapidly. The goal of a properly tuned system
3191 should be to keep the amount of dirty data out of that range by first
3192 ensuring that the appropriate limits are set for the I/O scheduler to reach
3193 optimal throughput on the backend storage, and then by changing the value
3194 of \fBzfs_delay_scale\fR to increase the steepness of the curve.