'\" te
.\" Copyright (c) 2013 by Turbo Fredriksson <turbo@bayour.com>. All rights reserved.
.\" Copyright (c) 2017 Datto Inc.
.\" Copyright (c) 2018 by Delphix. All rights reserved.
.\" The contents of this file are subject to the terms of the Common Development
.\" and Distribution License (the "License"). You may not use this file except
.\" in compliance with the License. You can obtain a copy of the license at
.\" usr/src/OPENSOLARIS.LICENSE or http://www.opensolaris.org/os/licensing.
.\"
.\" See the License for the specific language governing permissions and
.\" limitations under the License. When distributing Covered Code, include this
.\" CDDL HEADER in each file and include the License file at
.\" usr/src/OPENSOLARIS.LICENSE. If applicable, add the following below this
.\" CDDL HEADER, with the fields enclosed by brackets "[]" replaced with your
.\" own identifying information:
.\" Portions Copyright [yyyy] [name of copyright owner]
.TH ZFS-MODULE-PARAMETERS 5 "Oct 28, 2017"
.SH NAME
zfs\-module\-parameters \- ZFS module parameters
.SH DESCRIPTION
.sp
.LP
Description of the different parameters to the ZFS module.

.SS "Module parameters"
.sp
.LP

.sp
.ne 2
.na
\fBdbuf_cache_max_bytes\fR (ulong)
.ad
.RS 12n
Maximum size in bytes of the dbuf cache. When \fB0\fR this value will default
to \fB1/2^dbuf_cache_shift\fR (1/32) of the target ARC size, otherwise the
provided value in bytes will be used. The behavior of the dbuf cache and its
associated settings can be observed via the \fB/proc/spl/kstat/zfs/dbufstats\fR
kstat.
.sp
Default value: \fB0\fR.
.RE

.sp
.ne 2
.na
\fBdbuf_metadata_cache_max_bytes\fR (ulong)
.ad
.RS 12n
Maximum size in bytes of the metadata dbuf cache. When \fB0\fR this value will
default to \fB1/2^dbuf_metadata_cache_shift\fR (1/64) of the target ARC size,
otherwise the provided value in bytes will be used. The behavior of the
metadata dbuf cache and its associated settings can be observed via the
\fB/proc/spl/kstat/zfs/dbufstats\fR kstat.
.sp
Default value: \fB0\fR.
.RE

.sp
.ne 2
.na
\fBdbuf_cache_hiwater_pct\fR (uint)
.ad
.RS 12n
The percentage over \fBdbuf_cache_max_bytes\fR when dbufs must be evicted
directly.
.sp
Default value: \fB10\fR%.
.RE

.sp
.ne 2
.na
\fBdbuf_cache_lowater_pct\fR (uint)
.ad
.RS 12n
The percentage below \fBdbuf_cache_max_bytes\fR when the evict thread stops
evicting dbufs.
.sp
Default value: \fB10\fR%.
.RE

.sp
.ne 2
.na
\fBdbuf_cache_shift\fR (int)
.ad
.RS 12n
Set the size of the dbuf cache, \fBdbuf_cache_max_bytes\fR, to a log2 fraction
of the target arc size.
.sp
Default value: \fB5\fR.
.RE

.sp
.ne 2
.na
\fBdbuf_metadata_cache_shift\fR (int)
.ad
.RS 12n
Set the size of the dbuf metadata cache, \fBdbuf_metadata_cache_max_bytes\fR,
to a log2 fraction of the target arc size.
.sp
Default value: \fB6\fR.
.RE
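
.sp
.LP
For illustration only, the following minimal Python sketch (not part of ZFS;
the ARC target size is an assumption) shows how the dbuf cache defaults are
derived from the two shift tunables above when the \fB*_max_bytes\fR values
are left at \fB0\fR:
.sp
.nf
	# Hypothetical sketch: default dbuf cache sizing from the ARC target.
	arc_target = 4 * 1024**3           # assume a 4 GiB target ARC size

	dbuf_cache_shift = 5               # default
	dbuf_metadata_cache_shift = 6      # default

	# 1/2^shift of the ARC target:
	dbuf_cache_max_bytes = arc_target >> dbuf_cache_shift             # 128 MiB
	dbuf_metadata_cache_max_bytes = (
	    arc_target >> dbuf_metadata_cache_shift)                      # 64 MiB
.fi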

.sp
.ne 2
.na
\fBignore_hole_birth\fR (int)
.ad
.RS 12n
When set, the hole_birth optimization will not be used, and all holes will
always be sent on zfs send. Useful if you suspect your datasets are affected
by a bug in hole_birth.
.sp
Use \fB1\fR for on (default) and \fB0\fR for off.
.RE

.sp
.ne 2
.na
\fBl2arc_feed_again\fR (int)
.ad
.RS 12n
Turbo L2ARC warm-up. When the L2ARC is cold the fill interval will be set as
fast as possible.
.sp
Use \fB1\fR for yes (default) and \fB0\fR to disable.
.RE

.sp
.ne 2
.na
\fBl2arc_feed_min_ms\fR (ulong)
.ad
.RS 12n
Minimum feed interval in milliseconds. Only applicable when
\fBl2arc_feed_again=1\fR.
.sp
Default value: \fB200\fR.
.RE

.sp
.ne 2
.na
\fBl2arc_feed_secs\fR (ulong)
.ad
.RS 12n
Seconds between L2ARC writing
.sp
Default value: \fB1\fR.
.RE

.sp
.ne 2
.na
\fBl2arc_headroom\fR (ulong)
.ad
.RS 12n
How far through the ARC lists to search for L2ARC cacheable content, expressed
as a multiplier of \fBl2arc_write_max\fR
.sp
Default value: \fB2\fR.
.RE

.sp
.ne 2
.na
\fBl2arc_headroom_boost\fR (ulong)
.ad
.RS 12n
Scales \fBl2arc_headroom\fR by this percentage when L2ARC contents are being
successfully compressed before writing. A value of 100 disables this feature.
.sp
Default value: \fB200\fR%.
.RE

.sp
.ne 2
.na
\fBl2arc_noprefetch\fR (int)
.ad
.RS 12n
Do not write buffers to L2ARC if they were prefetched but not used by
applications
.sp
Use \fB1\fR for yes (default) and \fB0\fR to disable.
.RE

.sp
.ne 2
.na
\fBl2arc_norw\fR (int)
.ad
.RS 12n
No reads during writes
.sp
Use \fB1\fR for yes and \fB0\fR for no (default).
.RE

.sp
.ne 2
.na
\fBl2arc_write_boost\fR (ulong)
.ad
.RS 12n
Cold L2ARC devices will have \fBl2arc_write_max\fR increased by this amount
while they remain cold.
.sp
Default value: \fB8,388,608\fR.
.RE

.sp
.ne 2
.na
\fBl2arc_write_max\fR (ulong)
.ad
.RS 12n
Max write bytes per interval
.sp
Default value: \fB8,388,608\fR.
.RE

.sp
.ne 2
.na
\fBmetaslab_aliquot\fR (ulong)
.ad
.RS 12n
Metaslab granularity, in bytes. This is roughly similar to what would be
referred to as the "stripe size" in traditional RAID arrays. In normal
operation, ZFS will try to write this amount of data to a top-level vdev
before moving on to the next one.
.sp
Default value: \fB524,288\fR.
.RE

.sp
.ne 2
.na
\fBmetaslab_bias_enabled\fR (int)
.ad
.RS 12n
Enable metaslab group biasing based on its vdev's over- or under-utilization
relative to the pool.
.sp
Use \fB1\fR for yes (default) and \fB0\fR for no.
.RE

.sp
.ne 2
.na
\fBmetaslab_force_ganging\fR (ulong)
.ad
.RS 12n
Make some blocks above a certain size be gang blocks. This option is used
by the test suite to facilitate testing.
.sp
Default value: \fB16,777,217\fR.
.RE

.sp
.ne 2
.na
\fBzfs_metaslab_segment_weight_enabled\fR (int)
.ad
.RS 12n
Enable/disable segment-based metaslab selection.
.sp
Use \fB1\fR for yes (default) and \fB0\fR for no.
.RE

.sp
.ne 2
.na
\fBzfs_metaslab_switch_threshold\fR (int)
.ad
.RS 12n
When using segment-based metaslab selection, continue allocating
from the active metaslab until \fBzfs_metaslab_switch_threshold\fR
worth of buckets have been exhausted.
.sp
Default value: \fB2\fR.
.RE

.sp
.ne 2
.na
\fBmetaslab_debug_load\fR (int)
.ad
.RS 12n
Load all metaslabs during pool import.
.sp
Use \fB1\fR for yes and \fB0\fR for no (default).
.RE

.sp
.ne 2
.na
\fBmetaslab_debug_unload\fR (int)
.ad
.RS 12n
Prevent metaslabs from being unloaded.
.sp
Use \fB1\fR for yes and \fB0\fR for no (default).
.RE

.sp
.ne 2
.na
\fBmetaslab_fragmentation_factor_enabled\fR (int)
.ad
.RS 12n
Enable use of the fragmentation metric in computing metaslab weights.
.sp
Use \fB1\fR for yes (default) and \fB0\fR for no.
.RE

.sp
.ne 2
.na
\fBvdev_max_ms_count\fR (int)
.ad
.RS 12n
When a vdev is added, target this number of metaslabs per top-level vdev.
.sp
Default value: \fB200\fR.
.RE

.sp
.ne 2
.na
\fBvdev_min_ms_count\fR (int)
.ad
.RS 12n
Minimum number of metaslabs to create in a top-level vdev.
.sp
Default value: \fB16\fR.
.RE

.sp
.ne 2
.na
\fBvdev_ms_count_limit\fR (int)
.ad
.RS 12n
Practical upper limit of total metaslabs per top-level vdev.
.sp
Default value: \fB131,072\fR.
.RE

.sp
.ne 2
.na
\fBmetaslab_preload_enabled\fR (int)
.ad
.RS 12n
Enable metaslab group preloading.
.sp
Use \fB1\fR for yes (default) and \fB0\fR for no.
.RE

.sp
.ne 2
.na
\fBmetaslab_lba_weighting_enabled\fR (int)
.ad
.RS 12n
Give more weight to metaslabs with lower LBAs, assuming they have
greater bandwidth as is typically the case on a modern constant
angular velocity disk drive.
.sp
Use \fB1\fR for yes (default) and \fB0\fR for no.
.RE

.sp
.ne 2
.na
\fBspa_config_path\fR (charp)
.ad
.RS 12n
SPA config file
.sp
Default value: \fB/etc/zfs/zpool.cache\fR.
.RE

.sp
.ne 2
.na
\fBspa_asize_inflation\fR (int)
.ad
.RS 12n
Multiplication factor used to estimate actual disk consumption from the
size of data being written. The default value is a worst case estimate,
but lower values may be valid for a given pool depending on its
configuration. Pool administrators who understand the factors involved
may wish to specify a more realistic inflation factor, particularly if
they operate close to quota or capacity limits.
.sp
Default value: \fB24\fR.
.RE

.sp
.ne 2
.na
\fBspa_load_print_vdev_tree\fR (int)
.ad
.RS 12n
Whether to print the vdev tree in the debugging message buffer during pool import.
Use 0 to disable and 1 to enable.
.sp
Default value: \fB0\fR.
.RE

.sp
.ne 2
.na
\fBspa_load_verify_data\fR (int)
.ad
.RS 12n
Whether to traverse data blocks during an "extreme rewind" (\fB-X\fR)
import. Use 0 to disable and 1 to enable.

An extreme rewind import normally performs a full traversal of all
blocks in the pool for verification. If this parameter is set to 0,
the traversal skips non-metadata blocks. It can be toggled once the
import has started to stop or start the traversal of non-metadata blocks.
.sp
Default value: \fB1\fR.
.RE

.sp
.ne 2
.na
\fBspa_load_verify_metadata\fR (int)
.ad
.RS 12n
Whether to traverse blocks during an "extreme rewind" (\fB-X\fR)
pool import. Use 0 to disable and 1 to enable.

An extreme rewind import normally performs a full traversal of all
blocks in the pool for verification. If this parameter is set to 0,
the traversal is not performed. It can be toggled once the import has
started to stop or start the traversal.
.sp
Default value: \fB1\fR.
.RE

.sp
.ne 2
.na
\fBspa_load_verify_maxinflight\fR (int)
.ad
.RS 12n
Maximum concurrent I/Os during the traversal performed during an "extreme
rewind" (\fB-X\fR) pool import.
.sp
Default value: \fB10000\fR.
.RE

.sp
.ne 2
.na
\fBspa_slop_shift\fR (int)
.ad
.RS 12n
Normally, we don't allow the last 3.2% (1/(2^spa_slop_shift)) of space
in the pool to be consumed. This ensures that we don't run the pool
completely out of space, due to unaccounted changes (e.g. to the MOS).
It also limits the worst-case time to allocate space. If we have
less than this amount of free space, most ZPL operations (e.g. write,
create) will return ENOSPC.
.sp
Default value: \fB5\fR.
.RE
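
.sp
.LP
As a worked example (a sketch only; the pool size is an assumption, and the
real code applies additional bounds), the slop space for the default
\fBspa_slop_shift\fR of 5 works out as follows:
.sp
.nf
	# Hypothetical sketch: reserved slop space for an assumed pool size.
	pool_size = 10 * 1024**4            # assume a 10 TiB pool
	spa_slop_shift = 5                  # default

	slop = pool_size >> spa_slop_shift  # 1/32 of the pool, i.e. ~3.1%
	print(slop // 1024**3)              # 320 GiB held in reserve
.fi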

.sp
.ne 2
.na
\fBvdev_removal_max_span\fR (int)
.ad
.RS 12n
During top-level vdev removal, chunks of data are copied from the vdev
which may include free space in order to trade bandwidth for IOPS.
This parameter determines the maximum span of free space (in bytes)
which will be included as "unnecessary" data in a chunk of copied data.

The default value here was chosen to align with
\fBzfs_vdev_read_gap_limit\fR, which is a similar concept when doing
regular reads (but there's no reason it has to be the same).
.sp
Default value: \fB32,768\fR.
.RE

.sp
.ne 2
.na
\fBzfetch_array_rd_sz\fR (ulong)
.ad
.RS 12n
If prefetching is enabled, disable prefetching for reads larger than this size.
.sp
Default value: \fB1,048,576\fR.
.RE

.sp
.ne 2
.na
\fBzfetch_max_distance\fR (uint)
.ad
.RS 12n
Max bytes to prefetch per stream (default 8MB).
.sp
Default value: \fB8,388,608\fR.
.RE

.sp
.ne 2
.na
\fBzfetch_max_streams\fR (uint)
.ad
.RS 12n
Max number of streams per zfetch (prefetch streams per file).
.sp
Default value: \fB8\fR.
.RE

.sp
.ne 2
.na
\fBzfetch_min_sec_reap\fR (uint)
.ad
.RS 12n
Min time before an active prefetch stream can be reclaimed
.sp
Default value: \fB2\fR.
.RE

.sp
.ne 2
.na
\fBzfs_arc_dnode_limit\fR (ulong)
.ad
.RS 12n
When the number of bytes consumed by dnodes in the ARC exceeds this number of
bytes, try to unpin some of it in response to demand for non-metadata. This
value acts as a ceiling to the amount of dnode metadata. It defaults to 0,
which indicates that a percentage of the ARC meta buffers, determined by
\fBzfs_arc_dnode_limit_percent\fR, may be used for dnodes.

See also \fBzfs_arc_meta_prune\fR which serves a similar purpose but is used
when the amount of metadata in the ARC exceeds \fBzfs_arc_meta_limit\fR rather
than in response to overall demand for non-metadata.

.sp
Default value: \fB0\fR.
.RE

.sp
.ne 2
.na
\fBzfs_arc_dnode_limit_percent\fR (ulong)
.ad
.RS 12n
Percentage of ARC meta buffers that can be consumed by dnodes.
.sp
See also \fBzfs_arc_dnode_limit\fR which serves a similar purpose but has a
higher priority if set to a nonzero value.
.sp
Default value: \fB10\fR%.
.RE

.sp
.ne 2
.na
\fBzfs_arc_dnode_reduce_percent\fR (ulong)
.ad
.RS 12n
Percentage of ARC dnodes to try to scan in response to demand for non-metadata
when the number of bytes consumed by dnodes exceeds \fBzfs_arc_dnode_limit\fR.

.sp
Default value: \fB10\fR% of the number of dnodes in the ARC.
.RE

.sp
.ne 2
.na
\fBzfs_arc_average_blocksize\fR (int)
.ad
.RS 12n
The ARC's buffer hash table is sized based on the assumption of an average
block size of \fBzfs_arc_average_blocksize\fR (default 8K). This works out
to roughly 1MB of hash table per 1GB of physical memory with 8-byte pointers.
For configurations with a known larger average block size this value can be
increased to reduce the memory footprint.

.sp
Default value: \fB8192\fR.
.RE
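
.sp
.LP
A minimal sketch of the rule of thumb quoted above (the physical memory size
is an assumption, not part of ZFS):
.sp
.nf
	# Hypothetical sketch: ARC hash table size from the average block size.
	physmem = 64 * 1024**3              # assume 64 GiB of physical memory
	zfs_arc_average_blocksize = 8192    # default
	ptr_size = 8                        # 8-byte pointers

	# One pointer per average-sized block of physical memory:
	hash_table_bytes = physmem // zfs_arc_average_blocksize * ptr_size
	print(hash_table_bytes // 1024**2)  # 64 MiB, i.e. ~1MB per 1GB of RAM
.fi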

.sp
.ne 2
.na
\fBzfs_arc_evict_batch_limit\fR (int)
.ad
.RS 12n
Number of ARC headers to evict per sub-list before proceeding to another
sub-list. This batch-style operation prevents entire sub-lists from being
evicted at once but comes at a cost of additional unlocking and locking.
.sp
Default value: \fB10\fR.
.RE

.sp
.ne 2
.na
\fBzfs_arc_grow_retry\fR (int)
.ad
.RS 12n
If set to a non-zero value, it will replace the arc_grow_retry value with this
value. The arc_grow_retry value (default 5) is the number of seconds the ARC
will wait before trying to resume growth after a memory pressure event.
.sp
Default value: \fB0\fR.
.RE

.sp
.ne 2
.na
\fBzfs_arc_lotsfree_percent\fR (int)
.ad
.RS 12n
Throttle I/O when free system memory drops below this percentage of total
system memory. Setting this value to 0 will disable the throttle.
.sp
Default value: \fB10\fR%.
.RE

.sp
.ne 2
.na
\fBzfs_arc_max\fR (ulong)
.ad
.RS 12n
Maximum size of the ARC in bytes. If set to 0 then it will consume 1/2 of
system RAM. This value must be at least 67108864 (64 megabytes).
.sp
This value can be changed dynamically with some caveats. It cannot be set back
to 0 while running, and reducing it below the current ARC size will not cause
the ARC to shrink without memory pressure to induce shrinking.
.sp
Default value: \fB0\fR.
.RE

.sp
.ne 2
.na
\fBzfs_arc_meta_adjust_restarts\fR (ulong)
.ad
.RS 12n
The number of restart passes to make while scanning the ARC attempting
to free buffers in order to stay below the \fBzfs_arc_meta_limit\fR.
This value should not need to be tuned but is available to facilitate
performance analysis.
.sp
Default value: \fB4096\fR.
.RE

.sp
.ne 2
.na
\fBzfs_arc_meta_limit\fR (ulong)
.ad
.RS 12n
The maximum allowed size in bytes that meta data buffers are allowed to
consume in the ARC. When this limit is reached meta data buffers will
be reclaimed even if the overall arc_c_max has not been reached. This
value defaults to 0 which indicates that a percentage of the ARC, based
on \fBzfs_arc_meta_limit_percent\fR, may be used for meta data.
.sp
This value may be changed dynamically except that it cannot be set back to 0
for a specific percent of the ARC; it must be set to an explicit value.
.sp
Default value: \fB0\fR.
.RE

.sp
.ne 2
.na
\fBzfs_arc_meta_limit_percent\fR (ulong)
.ad
.RS 12n
Percentage of ARC buffers that can be used for meta data.

See also \fBzfs_arc_meta_limit\fR which serves a similar purpose but has a
higher priority if set to a nonzero value.

.sp
Default value: \fB75\fR%.
.RE

.sp
.ne 2
.na
\fBzfs_arc_meta_min\fR (ulong)
.ad
.RS 12n
The minimum allowed size in bytes that meta data buffers may consume in
the ARC. This value defaults to 0 which disables a floor on the amount
of the ARC devoted to meta data.
.sp
Default value: \fB0\fR.
.RE

.sp
.ne 2
.na
\fBzfs_arc_meta_prune\fR (int)
.ad
.RS 12n
The number of dentries and inodes to be scanned looking for entries
which can be dropped. This may be required when the ARC reaches the
\fBzfs_arc_meta_limit\fR because dentries and inodes can pin buffers
in the ARC. Increasing this value will cause the dentry and inode caches
to be pruned more aggressively. Setting this value to 0 will disable
pruning the inode and dentry caches.
.sp
Default value: \fB10,000\fR.
.RE

.sp
.ne 2
.na
\fBzfs_arc_meta_strategy\fR (int)
.ad
.RS 12n
Define the strategy for ARC meta data buffer eviction (meta reclaim strategy).
A value of 0 (META_ONLY) will evict only the ARC meta data buffers.
A value of 1 (BALANCED) indicates that additional data buffers may be evicted
if that is required in order to evict the required number of meta data buffers.
.sp
Default value: \fB1\fR.
.RE

.sp
.ne 2
.na
\fBzfs_arc_min\fR (ulong)
.ad
.RS 12n
Minimum size of the ARC in bytes. If set to 0 then arc_c_min will default to
consuming the larger of 32M or 1/32 of total system memory.
.sp
Default value: \fB0\fR.
.RE

.sp
.ne 2
.na
\fBzfs_arc_min_prefetch_ms\fR (int)
.ad
.RS 12n
Minimum time prefetched blocks are locked in the ARC, specified in ms.
A value of \fB0\fR will default to 1000 ms.
.sp
Default value: \fB0\fR.
.RE

.sp
.ne 2
.na
\fBzfs_arc_min_prescient_prefetch_ms\fR (int)
.ad
.RS 12n
Minimum time "prescient prefetched" blocks are locked in the ARC, specified
in ms. These blocks are meant to be prefetched fairly aggressively ahead of
the code that may use them. A value of \fB0\fR will default to 6000 ms.
.sp
Default value: \fB0\fR.
.RE

.sp
.ne 2
.na
\fBzfs_max_missing_tvds\fR (int)
.ad
.RS 12n
Number of missing top-level vdevs which will be allowed during
pool import (only in read-only mode).
.sp
Default value: \fB0\fR.
.RE

.sp
.ne 2
.na
\fBzfs_multilist_num_sublists\fR (int)
.ad
.RS 12n
To allow more fine-grained locking, each ARC state contains a series
of lists for both data and meta data objects. Locking is performed at
the level of these "sub-lists". This parameter controls the number of
sub-lists per ARC state, and also applies to other uses of the
multilist data structure.
.sp
Default value: \fB4\fR or the number of online CPUs, whichever is greater
.RE

.sp
.ne 2
.na
\fBzfs_arc_overflow_shift\fR (int)
.ad
.RS 12n
The ARC size is considered to be overflowing if it exceeds the current
ARC target size (arc_c) by a threshold determined by this parameter.
The threshold is calculated as a fraction of arc_c using the formula
"arc_c >> \fBzfs_arc_overflow_shift\fR".

The default value of 8 causes the ARC to be considered to be overflowing
if it exceeds the target size by 1/256th (about 0.4%) of the target size.

When the ARC is overflowing, new buffer allocations are stalled until
the reclaim thread catches up and the overflow condition no longer exists.
.sp
Default value: \fB8\fR.
.RE
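
.sp
.LP
The overflow test can be sketched as follows (illustrative Python with an
assumed ARC target; not the actual implementation):
.sp
.nf
	# Hypothetical sketch: when the ARC counts as "overflowing".
	arc_c = 8 * 1024**3                  # assume an 8 GiB ARC target
	zfs_arc_overflow_shift = 8           # default

	threshold = arc_c >> zfs_arc_overflow_shift  # 1/256 of arc_c, 32 MiB here

	def overflowing(arc_size):
	    return arc_size > arc_c + threshold

	print(overflowing(arc_c + 16 * 1024**2))     # False
	print(overflowing(arc_c + 64 * 1024**2))     # True
.fi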

.sp
.ne 2
.na
\fBzfs_arc_p_min_shift\fR (int)
.ad
.RS 12n
If set to a non-zero value, this will update arc_p_min_shift (default 4)
with the new value.
arc_p_min_shift is used as a shift of arc_c when calculating both the
minimum and maximum arc_p.
.sp
Default value: \fB0\fR.
.RE

.sp
.ne 2
.na
\fBzfs_arc_p_dampener_disable\fR (int)
.ad
.RS 12n
Disable arc_p adapt dampener
.sp
Use \fB1\fR for yes (default) and \fB0\fR to disable.
.RE

.sp
.ne 2
.na
\fBzfs_arc_shrink_shift\fR (int)
.ad
.RS 12n
If set to a non-zero value, this will update arc_shrink_shift (default 7)
with the new value.
.sp
Default value: \fB0\fR.
.RE

.sp
.ne 2
.na
\fBzfs_arc_pc_percent\fR (uint)
.ad
.RS 12n
Percent of pagecache to reclaim ARC to.

This tunable allows the ZFS ARC to play more nicely with the kernel's LRU
pagecache. It can guarantee that the ARC size won't collapse under scanning
pressure on the pagecache, yet still allows the ARC to be reclaimed down to
zfs_arc_min if necessary. This value is specified as percent of pagecache
size (as measured by NR_FILE_PAGES) where that percent may exceed 100. This
only operates during memory pressure/reclaim.
.sp
Default value: \fB0\fR% (disabled).
.RE

.sp
.ne 2
.na
\fBzfs_arc_sys_free\fR (ulong)
.ad
.RS 12n
The target number of bytes the ARC should leave as free memory on the system.
Defaults to the larger of 1/64 of physical memory or 512K. Setting this
option to a non-zero value will override the default.
.sp
Default value: \fB0\fR.
.RE

.sp
.ne 2
.na
\fBzfs_autoimport_disable\fR (int)
.ad
.RS 12n
Disable pool import at module load by ignoring the cache file (typically \fB/etc/zfs/zpool.cache\fR).
.sp
Use \fB1\fR for yes (default) and \fB0\fR for no.
.RE

.sp
.ne 2
.na
\fBzfs_checksums_per_second\fR (int)
.ad
.RS 12n
Rate limit checksum events to this many per second. Note that this should
not be set below the zed thresholds (currently 10 checksums over 10 sec)
or else zed may not trigger any action.
.sp
Default value: \fB20\fR.
.RE

.sp
.ne 2
.na
\fBzfs_commit_timeout_pct\fR (int)
.ad
.RS 12n
This controls the amount of time that a ZIL block (lwb) will remain "open"
when it isn't "full", and it has a thread waiting for it to be committed to
stable storage. The timeout is scaled based on a percentage of the last lwb
latency to avoid significantly impacting the latency of each individual
transaction record (itx).
.sp
Default value: \fB5\fR%.
.RE

.sp
.ne 2
.na
\fBzfs_condense_indirect_vdevs_enable\fR (int)
.ad
.RS 12n
Enable condensing indirect vdev mappings. When set to a non-zero value,
attempt to condense indirect vdev mappings if the mapping uses more than
\fBzfs_condense_min_mapping_bytes\fR bytes of memory and if the obsolete
space map object uses more than \fBzfs_condense_max_obsolete_bytes\fR
bytes on-disk. The condensing process is an attempt to save memory by
removing obsolete mappings.
.sp
Default value: \fB1\fR.
.RE

.sp
.ne 2
.na
\fBzfs_condense_max_obsolete_bytes\fR (ulong)
.ad
.RS 12n
Only attempt to condense indirect vdev mappings if the on-disk size
of the obsolete space map object is greater than this number of bytes
(see \fBzfs_condense_indirect_vdevs_enable\fR).
.sp
Default value: \fB1,073,741,824\fR.
.RE

.sp
.ne 2
.na
\fBzfs_condense_min_mapping_bytes\fR (ulong)
.ad
.RS 12n
Minimum size vdev mapping to attempt to condense (see
\fBzfs_condense_indirect_vdevs_enable\fR).
.sp
Default value: \fB131,072\fR.
.RE

.sp
.ne 2
.na
\fBzfs_dbgmsg_enable\fR (int)
.ad
.RS 12n
Internally ZFS keeps a small log to facilitate debugging. By default the log
is disabled; to enable it, set this option to 1. The contents of the log can
be accessed by reading the /proc/spl/kstat/zfs/dbgmsg file. Writing 0 to
this proc file clears the log.
.sp
Default value: \fB0\fR.
.RE

.sp
.ne 2
.na
\fBzfs_dbgmsg_maxsize\fR (int)
.ad
.RS 12n
The maximum size in bytes of the internal ZFS debug log.
.sp
Default value: \fB4M\fR.
.RE

.sp
.ne 2
.na
\fBzfs_dbuf_state_index\fR (int)
.ad
.RS 12n
This feature is currently unused. It is normally used for controlling what
reporting is available under /proc/spl/kstat/zfs.
.sp
Default value: \fB0\fR.
.RE

.sp
.ne 2
.na
\fBzfs_deadman_enabled\fR (int)
.ad
.RS 12n
When a pool sync operation takes longer than \fBzfs_deadman_synctime_ms\fR
milliseconds, or when an individual I/O takes longer than
\fBzfs_deadman_ziotime_ms\fR milliseconds, then the operation is considered to
be "hung". If \fBzfs_deadman_enabled\fR is set then the deadman behavior is
invoked as described by the \fBzfs_deadman_failmode\fR module option.
By default the deadman is enabled and configured to \fBwait\fR which results
in "hung" I/Os only being logged. The deadman is automatically disabled
when a pool gets suspended.
.sp
Default value: \fB1\fR.
.RE

.sp
.ne 2
.na
\fBzfs_deadman_failmode\fR (charp)
.ad
.RS 12n
Controls the failure behavior when the deadman detects a "hung" I/O. Valid
values are \fBwait\fR, \fBcontinue\fR, and \fBpanic\fR.
.sp
\fBwait\fR - Wait for a "hung" I/O to complete. For each "hung" I/O a
"deadman" event will be posted describing that I/O.
.sp
\fBcontinue\fR - Attempt to recover from a "hung" I/O by re-dispatching it
to the I/O pipeline if possible.
.sp
\fBpanic\fR - Panic the system. This can be used to facilitate an automatic
fail-over to a properly configured fail-over partner.
.sp
Default value: \fBwait\fR.
.RE

.sp
.ne 2
.na
\fBzfs_deadman_checktime_ms\fR (int)
.ad
.RS 12n
Check time in milliseconds. This defines the frequency at which we check
for hung I/O and potentially invoke the \fBzfs_deadman_failmode\fR behavior.
.sp
Default value: \fB60,000\fR.
.RE

.sp
.ne 2
.na
\fBzfs_deadman_synctime_ms\fR (ulong)
.ad
.RS 12n
Interval in milliseconds after which the deadman is triggered and also
the interval after which a pool sync operation is considered to be "hung".
Once this limit is exceeded the deadman will be invoked every
\fBzfs_deadman_checktime_ms\fR milliseconds until the pool sync completes.
.sp
Default value: \fB600,000\fR.
.RE

.sp
.ne 2
.na
\fBzfs_deadman_ziotime_ms\fR (ulong)
.ad
.RS 12n
Interval in milliseconds after which the deadman is triggered and an
individual I/O operation is considered to be "hung". As long as the I/O
remains "hung" the deadman will be invoked every \fBzfs_deadman_checktime_ms\fR
milliseconds until the I/O completes.
.sp
Default value: \fB300,000\fR.
.RE

.sp
.ne 2
.na
\fBzfs_dedup_prefetch\fR (int)
.ad
.RS 12n
Enable prefetching of dedup'd blocks.
.sp
Use \fB1\fR for yes and \fB0\fR to disable (default).
.RE

.sp
.ne 2
.na
\fBzfs_delay_min_dirty_percent\fR (int)
.ad
.RS 12n
Start to delay each transaction once there is this amount of dirty data,
expressed as a percentage of \fBzfs_dirty_data_max\fR.
This value should be >= zfs_vdev_async_write_active_max_dirty_percent.
See the section "ZFS TRANSACTION DELAY".
.sp
Default value: \fB60\fR%.
.RE

.sp
.ne 2
.na
\fBzfs_delay_scale\fR (int)
.ad
.RS 12n
This controls how quickly the transaction delay approaches infinity.
Larger values cause longer delays for a given amount of dirty data.
.sp
For the smoothest delay, this value should be about 1 billion divided
by the maximum number of operations per second. This will smoothly
handle between 10x and 1/10th this number.
.sp
See the section "ZFS TRANSACTION DELAY".
.sp
Note: \fBzfs_delay_scale\fR * \fBzfs_dirty_data_max\fR must be < 2^64.
.sp
Default value: \fB500,000\fR.
.RE
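
.sp
.LP
A worked example of the sizing rule above (the workload capability is an
assumption):
.sp
.nf
	# Hypothetical sketch: picking zfs_delay_scale for a workload.
	max_ops_per_sec = 2000                       # assumed backend capability

	zfs_delay_scale = 10**9 // max_ops_per_sec   # 500,000, the default
	print(zfs_delay_scale)
.fi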

.sp
.ne 2
.na
\fBzfs_slow_io_events_per_second\fR (int)
.ad
.RS 12n
Rate limit delay zevents (which report slow I/Os) to this many per second.
.sp
Default value: \fB20\fR.
.RE

.sp
.ne 2
.na
\fBzfs_delete_blocks\fR (ulong)
.ad
.RS 12n
This is used to define a large file for the purposes of delete. Files
containing more than \fBzfs_delete_blocks\fR blocks will be deleted
asynchronously, while smaller files are deleted synchronously. Decreasing this
value will reduce the time spent in an unlink(2) system call at the expense of
a longer delay before the freed space is available.
.sp
Default value: \fB20,480\fR.
.RE

.sp
.ne 2
.na
\fBzfs_dirty_data_max\fR (int)
.ad
.RS 12n
Determines the dirty space limit in bytes. Once this limit is exceeded, new
writes are halted until space frees up. This parameter takes precedence
over \fBzfs_dirty_data_max_percent\fR.
See the section "ZFS TRANSACTION DELAY".
.sp
Default value: \fB10\fR% of physical RAM, capped at \fBzfs_dirty_data_max_max\fR.
.RE

.sp
.ne 2
.na
\fBzfs_dirty_data_max_max\fR (int)
.ad
.RS 12n
Maximum allowable value of \fBzfs_dirty_data_max\fR, expressed in bytes.
This limit is only enforced at module load time, and will be ignored if
\fBzfs_dirty_data_max\fR is later changed. This parameter takes
precedence over \fBzfs_dirty_data_max_max_percent\fR. See the section
"ZFS TRANSACTION DELAY".
.sp
Default value: \fB25\fR% of physical RAM.
.RE

.sp
.ne 2
.na
\fBzfs_dirty_data_max_max_percent\fR (int)
.ad
.RS 12n
Maximum allowable value of \fBzfs_dirty_data_max\fR, expressed as a
percentage of physical RAM. This limit is only enforced at module load
time, and will be ignored if \fBzfs_dirty_data_max\fR is later changed.
The parameter \fBzfs_dirty_data_max_max\fR takes precedence over this
one. See the section "ZFS TRANSACTION DELAY".
.sp
Default value: \fB25\fR%.
.RE

.sp
.ne 2
.na
\fBzfs_dirty_data_max_percent\fR (int)
.ad
.RS 12n
Determines the dirty space limit, expressed as a percentage of all
memory. Once this limit is exceeded, new writes are halted until space frees
up. The parameter \fBzfs_dirty_data_max\fR takes precedence over this
one. See the section "ZFS TRANSACTION DELAY".
.sp
Default value: \fB10\fR%, subject to \fBzfs_dirty_data_max_max\fR.
.RE

.sp
.ne 2
.na
\fBzfs_dirty_data_sync_percent\fR (int)
.ad
.RS 12n
Start syncing out a transaction group if there's at least this much dirty data
as a percentage of \fBzfs_dirty_data_max\fR. This should be less than
\fBzfs_vdev_async_write_active_min_dirty_percent\fR.
.sp
Default value: \fB20\fR% of \fBzfs_dirty_data_max\fR.
.RE
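
.sp
.LP
To illustrate how the dirty-data tunables above interact, here is a minimal
sketch under the documented precedence rules (the memory size is an
assumption):
.sp
.nf
	# Hypothetical sketch: resolving the dirty-data limits.
	physmem = 32 * 1024**3                         # assume 32 GiB of RAM

	zfs_dirty_data_max_max = physmem * 25 // 100   # load-time cap: 25% of RAM
	zfs_dirty_data_max = min(physmem * 10 // 100,  # default: 10% of RAM,
	                         zfs_dirty_data_max_max)

	# Delays begin at 60% of the limit; a txg sync starts at 20%.
	delay_start = zfs_dirty_data_max * 60 // 100
	sync_start = zfs_dirty_data_max * 20 // 100
.fi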

.sp
.ne 2
.na
\fBzfs_fletcher_4_impl\fR (string)
.ad
.RS 12n
Select a fletcher 4 implementation.
.sp
Supported selectors are: \fBfastest\fR, \fBscalar\fR, \fBsse2\fR, \fBssse3\fR,
\fBavx2\fR, \fBavx512f\fR, and \fBaarch64_neon\fR.
All of the selectors except \fBfastest\fR and \fBscalar\fR require instruction
set extensions to be available and will only appear if ZFS detects that they are
present at runtime. If multiple implementations of fletcher 4 are available,
the \fBfastest\fR will be chosen using a micro benchmark. Selecting \fBscalar\fR
results in the original CPU-based calculation being used. Selecting any option
other than \fBfastest\fR and \fBscalar\fR results in vector instructions from
the respective CPU instruction set being used.
.sp
Default value: \fBfastest\fR.
.RE

.sp
.ne 2
.na
\fBzfs_free_bpobj_enabled\fR (int)
.ad
.RS 12n
Enable/disable the processing of the free_bpobj object.
.sp
Default value: \fB1\fR.
.RE

.sp
.ne 2
.na
\fBzfs_async_block_max_blocks\fR (ulong)
.ad
.RS 12n
Maximum number of blocks freed in a single txg.
.sp
Default value: \fB100,000\fR.
.RE

.sp
.ne 2
.na
\fBzfs_override_estimate_recordsize\fR (ulong)
.ad
.RS 12n
Record size calculation override for zfs send estimates.
.sp
Default value: \fB0\fR.
.RE

.sp
.ne 2
.na
\fBzfs_vdev_async_read_max_active\fR (int)
.ad
.RS 12n
Maximum asynchronous read I/Os active to each device.
See the section "ZFS I/O SCHEDULER".
.sp
Default value: \fB3\fR.
.RE

.sp
.ne 2
.na
\fBzfs_vdev_async_read_min_active\fR (int)
.ad
.RS 12n
Minimum asynchronous read I/Os active to each device.
See the section "ZFS I/O SCHEDULER".
.sp
Default value: \fB1\fR.
.RE

.sp
.ne 2
.na
\fBzfs_vdev_async_write_active_max_dirty_percent\fR (int)
.ad
.RS 12n
When the pool has more than
\fBzfs_vdev_async_write_active_max_dirty_percent\fR dirty data, use
\fBzfs_vdev_async_write_max_active\fR to limit active async writes. If
the dirty data is between min and max, the active I/O limit is linearly
interpolated. See the section "ZFS I/O SCHEDULER".
.sp
Default value: \fB60\fR%.
.RE

.sp
.ne 2
.na
\fBzfs_vdev_async_write_active_min_dirty_percent\fR (int)
.ad
.RS 12n
When the pool has less than
\fBzfs_vdev_async_write_active_min_dirty_percent\fR dirty data, use
\fBzfs_vdev_async_write_min_active\fR to limit active async writes. If
the dirty data is between min and max, the active I/O limit is linearly
interpolated. See the section "ZFS I/O SCHEDULER".
.sp
Default value: \fB30\fR%.
.RE

.sp
.ne 2
.na
\fBzfs_vdev_async_write_max_active\fR (int)
.ad
.RS 12n
Maximum asynchronous write I/Os active to each device.
See the section "ZFS I/O SCHEDULER".
.sp
Default value: \fB10\fR.
.RE

.sp
.ne 2
.na
\fBzfs_vdev_async_write_min_active\fR (int)
.ad
.RS 12n
Minimum asynchronous write I/Os active to each device.
See the section "ZFS I/O SCHEDULER".
.sp
Lower values are associated with better latency on rotational media but poorer
resilver performance. The default value of 2 was chosen as a compromise. A
value of 3 has been shown to improve resilver performance further at a cost of
further increasing latency.
.sp
Default value: \fB2\fR.
.RE

.sp
.ne 2
.na
\fBzfs_vdev_max_active\fR (int)
.ad
.RS 12n
The maximum number of I/Os active to each device. Ideally, this will be >=
the sum of each queue's max_active. It must be at least the sum of each
queue's min_active. See the section "ZFS I/O SCHEDULER".
.sp
Default value: \fB1,000\fR.
.RE

.sp
.ne 2
.na
\fBzfs_vdev_scrub_max_active\fR (int)
.ad
.RS 12n
Maximum scrub I/Os active to each device.
See the section "ZFS I/O SCHEDULER".
.sp
Default value: \fB2\fR.
.RE

.sp
.ne 2
.na
\fBzfs_vdev_scrub_min_active\fR (int)
.ad
.RS 12n
Minimum scrub I/Os active to each device.
See the section "ZFS I/O SCHEDULER".
.sp
Default value: \fB1\fR.
.RE

.sp
.ne 2
.na
\fBzfs_vdev_sync_read_max_active\fR (int)
.ad
.RS 12n
Maximum synchronous read I/Os active to each device.
See the section "ZFS I/O SCHEDULER".
.sp
Default value: \fB10\fR.
.RE

.sp
.ne 2
.na
\fBzfs_vdev_sync_read_min_active\fR (int)
.ad
.RS 12n
Minimum synchronous read I/Os active to each device.
See the section "ZFS I/O SCHEDULER".
.sp
Default value: \fB10\fR.
.RE

.sp
.ne 2
.na
\fBzfs_vdev_sync_write_max_active\fR (int)
.ad
.RS 12n
Maximum synchronous write I/Os active to each device.
See the section "ZFS I/O SCHEDULER".
.sp
Default value: \fB10\fR.
.RE

.sp
.ne 2
.na
\fBzfs_vdev_sync_write_min_active\fR (int)
.ad
.RS 12n
Minimum synchronous write I/Os active to each device.
See the section "ZFS I/O SCHEDULER".
.sp
Default value: \fB10\fR.
.RE

.sp
.ne 2
.na
\fBzfs_vdev_queue_depth_pct\fR (int)
.ad
.RS 12n
Maximum number of queued allocations per top-level vdev expressed as
a percentage of \fBzfs_vdev_async_write_max_active\fR, which allows the
system to detect devices that are more capable of handling allocations
and to allocate more blocks to those devices. It allows for dynamic
allocation distribution when devices are imbalanced, as fuller devices
will tend to be slower than empty devices.

See also \fBzio_dva_throttle_enabled\fR.
.sp
Default value: \fB1000\fR%.
.RE

.sp
.ne 2
.na
\fBzfs_expire_snapshot\fR (int)
.ad
.RS 12n
Seconds to expire .zfs/snapshot
.sp
Default value: \fB300\fR.
.RE

.sp
.ne 2
.na
\fBzfs_admin_snapshot\fR (int)
.ad
.RS 12n
Allow the creation, removal, or renaming of entries in the .zfs/snapshot
directory to cause the creation, destruction, or renaming of snapshots.
When enabled this functionality works both locally and over NFS exports
which have the 'no_root_squash' option set. This functionality is disabled
by default.
.sp
Use \fB1\fR for yes and \fB0\fR for no (default).
.RE

.sp
.ne 2
.na
\fBzfs_flags\fR (int)
.ad
.RS 12n
Set additional debugging flags. The following flags may be bitwise-or'd
together.
.sp
.TS
box;
rB lB
lB lB
r l.
Value	Symbolic Name
	Description
_
1	ZFS_DEBUG_DPRINTF
	Enable dprintf entries in the debug log.
_
2	ZFS_DEBUG_DBUF_VERIFY *
	Enable extra dbuf verifications.
_
4	ZFS_DEBUG_DNODE_VERIFY *
	Enable extra dnode verifications.
_
8	ZFS_DEBUG_SNAPNAMES
	Enable snapshot name verification.
_
16	ZFS_DEBUG_MODIFY
	Check for illegally modified ARC buffers.
_
64	ZFS_DEBUG_ZIO_FREE
	Enable verification of block frees.
_
128	ZFS_DEBUG_HISTOGRAM_VERIFY
	Enable extra spacemap histogram verifications.
_
256	ZFS_DEBUG_METASLAB_VERIFY
	Verify space accounting on disk matches in-core range_trees.
_
512	ZFS_DEBUG_SET_ERROR
	Enable SET_ERROR and dprintf entries in the debug log.
.TE
.sp
* Requires debug build.
.sp
Default value: \fB0\fR.
.RE
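
.sp
.LP
For example, the flag values from the table may be combined like this (a
sketch; any language with a bitwise-or operator works the same way):
.sp
.nf
	# Hypothetical sketch: composing a zfs_flags value.
	ZFS_DEBUG_DPRINTF = 1
	ZFS_DEBUG_SNAPNAMES = 8
	ZFS_DEBUG_SET_ERROR = 512

	zfs_flags = ZFS_DEBUG_DPRINTF | ZFS_DEBUG_SNAPNAMES | ZFS_DEBUG_SET_ERROR
	print(zfs_flags)    # 521
.fi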

.sp
.ne 2
.na
\fBzfs_free_leak_on_eio\fR (int)
.ad
.RS 12n
If destroy encounters an EIO while reading metadata (e.g. indirect
blocks), space referenced by the missing metadata can not be freed.
Normally this causes the background destroy to become "stalled", as
it is unable to make forward progress. While in this stalled state,
all remaining space to free from the error-encountering filesystem is
"temporarily leaked". Set this flag to cause it to ignore the EIO,
permanently leak the space from indirect blocks that can not be read,
and continue to free everything else that it can.

The default, "stalling" behavior is useful if the storage partially
fails (i.e. some but not all i/os fail), and then later recovers. In
this case, we will be able to continue pool operations while it is
partially failed, and when it recovers, we can continue to free the
space, with no leaks. However, note that this case is actually
fairly rare.

Typically pools either (a) fail completely (but perhaps temporarily,
e.g. a top-level vdev going offline), or (b) have localized,
permanent errors (e.g. disk returns the wrong data due to bit flip or
firmware bug). In case (a), this setting does not matter because the
pool will be suspended and the sync thread will not be able to make
forward progress regardless. In case (b), because the error is
permanent, the best we can do is leak the minimum amount of space,
which is what setting this flag will do. Therefore, it is reasonable
for this flag to normally be set, but we chose the more conservative
approach of not setting it, so that there is no possibility of
leaking space in the "partial temporary" failure case.
.sp
Default value: \fB0\fR.
.RE

.sp
.ne 2
.na
\fBzfs_free_min_time_ms\fR (int)
.ad
.RS 12n
During a \fBzfs destroy\fR operation using \fBfeature@async_destroy\fR a minimum
of this much time will be spent working on freeing blocks per txg.
.sp
Default value: \fB1,000\fR.
.RE

.sp
.ne 2
.na
\fBzfs_immediate_write_sz\fR (long)
.ad
.RS 12n
Largest data block to write to zil. Larger blocks will be treated as if the
dataset being written to had the property setting \fBlogbias=throughput\fR.
.sp
Default value: \fB32,768\fR.
.RE

.sp
.ne 2
.na
\fBzfs_lua_max_instrlimit\fR (ulong)
.ad
.RS 12n
The maximum execution time limit that can be set for a ZFS channel program,
specified as a number of Lua instructions.
.sp
Default value: \fB100,000,000\fR.
.RE

.sp
.ne 2
.na
\fBzfs_lua_max_memlimit\fR (ulong)
.ad
.RS 12n
The maximum memory limit that can be set for a ZFS channel program, specified
in bytes.
.sp
Default value: \fB104,857,600\fR.
.RE

.sp
.ne 2
.na
\fBzfs_max_dataset_nesting\fR (int)
.ad
.RS 12n
The maximum depth of nested datasets. This value can be tuned temporarily to
fix existing datasets that exceed the predefined limit.
.sp
Default value: \fB50\fR.
.RE

.sp
.ne 2
.na
\fBzfs_max_recordsize\fR (int)
.ad
.RS 12n
We currently support block sizes from 512 bytes to 16MB. The benefits of
larger blocks, and thus larger I/O, need to be weighed against the cost of
COWing a giant block to modify one byte. Additionally, very large blocks
can have an impact on i/o latency, and also potentially on the memory
allocator. Therefore, we do not allow the recordsize to be set larger than
zfs_max_recordsize (default 1MB). Larger blocks can be created by changing
this tunable, and pools with larger blocks can always be imported and used,
regardless of this setting.
.sp
Default value: \fB1,048,576\fR.
.RE

.sp
.ne 2
.na
\fBzfs_metaslab_fragmentation_threshold\fR (int)
.ad
.RS 12n
Allow metaslabs to keep their active state as long as their fragmentation
percentage is less than or equal to this value. An active metaslab that
exceeds this threshold will no longer keep its active status allowing
better metaslabs to be selected.
.sp
Default value: \fB70\fR.
.RE

.sp
.ne 2
.na
\fBzfs_mg_fragmentation_threshold\fR (int)
.ad
.RS 12n
Metaslab groups are considered eligible for allocations if their
fragmentation metric (measured as a percentage) is less than or equal to
this value. If a metaslab group exceeds this threshold then it will be
skipped unless all metaslab groups within the metaslab class have also
crossed this threshold.
.sp
Default value: \fB85\fR.
.RE

.sp
.ne 2
.na
\fBzfs_mg_noalloc_threshold\fR (int)
.ad
.RS 12n
Defines a threshold at which metaslab groups should be eligible for
allocations. The value is expressed as a percentage of free space
beyond which a metaslab group is always eligible for allocations.
If a metaslab group's free space is less than or equal to the
threshold, the allocator will avoid allocating to that group
unless all groups in the pool have reached the threshold. Once all
groups have reached the threshold, all groups are allowed to accept
allocations. The default value of 0 disables the feature and causes
all metaslab groups to be eligible for allocations.

This parameter allows one to deal with pools having heavily imbalanced
vdevs such as would be the case when a new vdev has been added.
Setting the threshold to a non-zero percentage will stop allocations
from being made to vdevs that aren't filled to the specified percentage
and allow lesser filled vdevs to acquire more allocations than they
otherwise would under the old \fBzfs_mg_alloc_failures\fR facility.
.sp
Default value: \fB0\fR.
.RE

.sp
.ne 2
.na
\fBzfs_ddt_data_is_special\fR (int)
.ad
.RS 12n
If enabled, ZFS will place DDT data into the special allocation class.
.sp
Default value: \fB1\fR.
.RE

.sp
.ne 2
.na
\fBzfs_user_indirect_is_special\fR (int)
.ad
.RS 12n
If enabled, ZFS will place user data (both file and zvol) indirect blocks
into the special allocation class.
.sp
Default value: \fB1\fR.
.RE

.sp
.ne 2
.na
\fBzfs_multihost_history\fR (int)
.ad
.RS 12n
Historical statistics for the last N multihost updates will be available in
\fB/proc/spl/kstat/zfs/<pool>/multihost\fR
.sp
Default value: \fB0\fR.
.RE

.sp
.ne 2
.na
\fBzfs_multihost_interval\fR (ulong)
.ad
.RS 12n
Used to control the frequency of multihost writes which are performed when the
\fBmultihost\fR pool property is on. This is one factor used to determine
the length of the activity check during import.
.sp
The multihost write period is \fBzfs_multihost_interval / leaf-vdevs\fR milliseconds.
This means that on average a multihost write will be issued for each leaf vdev every
\fBzfs_multihost_interval\fR milliseconds. In practice, the observed period can
vary with the I/O load and this observed value is the delay which is stored in
the uberblock.
.sp
On import the activity check waits a minimum amount of time determined by
\fBzfs_multihost_interval * zfs_multihost_import_intervals\fR. The activity
check time may be further extended if the value of mmp delay found in the best
uberblock indicates actual multihost updates happened at longer intervals than
\fBzfs_multihost_interval\fR. A minimum value of \fB100ms\fR is enforced.
.sp
Default value: \fB1000\fR.
.RE
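
.sp
.LP
The timing rules above can be sketched as follows (illustrative only; the
pool layout is an assumption):
.sp
.nf
	# Hypothetical sketch: multihost write period and activity check.
	zfs_multihost_interval = 1000          # default, milliseconds
	zfs_multihost_import_intervals = 10    # default
	leaf_vdevs = 8                         # assumed pool layout

	# A multihost write is issued to some leaf vdev this often, so each
	# leaf is visited about once per zfs_multihost_interval:
	write_period_ms = zfs_multihost_interval / leaf_vdevs    # 125 ms

	# Minimum activity-check wait during import:
	activity_check_ms = (zfs_multihost_interval *
	                     zfs_multihost_import_intervals)     # 10,000 ms
	print(write_period_ms, activity_check_ms)
.fi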

.sp
.ne 2
.na
\fBzfs_multihost_import_intervals\fR (uint)
.ad
.RS 12n
Used to control the duration of the activity test on import. Smaller values of
\fBzfs_multihost_import_intervals\fR will reduce the import time but increase
the risk of failing to detect an active pool. The total activity check time is
never allowed to drop below one second. A value of 0 is ignored and treated as
if it was set to \fB1\fR.
.sp
Default value: \fB10\fR.
.RE

.sp
.ne 2
.na
\fBzfs_multihost_fail_intervals\fR (uint)
.ad
.RS 12n
Controls the behavior of the pool when multihost write failures are detected.
.sp
When \fBzfs_multihost_fail_intervals = 0\fR then multihost write failures are
ignored. The failures will still be reported to the ZED which, depending on
its configuration, may take action such as suspending the pool or offlining a
device.
.sp
When \fBzfs_multihost_fail_intervals > 0\fR then sequential multihost write
failures will cause the pool to be suspended. This occurs when
\fBzfs_multihost_fail_intervals * zfs_multihost_interval\fR milliseconds have
passed since the last successful multihost write. This guarantees the activity
test will see multihost writes if the pool is imported.
.sp
Default value: \fB5\fR.
.RE

.sp
.ne 2
.na
\fBzfs_no_scrub_io\fR (int)
.ad
.RS 12n
Set for no scrub I/O. This results in scrubs not actually scrubbing data and
simply doing a metadata crawl of the pool instead.
.sp
Use \fB1\fR for yes and \fB0\fR for no (default).
.RE

.sp
.ne 2
.na
\fBzfs_no_scrub_prefetch\fR (int)
.ad
.RS 12n
Set to disable block prefetching for scrubs.
.sp
Use \fB1\fR for yes and \fB0\fR for no (default).
.RE

.sp
.ne 2
.na
\fBzfs_nocacheflush\fR (int)
.ad
.RS 12n
Disable cache flush operations on disks when writing. Beware, this may cause
corruption if disks re-order writes.
.sp
Use \fB1\fR for yes and \fB0\fR for no (default).
.RE

.sp
.ne 2
.na
\fBzfs_nopwrite_enabled\fR (int)
.ad
.RS 12n
Enable NOP writes
.sp
Use \fB1\fR for yes (default) and \fB0\fR to disable.
.RE

.sp
.ne 2
.na
\fBzfs_dmu_offset_next_sync\fR (int)
.ad
.RS 12n
Enable forcing txg sync to find holes. When enabled, this forces ZFS to act
like prior versions when SEEK_HOLE or SEEK_DATA flags are used: when a dnode
is dirty, txgs are synced so that the hole information can be found.
.sp
Use \fB1\fR for yes and \fB0\fR to disable (default).
.RE

.sp
.ne 2
.na
\fBzfs_pd_bytes_max\fR (int)
.ad
.RS 12n
The number of bytes which should be prefetched during a pool traversal
(eg: \fBzfs send\fR or other data crawling operations)
.sp
Default value: \fB52,428,800\fR.
.RE

.sp
.ne 2
.na
\fBzfs_per_txg_dirty_frees_percent\fR (ulong)
.ad
.RS 12n
Tunable to control percentage of dirtied blocks from frees in one TXG.
After this threshold is crossed, additional dirty blocks from frees
wait until the next TXG.
A value of zero will disable this throttle.
.sp
Default value: \fB30\fR and \fB0\fR to disable.
.RE

.sp
.ne 2
.na
\fBzfs_prefetch_disable\fR (int)
.ad
.RS 12n
This tunable disables predictive prefetch. Note that it leaves "prescient"
prefetch (e.g. prefetch for zfs send) intact. Unlike predictive prefetch,
prescient prefetch never issues i/os that end up not being needed, so it
can't hurt performance.
.sp
Use \fB1\fR for yes and \fB0\fR for no (default).
.RE

.sp
.ne 2
.na
\fBzfs_read_chunk_size\fR (long)
.ad
.RS 12n
Bytes to read per chunk
.sp
Default value: \fB1,048,576\fR.
.RE

.sp
.ne 2
.na
\fBzfs_read_history\fR (int)
.ad
.RS 12n
Historical statistics for the last N reads will be available in
\fB/proc/spl/kstat/zfs/<pool>/reads\fR
.sp
Default value: \fB0\fR (no data is kept).
.RE

.sp
.ne 2
.na
\fBzfs_read_history_hits\fR (int)
.ad
.RS 12n
Include cache hits in read history
.sp
Use \fB1\fR for yes and \fB0\fR for no (default).
.RE

.sp
.ne 2
.na
\fBzfs_reconstruct_indirect_combinations_max\fR (int)
.ad
.RS 12n
If an indirect split block contains more than this many possible unique
combinations when being reconstructed, consider it too computationally
expensive to check them all. Instead, try at most
\fBzfs_reconstruct_indirect_combinations_max\fR randomly-selected
combinations each time the block is accessed. This allows all segment
copies to participate fairly in the reconstruction when all combinations
cannot be checked and prevents repeated use of one bad copy.
.sp
Default value: \fB256\fR.
.RE

.sp
.ne 2
.na
\fBzfs_recover\fR (int)
.ad
.RS 12n
Set to attempt to recover from fatal errors. This should only be used as a
last resort, as it typically results in leaked space, or worse.
.sp
Use \fB1\fR for yes and \fB0\fR for no (default).
.RE

.sp
.ne 2
.na
\fBzfs_removal_ignore_errors\fR (int)
.ad
.RS 12n
Ignore hard IO errors during device removal. When set, if a device encounters
a hard IO error during the removal process the removal will not be cancelled.
This can result in a normally recoverable block becoming permanently damaged
and is not recommended. This should only be used as a last resort when the
pool cannot be returned to a healthy state prior to removing the device.
.sp
Default value: \fB0\fR.
.RE

.sp
.ne 2
.na
\fBzfs_resilver_min_time_ms\fR (int)
.ad
.RS 12n
Resilvers are processed by the sync thread. While resilvering it will spend
at least this much time working on a resilver between txg flushes.
.sp
Default value: \fB3,000\fR.
.RE

.sp
.ne 2
.na
\fBzfs_scan_ignore_errors\fR (int)
.ad
.RS 12n
If set to a nonzero value, remove the DTL (dirty time list) upon
completion of a pool scan (scrub) even if there were unrepairable
errors. It is intended to be used during pool repair or recovery to
stop resilvering when the pool is next imported.
.sp
Default value: \fB0\fR.
.RE

.sp
.ne 2
.na
\fBzfs_scrub_min_time_ms\fR (int)
.ad
.RS 12n
Scrubs are processed by the sync thread. While scrubbing it will spend
at least this much time working on a scrub between txg flushes.
.sp
Default value: \fB1,000\fR.
.RE
2038
2039 .sp
2040 .ne 2
2041 .na
2042 \fBzfs_scan_checkpoint_intval\fR (int)
2043 .ad
2044 .RS 12n
2045 To preserve progress across reboots, the sequential scan algorithm periodically
2046 needs to stop metadata scanning and issue all the verification I/Os to disk.
2047 The frequency of this flushing is determined by the
2048 \fBzfs_scan_checkpoint_intval\fR tunable.
2049 .sp
2050 Default value: \fB7200\fR seconds (every 2 hours).
2051 .RE
2052
2053 .sp
2054 .ne 2
2055 .na
2056 \fBzfs_scan_fill_weight\fR (int)
2057 .ad
2058 .RS 12n
2059 This tunable affects how scrub and resilver I/O segments are ordered. A higher
2060 number indicates that we care more about how filled in a segment is, while a
2061 lower number indicates we care more about the size of the extent without
2062 considering the gaps within a segment. This value is only tunable upon module
2063 insertion. Changing the value afterwards will have no effect on scrub or
2064 resilver performance.
2065 .sp
2066 Default value: \fB3\fR.
2067 .RE
2068
2069 .sp
2070 .ne 2
2071 .na
2072 \fBzfs_scan_issue_strategy\fR (int)
2073 .ad
2074 .RS 12n
2075 Determines the order that data will be verified while scrubbing or resilvering.
2076 If set to \fB1\fR, data will be verified as sequentially as possible, given the
2077 amount of memory reserved for scrubbing (see \fBzfs_scan_mem_lim_fact\fR). This
2078 may improve scrub performance if the pool's data is very fragmented. If set to
2079 \fB2\fR, the largest mostly-contiguous chunk of found data will be verified
2080 first. By deferring scrubbing of small segments, we may later find adjacent data
2081 to coalesce and increase the segment size. If set to \fB0\fR, zfs will use
2082 strategy \fB1\fR during normal verification and strategy \fB2\fR while taking a
2083 checkpoint.
2084 .sp
2085 Default value: \fB0\fR.
2086 .RE
2087
2088 .sp
2089 .ne 2
2090 .na
2091 \fBzfs_scan_legacy\fR (int)
2092 .ad
2093 .RS 12n
2094 A value of 0 indicates that scrubs and resilvers will gather metadata in
2095 memory before issuing sequential I/O. A value of 1 indicates that the legacy
2096 algorithm will be used where I/O is initiated as soon as it is discovered.
2097 Changing this value to 0 will not affect scrubs or resilvers that are already
2098 in progress.
2099 .sp
2100 Default value: \fB0\fR.
2101 .RE
2102
2103 .sp
2104 .ne 2
2105 .na
2106 \fBzfs_scan_max_ext_gap\fR (int)
2107 .ad
2108 .RS 12n
2109 Indicates the largest gap in bytes between scrub / resilver I/Os that will still
2110 be considered sequential for sorting purposes. Changing this value will not
2111 affect scrubs or resilvers that are already in progress.
2112 .sp
2113 Default value: \fB2097152 (2 MB)\fR.
2114 .RE
2115
2116 .sp
2117 .ne 2
2118 .na
2119 \fBzfs_scan_mem_lim_fact\fR (int)
2120 .ad
2121 .RS 12n
2122 Maximum fraction of RAM used for I/O sorting by sequential scan algorithm.
2123 This tunable determines the hard limit for I/O sorting memory usage.
2124 When the hard limit is reached we stop scanning metadata and start issuing
2125 data verification I/O. This is done until we get below the soft limit.
2126 .sp
2127 Default value: \fB20\fR, which is 5% of RAM (1/20).
2128 .RE
2129
2130 .sp
2131 .ne 2
2132 .na
2133 \fBzfs_scan_mem_lim_soft_fact\fR (int)
2134 .ad
2135 .RS 12n
2136 The fraction of the hard limit used to determine the soft limit for I/O sorting
2137 by the sequential scan algorithm. When we cross this limit from below, no action
2138 is taken. When we cross this limit from above it is because we are issuing
2139 verification I/O. In this case (unless the metadata scan is done) we stop
2140 issuing verification I/O and start scanning metadata again until we get to the
2141 hard limit.
2142 .sp
2143 Default value: \fB20\fR, which is 5% of the hard limit (1/20).
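.sp
As a worked example (assuming, illustratively, a machine with 32 GiB of RAM
and both tunables at their default of 20): the hard limit is
32 GiB / 20 = ~1.6 GiB of sorting memory, and the soft limit is
1.6 GiB / 20 = ~82 MiB.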
2144 .RE
2145
2146 .sp
2147 .ne 2
2148 .na
2149 \fBzfs_scan_vdev_limit\fR (int)
2150 .ad
2151 .RS 12n
2152 Maximum amount of data that can be concurrently issued for scrubs and
2153 resilvers per leaf device, given in bytes.
2154 .sp
2155 Default value: \fB41943040\fR.
2156 .RE
2157
2158 .sp
2159 .ne 2
2160 .na
2161 \fBzfs_send_corrupt_data\fR (int)
2162 .ad
2163 .RS 12n
2164 Allow sending of corrupt data (ignore read/checksum errors when sending data)
2165 .sp
2166 Use \fB1\fR for yes and \fB0\fR for no (default).
2167 .RE
2168
2169 .sp
2170 .ne 2
2171 .na
2172 \fBzfs_send_queue_length\fR (int)
2173 .ad
2174 .RS 12n
2175 The maximum number of bytes allowed in the \fBzfs send\fR queue. This value
2176 must be at least twice the maximum block size in use.
2177 .sp
2178 Default value: \fB16,777,216\fR.
2179 .RE
2180
2181 .sp
2182 .ne 2
2183 .na
2184 \fBzfs_recv_queue_length\fR (int)
2185 .ad
2186 .RS 12n
2188 The maximum number of bytes allowed in the \fBzfs receive\fR queue. This value
2189 must be at least twice the maximum block size in use.
2190 .sp
2191 Default value: \fB16,777,216\fR.
2192 .RE
2193
2194 .sp
2195 .ne 2
2196 .na
2197 \fBzfs_sync_pass_deferred_free\fR (int)
2198 .ad
2199 .RS 12n
2200 Flushing of data to disk is done in passes. Defer frees starting in this pass
2201 .sp
2202 Default value: \fB2\fR.
2203 .RE
2204
2205 .sp
2206 .ne 2
2207 .na
2208 \fBzfs_spa_discard_memory_limit\fR (int)
2209 .ad
2210 .RS 12n
2211 Maximum memory used for prefetching a checkpoint's space map on each
2212 vdev while discarding the checkpoint.
2213 .sp
2214 Default value: \fB16,777,216\fR.
2215 .RE
2216
2217 .sp
2218 .ne 2
2219 .na
2220 \fBzfs_sync_pass_dont_compress\fR (int)
2221 .ad
2222 .RS 12n
2223 Don't compress starting in this pass
2224 .sp
2225 Default value: \fB5\fR.
2226 .RE
2227
2228 .sp
2229 .ne 2
2230 .na
2231 \fBzfs_sync_pass_rewrite\fR (int)
2232 .ad
2233 .RS 12n
2234 Rewrite new block pointers starting in this pass
2235 .sp
2236 Default value: \fB2\fR.
2237 .RE
2238
2239 .sp
2240 .ne 2
2241 .na
2242 \fBzfs_sync_taskq_batch_pct\fR (int)
2243 .ad
2244 .RS 12n
2245 This controls the number of threads used by the dp_sync_taskq. The default
2246 value of 75% will create a maximum of one thread per CPU.
2247 .sp
2248 Default value: \fB75\fR%.
2249 .RE
2250
2251 .sp
2252 .ne 2
2253 .na
2254 \fBzfs_txg_history\fR (int)
2255 .ad
2256 .RS 12n
2257 Historical statistics for the last N txgs will be available in
2258 \fB/proc/spl/kstat/zfs/<pool>/txgs\fR
2259 .sp
2260 Default value: \fB0\fR.
2261 .RE
2262
2263 .sp
2264 .ne 2
2265 .na
2266 \fBzfs_txg_timeout\fR (int)
2267 .ad
2268 .RS 12n
2269 Flush dirty data to disk at least every N seconds (maximum txg duration)
2270 .sp
2271 Default value: \fB5\fR.
2272 .RE
2273
2274 .sp
2275 .ne 2
2276 .na
2277 \fBzfs_vdev_aggregation_limit\fR (int)
2278 .ad
2279 .RS 12n
2280 Max vdev I/O aggregation size
2281 .sp
2282 Default value: \fB131,072\fR.
2283 .RE
2284
2285 .sp
2286 .ne 2
2287 .na
2288 \fBzfs_vdev_cache_bshift\fR (int)
2289 .ad
2290 .RS 12n
2291 Shift size to inflate reads to, i.e. reads smaller than \fBzfs_vdev_cache_max\fR are inflated to \fB2^zfs_vdev_cache_bshift\fR bytes.
2292 .sp
2293 Default value: \fB16\fR (effectively 65536).
2294 .RE
2295
2296 .sp
2297 .ne 2
2298 .na
2299 \fBzfs_vdev_cache_max\fR (int)
2300 .ad
2301 .RS 12n
2302 Inflate reads smaller than this value to meet the \fBzfs_vdev_cache_bshift\fR
2303 size (default 64k).
2304 .sp
2305 Default value: \fB16384\fR.
2306 .RE
2307
2308 .sp
2309 .ne 2
2310 .na
2311 \fBzfs_vdev_cache_size\fR (int)
2312 .ad
2313 .RS 12n
2314 Total size of the per-disk cache in bytes.
2315 .sp
2316 Currently this feature is disabled as it has been found to not be helpful
2317 for performance and in some cases harmful.
2318 .sp
2319 Default value: \fB0\fR.
2320 .RE
2321
2322 .sp
2323 .ne 2
2324 .na
2325 \fBzfs_vdev_mirror_rotating_inc\fR (int)
2326 .ad
2327 .RS 12n
2328 A number by which the balancing algorithm increments the load calculation for
2329 the purpose of selecting the least busy mirror member when an I/O immediately
2330 follows its predecessor on rotational vdevs.
2332 .sp
2333 Default value: \fB0\fR.
2334 .RE
2335
2336 .sp
2337 .ne 2
2338 .na
2339 \fBzfs_vdev_mirror_rotating_seek_inc\fR (int)
2340 .ad
2341 .RS 12n
2342 A number by which the balancing algorithm increments the load calculation for
2343 the purpose of selecting the least busy mirror member when an I/O lacks
2344 locality as defined by \fBzfs_vdev_mirror_rotating_seek_offset\fR. I/Os within
2345 this distance that do not immediately follow the previous I/O have this
2346 increment halved.
2347 .sp
2348 Default value: \fB5\fR.
2349 .RE
2350
2351 .sp
2352 .ne 2
2353 .na
2354 \fBzfs_vdev_mirror_rotating_seek_offset\fR (int)
2355 .ad
2356 .RS 12n
2357 The maximum distance from the last queued I/O within which the balancing
2358 algorithm considers an I/O to have locality.
2359 See the section "ZFS I/O SCHEDULER".
2360 .sp
2361 Default value: \fB1048576\fR.
2362 .RE
2363
2364 .sp
2365 .ne 2
2366 .na
2367 \fBzfs_vdev_mirror_non_rotating_inc\fR (int)
2368 .ad
2369 .RS 12n
2370 A number by which the balancing algorithm increments the load calculation for
2371 the purpose of selecting the least busy mirror member on non-rotational vdevs
2372 when I/Os do not immediately follow one another.
2373 .sp
2374 Default value: \fB0\fR.
2375 .RE
2376
2377 .sp
2378 .ne 2
2379 .na
2380 \fBzfs_vdev_mirror_non_rotating_seek_inc\fR (int)
2381 .ad
2382 .RS 12n
2383 A number by which the balancing algorithm increments the load calculation for
2384 the purpose of selecting the least busy mirror member when an I/O lacks
2385 locality as defined by \fBzfs_vdev_mirror_rotating_seek_offset\fR. I/Os within
2386 this distance that do not immediately follow the previous I/O have this
2387 increment halved.
2388 .sp
2389 Default value: \fB1\fR.
2390 .RE
2391
2392 .sp
2393 .ne 2
2394 .na
2395 \fBzfs_vdev_read_gap_limit\fR (int)
2396 .ad
2397 .RS 12n
2398 Aggregate read I/O operations if the gap on-disk between them is within this
2399 threshold.
2400 .sp
2401 Default value: \fB32,768\fR.
2402 .RE
2403
2404 .sp
2405 .ne 2
2406 .na
2407 \fBzfs_vdev_scheduler\fR (charp)
2408 .ad
2409 .RS 12n
2410 Set the Linux I/O scheduler to use on whole-disk vdevs. Valid options are
2411 \fBnoop\fR, \fBcfq\fR, \fBbfq\fR, and \fBdeadline\fR.
2412 .sp
2413 Default value: \fBnoop\fR.
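.sp
For example, a sketch of a persistent setting (the configuration file name
is illustrative):
.nf
# /etc/modprobe.d/zfs.conf
options zfs zfs_vdev_scheduler=deadline
.fi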
2414 .RE
2415
2416 .sp
2417 .ne 2
2418 .na
2419 \fBzfs_vdev_write_gap_limit\fR (int)
2420 .ad
2421 .RS 12n
2422 Aggregate write I/O operations if the gap on-disk between them is within this threshold.
2423 .sp
2424 Default value: \fB4,096\fR.
2425 .RE
2426
2427 .sp
2428 .ne 2
2429 .na
2430 \fBzfs_vdev_raidz_impl\fR (string)
2431 .ad
2432 .RS 12n
2433 Parameter for selecting raidz parity implementation to use.
2434
2435 Options marked (always) below may be selected on module load as they are
2436 supported on all systems.
2437 The remaining options may only be set after the module is loaded, as they
2438 are available only if the implementations are compiled in and supported
2439 on the running system.
2440
2441 Once the module is loaded, the content of
2442 /sys/module/zfs/parameters/zfs_vdev_raidz_impl will show available options
2443 with the currently selected one enclosed in [].
2444 Possible options are:
.nf
2445   fastest        - (always) implementation selected using built-in benchmark
2446   original       - (always) original raidz implementation
2447   scalar         - (always) scalar raidz implementation
2448   sse2           - implementation using SSE2 instruction set (64bit x86 only)
2449   ssse3          - implementation using SSSE3 instruction set (64bit x86 only)
2450   avx2           - implementation using AVX2 instruction set (64bit x86 only)
2451   avx512f        - implementation using AVX512F instruction set (64bit x86 only)
2452   avx512bw       - implementation using AVX512F & AVX512BW instruction sets (64bit x86 only)
2453   aarch64_neon   - implementation using NEON (Aarch64/64 bit ARMv8 only)
2454   aarch64_neonx2 - implementation using NEON with more unrolling (Aarch64/64 bit ARMv8 only)
.fi
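.sp
For example, a hypothetical runtime session (the module must already be
loaded):
.nf
# list implementations; the selected one is shown in []
cat /sys/module/zfs/parameters/zfs_vdev_raidz_impl
# select the AVX2 implementation, if listed as available
echo avx2 > /sys/module/zfs/parameters/zfs_vdev_raidz_impl
.fi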
2455 .sp
2456 Default value: \fBfastest\fR.
2457 .RE
2458
2459 .sp
2460 .ne 2
2461 .na
2462 \fBzfs_zevent_cols\fR (int)
2463 .ad
2464 .RS 12n
2465 When zevents are logged to the console, use this as the word wrap width.
2466 .sp
2467 Default value: \fB80\fR.
2468 .RE
2469
2470 .sp
2471 .ne 2
2472 .na
2473 \fBzfs_zevent_console\fR (int)
2474 .ad
2475 .RS 12n
2476 Log events to the console
2477 .sp
2478 Use \fB1\fR for yes and \fB0\fR for no (default).
2479 .RE
2480
2481 .sp
2482 .ne 2
2483 .na
2484 \fBzfs_zevent_len_max\fR (int)
2485 .ad
2486 .RS 12n
2487 Max event queue length. A value of 0 will result in a calculated value which
2488 increases with the number of CPUs in the system (minimum 64 events). Events
2489 in the queue can be viewed with the \fBzpool events\fR command.
2490 .sp
2491 Default value: \fB0\fR.
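.sp
For example, one way to inspect the queue:
.nf
# view queued events verbosely
zpool events -v
.fi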
2492 .RE
2493
2494 .sp
2495 .ne 2
2496 .na
2497 \fBzfs_zil_clean_taskq_maxalloc\fR (int)
2498 .ad
2499 .RS 12n
2500 The maximum number of taskq entries that are allowed to be cached. When this
2501 limit is exceeded, transaction records (itxs) will be cleaned synchronously.
2502 .sp
2503 Default value: \fB1048576\fR.
2504 .RE
2505
2506 .sp
2507 .ne 2
2508 .na
2509 \fBzfs_zil_clean_taskq_minalloc\fR (int)
2510 .ad
2511 .RS 12n
2512 The number of taskq entries that are pre-populated when the taskq is first
2513 created and are immediately available for use.
2514 .sp
2515 Default value: \fB1024\fR.
2516 .RE
2517
2518 .sp
2519 .ne 2
2520 .na
2521 \fBzfs_zil_clean_taskq_nthr_pct\fR (int)
2522 .ad
2523 .RS 12n
2524 This controls the number of threads used by the dp_zil_clean_taskq. The default
2525 value of 100% will create a maximum of one thread per CPU.
2526 .sp
2527 Default value: \fB100\fR%.
2528 .RE
2529
2530 .sp
2531 .ne 2
2532 .na
2533 \fBzil_replay_disable\fR (int)
2534 .ad
2535 .RS 12n
2536 Disable intent log (ZIL) replay. Replay can be disabled to recover from a
2537 corrupted ZIL.
2538 .sp
2539 Use \fB1\fR for yes and \fB0\fR for no (default).
2540 .RE
2541
2542 .sp
2543 .ne 2
2544 .na
2545 \fBzil_slog_bulk\fR (ulong)
2546 .ad
2547 .RS 12n
2548 Limit SLOG write size per commit executed with synchronous priority.
2549 Any writes above that will be executed with lower (asynchronous) priority
2550 to limit potential SLOG device abuse by a single active ZIL writer.
2551 .sp
2552 Default value: \fB786,432\fR.
2553 .RE
2554
2555 .sp
2556 .ne 2
2557 .na
2558 \fBzio_decompress_fail_fraction\fR (int)
2559 .ad
2560 .RS 12n
2561 If non-zero, this value represents the denominator of the probability that zfs
2562 should induce a decompression failure. For instance, for a 5% decompression
2563 failure rate, this value should be set to 20.
2564 .sp
2565 Default value: \fB0\fR.
2566 .RE
2567
2568 .sp
2569 .ne 2
2570 .na
2571 \fBzio_slow_io_ms\fR (int)
2572 .ad
2573 .RS 12n
2574 When an I/O operation takes more than \fBzio_slow_io_ms\fR milliseconds
2575 to complete, it is marked as a slow I/O. Each slow I/O causes a delay
2576 zevent. Slow I/O counters can be seen with "zpool status -s".
2578 .sp
2579 Default value: \fB30,000\fR.
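.sp
For example, using a hypothetical pool named \fBtank\fR:
.nf
# display per-vdev slow I/O counters
zpool status -s tank
.fi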
2580 .RE
2581
2582 .sp
2583 .ne 2
2584 .na
2585 \fBzio_dva_throttle_enabled\fR (int)
2586 .ad
2587 .RS 12n
2588 Throttle block allocations in the I/O pipeline. This allows for
2589 dynamic allocation distribution when devices are imbalanced.
2590 When enabled, the maximum number of pending allocations per top-level vdev
2591 is limited by \fBzfs_vdev_queue_depth_pct\fR.
2592 .sp
2593 Default value: \fB1\fR.
2594 .RE
2595
2596 .sp
2597 .ne 2
2598 .na
2599 \fBzio_requeue_io_start_cut_in_line\fR (int)
2600 .ad
2601 .RS 12n
2602 Prioritize requeued I/O
2603 .sp
2604 Default value: \fB0\fR.
2605 .RE
2606
2607 .sp
2608 .ne 2
2609 .na
2610 \fBzio_taskq_batch_pct\fR (uint)
2611 .ad
2612 .RS 12n
2613 Percentage of online CPUs (or CPU cores, etc) which will run a worker thread
2614 for I/O. These workers are responsible for I/O work such as compression and
2615 checksum calculations. A fractional number of CPUs will be rounded down.
2616 .sp
2617 The default value of 75 was chosen to avoid using all CPUs which can result in
2618 latency issues and inconsistent application performance, especially when high
2619 compression is enabled.
2620 .sp
2621 Default value: \fB75\fR.
2622 .RE
2623
2624 .sp
2625 .ne 2
2626 .na
2627 \fBzvol_inhibit_dev\fR (uint)
2628 .ad
2629 .RS 12n
2630 Do not create zvol device nodes. This may slightly improve startup time on
2631 systems with a very large number of zvols.
2632 .sp
2633 Use \fB1\fR for yes and \fB0\fR for no (default).
2634 .RE
2635
2636 .sp
2637 .ne 2
2638 .na
2639 \fBzvol_major\fR (uint)
2640 .ad
2641 .RS 12n
2642 Major number for zvol block devices
2643 .sp
2644 Default value: \fB230\fR.
2645 .RE
2646
2647 .sp
2648 .ne 2
2649 .na
2650 \fBzvol_max_discard_blocks\fR (ulong)
2651 .ad
2652 .RS 12n
2653 Discard (aka TRIM) operations done on zvols will be done in batches of this
2654 many blocks, where block size is determined by the \fBvolblocksize\fR property
2655 of a zvol.
2656 .sp
2657 Default value: \fB16,384\fR.
2658 .RE
2659
2660 .sp
2661 .ne 2
2662 .na
2663 \fBzvol_prefetch_bytes\fR (uint)
2664 .ad
2665 .RS 12n
2666 When adding a zvol to the system, prefetch \fBzvol_prefetch_bytes\fR
2667 from the start and end of the volume. Prefetching these regions
2668 of the volume is desirable because they are likely to be accessed
2669 immediately by \fBblkid(8)\fR or by the kernel scanning for a partition
2670 table.
2671 .sp
2672 Default value: \fB131,072\fR.
2673 .RE
2674
2675 .sp
2676 .ne 2
2677 .na
2678 \fBzvol_request_sync\fR (uint)
2679 .ad
2680 .RS 12n
2681 When processing I/O requests for a zvol, submit them synchronously. This
2682 effectively limits the queue depth to 1 for each I/O submitter. When set
2683 to 0, requests are handled asynchronously by a thread pool. The number of
2684 requests which can be handled concurrently is controlled by \fBzvol_threads\fR.
2685 .sp
2686 Default value: \fB0\fR.
2687 .RE
2688
2689 .sp
2690 .ne 2
2691 .na
2692 \fBzvol_threads\fR (uint)
2693 .ad
2694 .RS 12n
2695 Max number of threads which can handle zvol I/O requests concurrently.
2696 .sp
2697 Default value: \fB32\fR.
2698 .RE
2699
2700 .sp
2701 .ne 2
2702 .na
2703 \fBzvol_volmode\fR (uint)
2704 .ad
2705 .RS 12n
2706 Defines the behaviour of zvol block devices when \fBvolmode\fR is set to \fBdefault\fR.
2707 Valid values are \fB1\fR (full), \fB2\fR (dev) and \fB3\fR (none).
2708 .sp
2709 Default value: \fB1\fR.
2710 .RE
2711
2712 .sp
2713 .ne 2
2714 .na
2715 \fBzfs_qat_disable\fR (int)
2716 .ad
2717 .RS 12n
2718 This tunable disables qat hardware acceleration for gzip compression and
2719 AES-GCM encryption. It is available only if qat acceleration is compiled in
2720 and the qat driver is present.
2721 .sp
2722 Use \fB1\fR for yes and \fB0\fR for no (default).
2723 .RE
2724
2725 .SH ZFS I/O SCHEDULER
2726 ZFS issues I/O operations to leaf vdevs to satisfy and complete I/Os.
2727 The I/O scheduler determines when and in what order those operations are
2728 issued. The I/O scheduler divides operations into five I/O classes
2729 prioritized in the following order: sync read, sync write, async read,
2730 async write, and scrub/resilver. Each queue defines the minimum and
2731 maximum number of concurrent operations that may be issued to the
2732 device. In addition, the device has an aggregate maximum,
2733 \fBzfs_vdev_max_active\fR. Note that the sum of the per-queue minimums
2734 must not exceed the aggregate maximum. If the sum of the per-queue
2735 maximums exceeds the aggregate maximum, then the number of active I/Os
2736 may reach \fBzfs_vdev_max_active\fR, in which case no further I/Os will
2737 be issued regardless of whether all per-queue minimums have been met.
2738 .sp
2739 For many physical devices, throughput increases with the number of
2740 concurrent operations, but latency typically suffers. Further, physical
2741 devices typically have a limit at which more concurrent operations have no
2742 effect on throughput or can actually cause it to decrease.
2743 .sp
2744 The scheduler selects the next operation to issue by first looking for an
2745 I/O class whose minimum has not been satisfied. Once all are satisfied and
2746 the aggregate maximum has not been hit, the scheduler looks for classes
2747 whose maximum has not been satisfied. Iteration through the I/O classes is
2748 done in the order specified above. No further operations are issued if the
2749 aggregate maximum number of concurrent operations has been hit or if there
2750 are no operations queued for an I/O class that has not hit its maximum.
2751 Every time an I/O is queued or an operation completes, the I/O scheduler
2752 looks for new operations to issue.
2753 .sp
2754 In general, smaller max_active's will lead to lower latency of synchronous
2755 operations. Larger max_active's may lead to higher overall throughput,
2756 depending on underlying storage.
2757 .sp
2758 The ratio of the queues' max_actives determines the balance of performance
2759 between reads, writes, and scrubs. E.g., increasing
2760 \fBzfs_vdev_scrub_max_active\fR will cause the scrub or resilver to complete
2761 more quickly, but will also cause reads and writes to have higher latency and lower throughput.
2762 .sp
2763 All I/O classes have a fixed maximum number of outstanding operations
2764 except for the async write class. Asynchronous writes represent the data
2765 that is committed to stable storage during the syncing stage for
2766 transaction groups. Transaction groups enter the syncing state
2767 periodically so the number of queued async writes will quickly burst up
2768 and then bleed down to zero. Rather than servicing them as quickly as
2769 possible, the I/O scheduler changes the maximum number of active async
2770 write I/Os according to the amount of dirty data in the pool. Since
2771 both throughput and latency typically increase with the number of
2772 concurrent operations issued to physical devices, reducing the
2773 burstiness in the number of concurrent operations also stabilizes the
2774 response time of operations from other -- and in particular synchronous
2775 -- queues. In broad strokes, the I/O scheduler will issue more
2776 concurrent operations from the async write queue as there's more dirty
2777 data in the pool.
2778 .sp
2779 Async Writes
2780 .sp
2781 The number of concurrent operations issued for the async write I/O class
2782 follows a piece-wise linear function defined by a few adjustable points.
2783 .nf
2784
2785 | o---------| <-- zfs_vdev_async_write_max_active
2786 ^ | /^ |
2787 | | / | |
2788 active | / | |
2789 I/O | / | |
2790 count | / | |
2791 | / | |
2792 |-------o | | <-- zfs_vdev_async_write_min_active
2793 0|_______^______|_________|
2794 0% | | 100% of zfs_dirty_data_max
2795 | |
2796 | `-- zfs_vdev_async_write_active_max_dirty_percent
2797 `--------- zfs_vdev_async_write_active_min_dirty_percent
2798
2799 .fi
2800 Until the amount of dirty data exceeds a minimum percentage of the dirty
2801 data allowed in the pool, the I/O scheduler will limit the number of
2802 concurrent operations to the minimum. As that threshold is crossed, the
2803 number of concurrent operations issued increases linearly to the maximum at
2804 the specified maximum percentage of the dirty data allowed in the pool.
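.sp
The ramp can be sketched numerically. The following shell fragment is
illustrative only; its inputs mirror the four tunables in the figure above,
and the values shown are example inputs rather than the documented defaults:
.nf
# max_active for the async write class at 45% dirty data, with
# min_active=1, max_active=10 and the ramp running from 30% to 60%
awk -v dirty=45 -v min_a=1 -v max_a=10 -v lo=30 -v hi=60 'BEGIN {
    if (dirty <= lo) n = min_a;
    else if (dirty >= hi) n = max_a;
    else n = min_a + (max_a - min_a) * (dirty - lo) / (hi - lo);
    print "async write max_active:", int(n);
}'
.fi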
2805 .sp
2806 Ideally, the amount of dirty data on a busy pool will stay in the sloped
2807 part of the function between \fBzfs_vdev_async_write_active_min_dirty_percent\fR
2808 and \fBzfs_vdev_async_write_active_max_dirty_percent\fR. If it exceeds the
2809 maximum percentage, this indicates that the rate of incoming data is
2810 greater than the rate that the backend storage can handle. In this case, we
2811 must further throttle incoming writes, as described in the next section.
2812
2813 .SH ZFS TRANSACTION DELAY
2814 We delay transactions when we've determined that the backend storage
2815 isn't able to accommodate the rate of incoming writes.
2816 .sp
2817 If there is already a transaction waiting, we delay relative to when
2818 that transaction will finish waiting. This way the calculated delay time
2819 is independent of the number of threads concurrently executing
2820 transactions.
2821 .sp
2822 If we are the only waiter, wait relative to when the transaction
2823 started, rather than the current time. This credits the transaction for
2824 "time already served", e.g. reading indirect blocks.
2825 .sp
2826 The minimum time for a transaction to take is calculated as:
2827 .nf
2828 min_time = zfs_delay_scale * (dirty - min) / (max - dirty)
2829 min_time is then capped at 100 milliseconds.
2830 .fi
2831 .sp
2832 The delay has two degrees of freedom that can be adjusted via tunables. The
2833 percentage of dirty data at which we start to delay is defined by
2834 \fBzfs_delay_min_dirty_percent\fR. This should typically be at or above
2835 \fBzfs_vdev_async_write_active_max_dirty_percent\fR so that we only start to
2836 delay after writing at full speed has failed to keep up with the incoming write
2837 rate. The scale of the curve is defined by \fBzfs_delay_scale\fR. Roughly speaking,
2838 this variable determines the amount of delay at the midpoint of the curve.
2839 .sp
2840 .nf
2841 delay
2842 10ms +-------------------------------------------------------------*+
2843 | *|
2844 9ms + *+
2845 | *|
2846 8ms + *+
2847 | * |
2848 7ms + * +
2849 | * |
2850 6ms + * +
2851 | * |
2852 5ms + * +
2853 | * |
2854 4ms + * +
2855 | * |
2856 3ms + * +
2857 | * |
2858 2ms + (midpoint) * +
2859 | | ** |
2860 1ms + v *** +
2861 | zfs_delay_scale ----------> ******** |
2862 0 +-------------------------------------*********----------------+
2863 0% <- zfs_dirty_data_max -> 100%
2864 .fi
2865 .sp
2866 Note that since the delay is added to the outstanding time remaining on the
2867 most recent transaction, the delay is effectively the inverse of IOPS.
2868 Here the midpoint of 500us translates to 2000 IOPS. The shape of the curve
2869 was chosen such that small changes in the amount of accumulated dirty data
2870 in the first 3/4 of the curve yield relatively small differences in the
2871 amount of delay.
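.sp
As a worked example of the formula above, with dirty data expressed as a
percentage of \fBzfs_dirty_data_max\fR and assuming, illustratively, that
delays begin at 60% dirty: at 80% dirty, the midpoint of the remaining
range,
.nf
    min_time = zfs_delay_scale * (80 - 60) / (100 - 80) = zfs_delay_scale
.fi
so a \fBzfs_delay_scale\fR of 500,000ns yields the 500us (2000 IOPS)
midpoint described above.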
2872 .sp
2873 The effects can be easier to understand when the amount of delay is
2874 represented on a log scale:
2875 .sp
2876 .nf
2877 delay
2878 100ms +-------------------------------------------------------------++
2879 + +
2880 | |
2881 + *+
2882 10ms + *+
2883 + ** +
2884 | (midpoint) ** |
2885 + | ** +
2886 1ms + v **** +
2887 + zfs_delay_scale ----------> ***** +
2888 | **** |
2889 + **** +
2890 100us + ** +
2891 + * +
2892 | * |
2893 + * +
2894 10us + * +
2895 + +
2896 | |
2897 + +
2898 +--------------------------------------------------------------+
2899 0% <- zfs_dirty_data_max -> 100%
2900 .fi
2901 .sp
2902 Note here that only as the amount of dirty data approaches its limit does
2903 the delay start to increase rapidly. The goal of a properly tuned system
2904 should be to keep the amount of dirty data out of that range by first
2905 ensuring that the appropriate limits are set for the I/O scheduler to reach
2906 optimal throughput on the backend storage, and then by changing the value
2907 of \fBzfs_delay_scale\fR to increase the steepness of the curve.