1 '\" te
2 .\" Copyright (c) 2013 by Turbo Fredriksson <turbo@bayour.com>. All rights reserved.
3 .\" The contents of this file are subject to the terms of the Common Development
4 .\" and Distribution License (the "License"). You may not use this file except
5 .\" in compliance with the License. You can obtain a copy of the license at
6 .\" usr/src/OPENSOLARIS.LICENSE or http://www.opensolaris.org/os/licensing.
7 .\"
8 .\" See the License for the specific language governing permissions and
9 .\" limitations under the License. When distributing Covered Code, include this
10 .\" CDDL HEADER in each file and include the License file at
11 .\" usr/src/OPENSOLARIS.LICENSE. If applicable, add the following below this
12 .\" CDDL HEADER, with the fields enclosed by brackets "[]" replaced with your
13 .\" own identifying information:
14 .\" Portions Copyright [yyyy] [name of copyright owner]
15 .TH ZFS-MODULE-PARAMETERS 5 "Nov 16, 2013"
16 .SH NAME
17 zfs\-module\-parameters \- ZFS module parameters
18 .SH DESCRIPTION
19 .sp
20 .LP
21 Description of the different parameters to the ZFS module.
22
23 .SS "Module parameters"
24 .sp
25 .LP
26
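.sp
On Linux the current value of each parameter is normally exposed under
\fB/sys/module/zfs/parameters/\fR once the module is loaded, and writable
parameters may be changed by writing to the corresponding file as root.
The following minimal Python sketch is not part of ZFS and uses
\fBzfs_txg_timeout\fR purely as an example; it reads one value:
.sp
.nf
# Read the current value of a ZFS module parameter (assumes the zfs
# module is loaded; zfs_txg_timeout is used only as an example).
from pathlib import Path

param = Path("/sys/module/zfs/parameters/zfs_txg_timeout")
print(param.read_text().strip())
.fi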
27 .sp
28 .ne 2
29 .na
30 \fBl2arc_feed_again\fR (int)
31 .ad
32 .RS 12n
33 Turbo L2ARC warmup
34 .sp
35 Use \fB1\fR for yes (default) and \fB0\fR to disable.
36 .RE
37
38 .sp
39 .ne 2
40 .na
41 \fBl2arc_feed_min_ms\fR (ulong)
42 .ad
43 .RS 12n
44 Min feed interval in milliseconds
45 .sp
46 Default value: \fB200\fR.
47 .RE
48
49 .sp
50 .ne 2
51 .na
52 \fBl2arc_feed_secs\fR (ulong)
53 .ad
54 .RS 12n
55 Seconds between L2ARC writing
56 .sp
57 Default value: \fB1\fR.
58 .RE
59
60 .sp
61 .ne 2
62 .na
63 \fBl2arc_headroom\fR (ulong)
64 .ad
65 .RS 12n
66 Number of max device writes to precache
67 .sp
68 Default value: \fB2\fR.
69 .RE
70
71 .sp
72 .ne 2
73 .na
74 \fBl2arc_headroom_boost\fR (ulong)
75 .ad
76 .RS 12n
77 Compressed l2arc_headroom multiplier
78 .sp
79 Default value: \fB200\fR.
80 .RE
81
82 .sp
83 .ne 2
84 .na
85 \fBl2arc_max_block_size\fR (ulong)
86 .ad
87 .RS 12n
88 The maximum block size which may be written to an L2ARC device, after
89 compression and other factors. This setting is used to prevent a small
90 number of large blocks from pushing a larger number of small blocks out
91 of the cache.
92 .sp
93 Default value: \fB16,777,216\fR.
94 .RE
95
96 .sp
97 .ne 2
98 .na
99 \fBl2arc_nocompress\fR (int)
100 .ad
101 .RS 12n
102 Skip compressing L2ARC buffers
103 .sp
104 Use \fB1\fR for yes and \fB0\fR for no (default).
105 .RE
106
107 .sp
108 .ne 2
109 .na
110 \fBl2arc_noprefetch\fR (int)
111 .ad
112 .RS 12n
113 Skip caching prefetched buffers
114 .sp
115 Use \fB1\fR for yes (default) and \fB0\fR to disable.
116 .RE
117
118 .sp
119 .ne 2
120 .na
121 \fBl2arc_norw\fR (int)
122 .ad
123 .RS 12n
124 No reads during writes
125 .sp
126 Use \fB1\fR for yes and \fB0\fR for no (default).
127 .RE
128
129 .sp
130 .ne 2
131 .na
132 \fBl2arc_write_boost\fR (ulong)
133 .ad
134 .RS 12n
135 Extra write bytes during device warmup
136 .sp
137 Default value: \fB8,388,608\fR.
138 .RE
139
140 .sp
141 .ne 2
142 .na
143 \fBl2arc_write_max\fR (ulong)
144 .ad
145 .RS 12n
146 Max write bytes per interval
147 .sp
148 Default value: \fB8,388,608\fR.
149 .RE
150
151 .sp
152 .ne 2
153 .na
154 \fBmetaslab_aliquot\fR (ulong)
155 .ad
156 .RS 12n
157 Metaslab granularity, in bytes. This is roughly similar to what would be
158 referred to as the "stripe size" in traditional RAID arrays. In normal
159 operation, ZFS will try to write this amount of data to a top-level vdev
160 before moving on to the next one.
161 .sp
162 Default value: \fB524,288\fR.
163 .RE
164
165 .sp
166 .ne 2
167 .na
168 \fBmetaslab_bias_enabled\fR (int)
169 .ad
170 .RS 12n
171 Enable metaslab group biasing based on its vdev's over- or under-utilization
172 relative to the pool.
173 .sp
174 Use \fB1\fR for yes (default) and \fB0\fR for no.
175 .RE
176
177 .sp
178 .ne 2
179 .na
180 \fBmetaslab_debug_load\fR (int)
181 .ad
182 .RS 12n
183 Load all metaslabs during pool import.
184 .sp
185 Use \fB1\fR for yes and \fB0\fR for no (default).
186 .RE
187
188 .sp
189 .ne 2
190 .na
191 \fBmetaslab_debug_unload\fR (int)
192 .ad
193 .RS 12n
194 Prevent metaslabs from being unloaded.
195 .sp
196 Use \fB1\fR for yes and \fB0\fR for no (default).
197 .RE
198
199 .sp
200 .ne 2
201 .na
202 \fBmetaslab_fragmentation_factor_enabled\fR (int)
203 .ad
204 .RS 12n
205 Enable use of the fragmentation metric in computing metaslab weights.
206 .sp
207 Use \fB1\fR for yes (default) and \fB0\fR for no.
208 .RE
209
210 .sp
211 .ne 2
212 .na
213 \fBmetaslabs_per_vdev\fR (int)
214 .ad
215 .RS 12n
216 When a vdev is added, it will be divided into approximately (but no more than) this number of metaslabs.
217 .sp
218 Default value: \fB200\fR.
219 .RE
220
221 .sp
222 .ne 2
223 .na
224 \fBmetaslab_preload_enabled\fR (int)
225 .ad
226 .RS 12n
227 Enable metaslab group preloading.
228 .sp
229 Use \fB1\fR for yes (default) and \fB0\fR for no.
230 .RE
231
232 .sp
233 .ne 2
234 .na
235 \fBmetaslab_lba_weighting_enabled\fR (int)
236 .ad
237 .RS 12n
238 Give more weight to metaslabs with lower LBAs, assuming they have
239 greater bandwidth as is typically the case on a modern constant
240 angular velocity disk drive.
241 .sp
242 Use \fB1\fR for yes (default) and \fB0\fR for no.
243 .RE
244
245 .sp
246 .ne 2
247 .na
248 \fBspa_config_path\fR (charp)
249 .ad
250 .RS 12n
251 SPA config file
252 .sp
253 Default value: \fB/etc/zfs/zpool.cache\fR.
254 .RE
255
256 .sp
257 .ne 2
258 .na
259 \fBspa_asize_inflation\fR (int)
260 .ad
261 .RS 12n
262 Multiplication factor used to estimate actual disk consumption from the
263 size of data being written. The default value is a worst case estimate,
264 but lower values may be valid for a given pool depending on its
265 configuration. Pool administrators who understand the factors involved
266 may wish to specify a more realistic inflation factor, particularly if
267 they operate close to quota or capacity limits.
268 .sp
269 Default value: \fB24\fR.
270 .RE
271
272 .sp
273 .ne 2
274 .na
275 \fBspa_load_verify_data\fR (int)
276 .ad
277 .RS 12n
278 Whether to traverse data blocks during an "extreme rewind" (\fB-X\fR)
279 import. Use 0 to disable and 1 to enable.
280
281 An extreme rewind import normally performs a full traversal of all
282 blocks in the pool for verification. If this parameter is set to 0,
283 the traversal skips non-metadata blocks. It can be toggled once the
284 import has started to stop or start the traversal of non-metadata blocks.
285 .sp
286 Default value: \fB1\fR.
287 .RE
288
289 .sp
290 .ne 2
291 .na
292 \fBspa_load_verify_metadata\fR (int)
293 .ad
294 .RS 12n
295 Whether to traverse blocks during an "extreme rewind" (\fB-X\fR)
296 pool import. Use 0 to disable and 1 to enable.
297
298 An extreme rewind import normally performs a full traversal of all
299 blocks in the pool for verification. If this parameter is set to 0,
300 the traversal is not performed. It can be toggled once the import has
301 started to stop or start the traversal.
302 .sp
303 Default value: \fB1\fR.
304 .RE
305
306 .sp
307 .ne 2
308 .na
309 \fBspa_load_verify_maxinflight\fR (int)
310 .ad
311 .RS 12n
312 Maximum concurrent I/Os during the traversal performed during an "extreme
313 rewind" (\fB-X\fR) pool import.
314 .sp
315 Default value: \fB10,000\fR.
316 .RE
317
318 .sp
319 .ne 2
320 .na
321 \fBspa_slop_shift\fR (int)
322 .ad
323 .RS 12n
324 Normally, we don't allow the last 1/(2^spa_slop_shift) of space in the
325 pool (about 3.1% with the default value of 5) to be consumed. This
326 ensures that we don't run the pool completely out of space, due to
327 unaccounted changes (e.g. to the MOS). It also limits the worst-case
328 time to allocate space. If we have less than this amount of free space,
329 most ZPL operations (e.g. write, create) will return ENOSPC. See the example below.
330 .sp
331 Default value: \fB5\fR.
332 .RE
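.sp
As a rough illustration of the arithmetic above, the space withheld by
\fBspa_slop_shift\fR can be estimated as follows. This is a sketch only
(it ignores any internal minimum ZFS may apply), and the 10 TiB pool size
is an assumption:
.sp
.nf
# Estimate the slop space withheld from a pool.
spa_slop_shift = 5                    # default
pool_size = 10 * 2**40                # assumed pool size: 10 TiB
slop = pool_size >> spa_slop_shift    # pool_size / 2**spa_slop_shift
print(slop, "bytes reserved,", 100 / 2**spa_slop_shift, "percent")
.fi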
333
334 .sp
335 .ne 2
336 .na
337 \fBzfetch_array_rd_sz\fR (ulong)
338 .ad
339 .RS 12n
340 If prefetching is enabled, disable prefetching for reads larger than this size.
341 .sp
342 Default value: \fB1,048,576\fR.
343 .RE
344
345 .sp
346 .ne 2
347 .na
348 \fBzfetch_max_distance\fR (uint)
349 .ad
350 .RS 12n
351 Max bytes to prefetch per stream (default 8MB).
352 .sp
353 Default value: \fB8,388,608\fR.
354 .RE
355
356 .sp
357 .ne 2
358 .na
359 \fBzfetch_max_streams\fR (uint)
360 .ad
361 .RS 12n
362 Max number of streams per zfetch (prefetch streams per file).
363 .sp
364 Default value: \fB8\fR.
365 .RE
366
367 .sp
368 .ne 2
369 .na
370 \fBzfetch_min_sec_reap\fR (uint)
371 .ad
372 .RS 12n
373 Min time before an active prefetch stream can be reclaimed
374 .sp
375 Default value: \fB2\fR.
376 .RE
377
378 .sp
379 .ne 2
380 .na
381 \fBzfs_arc_average_blocksize\fR (int)
382 .ad
383 .RS 12n
384 The ARC's buffer hash table is sized based on the assumption of an average
385 block size of \fBzfs_arc_average_blocksize\fR (default 8K). This works out
386 to roughly 1MB of hash table per 1GB of physical memory with 8-byte pointers.
387 For configurations with a known larger average block size this value can be
388 increased to reduce the memory footprint.
389
390 .sp
391 Default value: \fB8192\fR.
392 .RE
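.sp
A back-of-the-envelope sketch of the sizing described above, not the exact
in-kernel calculation; the 16 GiB of physical memory is an assumption:
.sp
.nf
# Approximate ARC hash table footprint: one 8-byte pointer per
# expected block of physical memory.
phys_mem = 16 * 2**30                 # assumed: 16 GiB of physical memory
zfs_arc_average_blocksize = 8192      # default
buckets = phys_mem // zfs_arc_average_blocksize
print(buckets * 8 // 2**20, "MiB of hash table")   # 16 MiB, ~1 MiB per GiB
.fi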
393
394 .sp
395 .ne 2
396 .na
397 \fBzfs_arc_evict_batch_limit\fR (int)
398 .ad
399 .RS 12n
400 Number of ARC headers to evict per sub-list before proceeding to another sub-list.
401 This batch-style operation prevents entire sub-lists from being evicted at once
402 but comes at a cost of additional unlocking and locking.
403 .sp
404 Default value: \fB10\fR.
405 .RE
406
407 .sp
408 .ne 2
409 .na
410 \fBzfs_arc_grow_retry\fR (int)
411 .ad
412 .RS 12n
413 Seconds before growing arc size
414 .sp
415 Default value: \fB5\fR.
416 .RE
417
418 .sp
419 .ne 2
420 .na
421 \fBzfs_arc_lotsfree_percent\fR (int)
422 .ad
423 .RS 12n
424 Throttle I/O when free system memory drops below this percentage of total
425 system memory. Setting this value to 0 will disable the throttle.
426 .sp
427 Default value: \fB10\fR.
428 .RE
429
430 .sp
431 .ne 2
432 .na
433 \fBzfs_arc_max\fR (ulong)
434 .ad
435 .RS 12n
436 Max arc size
437 .sp
438 Default value: \fB0\fR.
439 .RE
440
441 .sp
442 .ne 2
443 .na
444 \fBzfs_arc_meta_limit\fR (ulong)
445 .ad
446 .RS 12n
447 The maximum size in bytes that meta data buffers are allowed to
448 consume in the ARC. When this limit is reached, meta data buffers will
449 be reclaimed even if the overall arc_c_max has not been reached. This
450 value defaults to 0 which indicates that 3/4 of the ARC may be used
451 for meta data.
452 .sp
453 Default value: \fB0\fR.
454 .RE
455
456 .sp
457 .ne 2
458 .na
459 \fBzfs_arc_meta_min\fR (ulong)
460 .ad
461 .RS 12n
462 The minimum allowed size in bytes that meta data buffers may consume in
463 the ARC. This value defaults to 0, which disables a floor on the amount
464 of the ARC devoted to meta data.
465 .sp
466 Default value: \fB0\fR.
467 .RE
468
469 .sp
470 .ne 2
471 .na
472 \fBzfs_arc_meta_prune\fR (int)
473 .ad
474 .RS 12n
475 The number of dentries and inodes to be scanned looking for entries
476 which can be dropped. This may be required when the ARC reaches the
477 \fBzfs_arc_meta_limit\fR because dentries and inodes can pin buffers
478 in the ARC. Increasing this value will cause the dentry and inode caches
479 to be pruned more aggressively. Setting this value to 0 will disable
480 pruning the inode and dentry caches.
481 .sp
482 Default value: \fB10,000\fR.
483 .RE
484
485 .sp
486 .ne 2
487 .na
488 \fBzfs_arc_meta_adjust_restarts\fR (ulong)
489 .ad
490 .RS 12n
491 The number of restart passes to make while scanning the ARC, attempting
492 to free buffers in order to stay below the \fBzfs_arc_meta_limit\fR.
493 This value should not need to be tuned but is available to facilitate
494 performance analysis.
495 .sp
496 Default value: \fB4096\fR.
497 .RE
498
499 .sp
500 .ne 2
501 .na
502 \fBzfs_arc_min\fR (ulong)
503 .ad
504 .RS 12n
505 Min arc size
506 .sp
507 Default value: \fB100\fR.
508 .RE
509
510 .sp
511 .ne 2
512 .na
513 \fBzfs_arc_min_prefetch_lifespan\fR (int)
514 .ad
515 .RS 12n
516 Min life of prefetch block
517 .sp
518 Default value: \fB100\fR.
519 .RE
520
521 .sp
522 .ne 2
523 .na
524 \fBzfs_arc_num_sublists_per_state\fR (int)
525 .ad
526 .RS 12n
527 To allow more fine-grained locking, each ARC state contains a series
528 of lists for both data and meta data objects. Locking is performed at
529 the level of these "sub-lists". This parameter controls the number of
530 sub-lists per ARC state.
531 .sp
532 Default value: 1 or the number of online CPUs, whichever is greater.
533 .RE
534
535 .sp
536 .ne 2
537 .na
538 \fBzfs_arc_overflow_shift\fR (int)
539 .ad
540 .RS 12n
541 The ARC size is considered to be overflowing if it exceeds the current
542 ARC target size (arc_c) by a threshold determined by this parameter.
543 The threshold is calculated as a fraction of arc_c using the formula
544 "arc_c >> \fBzfs_arc_overflow_shift\fR".
545
546 The default value of 8 causes the ARC to be considered to be overflowing
547 if it exceeds the target size by 1/256th (about 0.4%) of the target size.
548
549 When the ARC is overflowing, new buffer allocations are stalled until
550 the reclaim thread catches up and the overflow condition no longer exists.
551 .sp
552 Default value: \fB8\fR.
553 .RE
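.sp
The following sketch applies the formula above to an example; the 4 GiB
target ARC size and the current ARC size are assumptions for illustration:
.sp
.nf
# Overflow threshold is arc_c >> zfs_arc_overflow_shift.
arc_c = 4 * 2**30                     # assumed ARC target size: 4 GiB
zfs_arc_overflow_shift = 8            # default
threshold = arc_c >> zfs_arc_overflow_shift
print(threshold // 2**20, "MiB")      # 16 MiB, about 0.4% of arc_c
arc_size = arc_c + 32 * 2**20         # assumed current ARC size
print(arc_size > arc_c + threshold)   # True: the ARC would be overflowing
.fi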
554
555 .sp
556 .ne 2
557 .na
558
559 \fBzfs_arc_p_min_shift\fR (int)
560 .ad
561 .RS 12n
562 arc_c shift to calc min/max arc_p
563 .sp
564 Default value: \fB4\fR.
565 .RE
566
567 .sp
568 .ne 2
569 .na
570 \fBzfs_arc_p_aggressive_disable\fR (int)
571 .ad
572 .RS 12n
573 Disable aggressive arc_p growth
574 .sp
575 Use \fB1\fR for yes (default) and \fB0\fR to disable.
576 .RE
577
578 .sp
579 .ne 2
580 .na
581 \fBzfs_arc_p_dampener_disable\fR (int)
582 .ad
583 .RS 12n
584 Disable arc_p adapt dampener
585 .sp
586 Use \fB1\fR for yes (default) and \fB0\fR to disable.
587 .RE
588
589 .sp
590 .ne 2
591 .na
592 \fBzfs_arc_shrink_shift\fR (int)
593 .ad
594 .RS 12n
595 log2(fraction of arc to reclaim)
596 .sp
597 Default value: \fB5\fR.
598 .RE
599
600 .sp
601 .ne 2
602 .na
603 \fBzfs_arc_sys_free\fR (ulong)
604 .ad
605 .RS 12n
606 The target number of bytes the ARC should leave as free memory on the system.
607 Defaults to the larger of 1/64 of physical memory or 512K. Setting this
608 option to a non-zero value will override the default.
609 .sp
610 Default value: \fB0\fR.
611 .RE
612
613 .sp
614 .ne 2
615 .na
616 \fBzfs_autoimport_disable\fR (int)
617 .ad
618 .RS 12n
619 Disable pool import at module load by ignoring the cache file (typically \fB/etc/zfs/zpool.cache\fR).
620 .sp
621 Use \fB1\fR for yes (default) and \fB0\fR for no.
622 .RE
623
624 .sp
625 .ne 2
626 .na
627 \fBzfs_dbgmsg_enable\fR (int)
628 .ad
629 .RS 12n
630 Internally ZFS keeps a small log to facilitate debugging. By default the log
631 is disabled, to enable it set this option to 1. The contents of the log can
632 be accessed by reading the /proc/spl/kstat/zfs/dbgmsg file. Writing 0 to
633 this proc file clears the log.
634 .sp
635 Default value: \fB0\fR.
636 .RE
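.sp
The log can also be read and cleared programmatically. A minimal Python
sketch, assuming the module is loaded, \fBzfs_dbgmsg_enable\fR is set to 1,
and the caller has sufficient privileges to write to the proc file:
.sp
.nf
# Dump the internal ZFS debug log, then clear it by writing 0.
from pathlib import Path

dbgmsg = Path("/proc/spl/kstat/zfs/dbgmsg")
print(dbgmsg.read_text())
dbgmsg.write_text("0")                # writing 0 clears the log
.fi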
637
638 .sp
639 .ne 2
640 .na
641 \fBzfs_dbgmsg_maxsize\fR (int)
642 .ad
643 .RS 12n
644 The maximum size in bytes of the internal ZFS debug log.
645 .sp
646 Default value: \fB4M\fR.
647 .RE
648
649 .sp
650 .ne 2
651 .na
652 \fBzfs_dbuf_state_index\fR (int)
653 .ad
654 .RS 12n
655 Calculate arc header index
656 .sp
657 Default value: \fB0\fR.
658 .RE
659
660 .sp
661 .ne 2
662 .na
663 \fBzfs_deadman_enabled\fR (int)
664 .ad
665 .RS 12n
666 Enable deadman timer
667 .sp
668 Use \fB1\fR for yes (default) and \fB0\fR to disable.
669 .RE
670
671 .sp
672 .ne 2
673 .na
674 \fBzfs_deadman_synctime_ms\fR (ulong)
675 .ad
676 .RS 12n
677 Expiration time in milliseconds. This value has two meanings. First, it is
678 used to determine when the spa_deadman() logic should fire. By default,
679 spa_deadman() will fire if spa_sync() has not completed in 1000 seconds.
680 Secondly, the value determines if an I/O is considered "hung". Any I/O that
681 has not completed within \fBzfs_deadman_synctime_ms\fR is considered "hung",
682 resulting in a zevent being logged.
683 .sp
684 Default value: \fB1,000,000\fR.
685 .RE
686
687 .sp
688 .ne 2
689 .na
690 \fBzfs_dedup_prefetch\fR (int)
691 .ad
692 .RS 12n
693 Enable prefetching dedup-ed blks
694 .sp
695 Use \fB1\fR for yes and \fB0\fR to disable (default).
696 .RE
697
698 .sp
699 .ne 2
700 .na
701 \fBzfs_delay_min_dirty_percent\fR (int)
702 .ad
703 .RS 12n
704 Start to delay each transaction once there is this amount of dirty data,
705 expressed as a percentage of \fBzfs_dirty_data_max\fR.
706 This value should be >= zfs_vdev_async_write_active_max_dirty_percent.
707 See the section "ZFS TRANSACTION DELAY".
708 .sp
709 Default value: \fB60\fR.
710 .RE
711
712 .sp
713 .ne 2
714 .na
715 \fBzfs_delay_scale\fR (int)
716 .ad
717 .RS 12n
718 This controls how quickly the transaction delay approaches infinity.
719 Larger values cause longer delays for a given amount of dirty data.
720 .sp
721 For the smoothest delay, this value should be about 1 billion divided
722 by the maximum number of operations per second. This will smoothly
723 handle between 10x and 1/10th this number.
724 .sp
725 See the section "ZFS TRANSACTION DELAY".
726 .sp
727 Note: \fBzfs_delay_scale\fR * \fBzfs_dirty_data_max\fR must be < 2^64.
728 .sp
729 Default value: \fB500,000\fR.
730 .RE
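.sp
A worked example of the guidance above; the 2000 operations per second is
only an assumed backend capability:
.sp
.nf
# Choose zfs_delay_scale for a target maximum operation rate.
target_ops_per_sec = 2000             # assumed backend capability
zfs_delay_scale = 10**9 // target_ops_per_sec
print(zfs_delay_scale)                # 500000, which is the default
.fi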
731
732 .sp
733 .ne 2
734 .na
735 \fBzfs_delete_blocks\fR (ulong)
736 .ad
737 .RS 12n
738 This value is used to define a large file for the purposes of delete. Files
739 containing more than \fBzfs_delete_blocks\fR blocks will be deleted asynchronously
740 while smaller files are deleted synchronously. Decreasing this value will
741 reduce the time spent in an unlink(2) system call at the expense of a longer
742 delay before the freed space is available.
743 .sp
744 Default value: \fB20,480\fR.
745 .RE
746
747 .sp
748 .ne 2
749 .na
750 \fBzfs_dirty_data_max\fR (int)
751 .ad
752 .RS 12n
753 Determines the dirty space limit in bytes. Once this limit is exceeded, new
754 writes are halted until space frees up. This parameter takes precedence
755 over \fBzfs_dirty_data_max_percent\fR.
756 See the section "ZFS TRANSACTION DELAY".
757 .sp
758 Default value: 10 percent of all memory, capped at \fBzfs_dirty_data_max_max\fR.
759 .RE
760
761 .sp
762 .ne 2
763 .na
764 \fBzfs_dirty_data_max_max\fR (int)
765 .ad
766 .RS 12n
767 Maximum allowable value of \fBzfs_dirty_data_max\fR, expressed in bytes.
768 This limit is only enforced at module load time, and will be ignored if
769 \fBzfs_dirty_data_max\fR is later changed. This parameter takes
770 precedence over \fBzfs_dirty_data_max_max_percent\fR. See the section
771 "ZFS TRANSACTION DELAY".
772 .sp
773 Default value: 25% of physical RAM.
774 .RE
775
776 .sp
777 .ne 2
778 .na
779 \fBzfs_dirty_data_max_max_percent\fR (int)
780 .ad
781 .RS 12n
782 Maximum allowable value of \fBzfs_dirty_data_max\fR, expressed as a
783 percentage of physical RAM. This limit is only enforced at module load
784 time, and will be ignored if \fBzfs_dirty_data_max\fR is later changed.
785 The parameter \fBzfs_dirty_data_max_max\fR takes precedence over this
786 one. See the section "ZFS TRANSACTION DELAY".
787 .sp
788 Default value: \fB25\fR.
789 .RE
790
791 .sp
792 .ne 2
793 .na
794 \fBzfs_dirty_data_max_percent\fR (int)
795 .ad
796 .RS 12n
797 Determines the dirty space limit, expressed as a percentage of all
798 memory. Once this limit is exceeded, new writes are halted until space frees
799 up. The parameter \fBzfs_dirty_data_max\fR takes precedence over this
800 one. See the section "ZFS TRANSACTION DELAY".
801 .sp
802 Default value: 10%, subject to \fBzfs_dirty_data_max_max\fR.
803 .RE
804
805 .sp
806 .ne 2
807 .na
808 \fBzfs_dirty_data_sync\fR (int)
809 .ad
810 .RS 12n
811 Start syncing out a transaction group if there is at least this much dirty data.
812 .sp
813 Default value: \fB67,108,864\fR.
814 .RE
815
816 .sp
817 .ne 2
818 .na
819 \fBzfs_free_bpobj_enabled\fR (int)
820 .ad
821 .RS 12n
822 Enable/disable the processing of the free_bpobj object.
823 .sp
824 Default value: \fB1\fR.
825 .RE
826
827 .sp
828 .ne 2
829 .na
830 \fBzfs_free_max_blocks\fR (ulong)
831 .ad
832 .RS 12n
833 Maximum number of blocks freed in a single txg.
834 .sp
835 Default value: \fB100,000\fR.
836 .RE
837
838 .sp
839 .ne 2
840 .na
841 \fBzfs_vdev_async_read_max_active\fR (int)
842 .ad
843 .RS 12n
844 Maximum asynchronous read I/Os active to each device.
845 See the section "ZFS I/O SCHEDULER".
846 .sp
847 Default value: \fB3\fR.
848 .RE
849
850 .sp
851 .ne 2
852 .na
853 \fBzfs_vdev_async_read_min_active\fR (int)
854 .ad
855 .RS 12n
856 Minimum asynchronous read I/Os active to each device.
857 See the section "ZFS I/O SCHEDULER".
858 .sp
859 Default value: \fB1\fR.
860 .RE
861
862 .sp
863 .ne 2
864 .na
865 \fBzfs_vdev_async_write_active_max_dirty_percent\fR (int)
866 .ad
867 .RS 12n
868 When the pool has more than
869 \fBzfs_vdev_async_write_active_max_dirty_percent\fR dirty data, use
870 \fBzfs_vdev_async_write_max_active\fR to limit active async writes. If
871 the dirty data is between min and max, the active I/O limit is linearly
872 interpolated. See the section "ZFS I/O SCHEDULER".
873 .sp
874 Default value: \fB60\fR.
875 .RE
876
877 .sp
878 .ne 2
879 .na
880 \fBzfs_vdev_async_write_active_min_dirty_percent\fR (int)
881 .ad
882 .RS 12n
883 When the pool has less than
884 \fBzfs_vdev_async_write_active_min_dirty_percent\fR dirty data, use
885 \fBzfs_vdev_async_write_min_active\fR to limit active async writes. If
886 the dirty data is between min and max, the active I/O limit is linearly
887 interpolated. See the section "ZFS I/O SCHEDULER".
888 .sp
889 Default value: \fB30\fR.
890 .RE
891
892 .sp
893 .ne 2
894 .na
895 \fBzfs_vdev_async_write_max_active\fR (int)
896 .ad
897 .RS 12n
898 Maximum asynchronous write I/Os active to each device.
899 See the section "ZFS I/O SCHEDULER".
900 .sp
901 Default value: \fB10\fR.
902 .RE
903
904 .sp
905 .ne 2
906 .na
907 \fBzfs_vdev_async_write_min_active\fR (int)
908 .ad
909 .RS 12n
910 Minimum asynchronous write I/Os active to each device.
911 See the section "ZFS I/O SCHEDULER".
912 .sp
913 Default value: \fB1\fR.
914 .RE
915
916 .sp
917 .ne 2
918 .na
919 \fBzfs_vdev_max_active\fR (int)
920 .ad
921 .RS 12n
922 The maximum number of I/Os active to each device. Ideally, this will be >=
923 the sum of each queue's max_active. It must be at least the sum of each
924 queue's min_active. See the section "ZFS I/O SCHEDULER".
925 .sp
926 Default value: \fB1,000\fR.
927 .RE
928
929 .sp
930 .ne 2
931 .na
932 \fBzfs_vdev_scrub_max_active\fR (int)
933 .ad
934 .RS 12n
935 Maximum scrub I/Os active to each device.
936 See the section "ZFS I/O SCHEDULER".
937 .sp
938 Default value: \fB2\fR.
939 .RE
940
941 .sp
942 .ne 2
943 .na
944 \fBzfs_vdev_scrub_min_active\fR (int)
945 .ad
946 .RS 12n
947 Minimum scrub I/Os active to each device.
948 See the section "ZFS I/O SCHEDULER".
949 .sp
950 Default value: \fB1\fR.
951 .RE
952
953 .sp
954 .ne 2
955 .na
956 \fBzfs_vdev_sync_read_max_active\fR (int)
957 .ad
958 .RS 12n
959 Maximum synchronous read I/Os active to each device.
960 See the section "ZFS I/O SCHEDULER".
961 .sp
962 Default value: \fB10\fR.
963 .RE
964
965 .sp
966 .ne 2
967 .na
968 \fBzfs_vdev_sync_read_min_active\fR (int)
969 .ad
970 .RS 12n
971 Minimum synchronous read I/Os active to each device.
972 See the section "ZFS I/O SCHEDULER".
973 .sp
974 Default value: \fB10\fR.
975 .RE
976
977 .sp
978 .ne 2
979 .na
980 \fBzfs_vdev_sync_write_max_active\fR (int)
981 .ad
982 .RS 12n
983 Maximum synchronous write I/Os active to each device.
984 See the section "ZFS I/O SCHEDULER".
985 .sp
986 Default value: \fB10\fR.
987 .RE
988
989 .sp
990 .ne 2
991 .na
992 \fBzfs_vdev_sync_write_min_active\fR (int)
993 .ad
994 .RS 12n
995 Minimum synchronous write I/Os active to each device.
996 See the section "ZFS I/O SCHEDULER".
997 .sp
998 Default value: \fB10\fR.
999 .RE
1000
1001 .sp
1002 .ne 2
1003 .na
1004 \fBzfs_disable_dup_eviction\fR (int)
1005 .ad
1006 .RS 12n
1007 Disable duplicate buffer eviction
1008 .sp
1009 Use \fB1\fR for yes and \fB0\fR for no (default).
1010 .RE
1011
1012 .sp
1013 .ne 2
1014 .na
1015 \fBzfs_expire_snapshot\fR (int)
1016 .ad
1017 .RS 12n
1018 Seconds to expire .zfs/snapshot
1019 .sp
1020 Default value: \fB300\fR.
1021 .RE
1022
1023 .sp
1024 .ne 2
1025 .na
1026 \fBzfs_admin_snapshot\fR (int)
1027 .ad
1028 .RS 12n
1029 Allow the creation, removal, or renaming of entries in the .zfs/snapshot
1030 directory to cause the creation, destruction, or renaming of snapshots.
1031 When enabled this functionality works both locally and over NFS exports
1032 which have the 'no_root_squash' option set. This functionality is disabled
1033 by default.
1034 .sp
1035 Use \fB1\fR for yes and \fB0\fR for no (default).
1036 .RE
1037
1038 .sp
1039 .ne 2
1040 .na
1041 \fBzfs_flags\fR (int)
1042 .ad
1043 .RS 12n
1044 Set additional debugging flags. The following flags may be bitwise-or'd
1045 together.
1046 .sp
1047 .TS
1048 box;
1049 rB lB
1050 lB lB
1051 r l.
1052 Value Symbolic Name
1053 Description
1054 _
1055 1 ZFS_DEBUG_DPRINTF
1056 Enable dprintf entries in the debug log.
1057 _
1058 2 ZFS_DEBUG_DBUF_VERIFY *
1059 Enable extra dbuf verifications.
1060 _
1061 4 ZFS_DEBUG_DNODE_VERIFY *
1062 Enable extra dnode verifications.
1063 _
1064 8 ZFS_DEBUG_SNAPNAMES
1065 Enable snapshot name verification.
1066 _
1067 16 ZFS_DEBUG_MODIFY
1068 Check for illegally modified ARC buffers.
1069 _
1070 32 ZFS_DEBUG_SPA
1071 Enable spa_dbgmsg entries in the debug log.
1072 _
1073 64 ZFS_DEBUG_ZIO_FREE
1074 Enable verification of block frees.
1075 _
1076 128 ZFS_DEBUG_HISTOGRAM_VERIFY
1077 Enable extra spacemap histogram verifications.
1078 .TE
1079 .sp
1080 * Requires debug build.
1081 .sp
1082 Default value: \fB0\fR.
1083 .RE
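.sp
For example, enabling both dprintf entries and ARC modification checks
means OR-ing the corresponding values from the table (a sketch only):
.sp
.nf
# Combine debug flags with bitwise OR.
ZFS_DEBUG_DPRINTF = 1
ZFS_DEBUG_MODIFY = 16
zfs_flags = ZFS_DEBUG_DPRINTF | ZFS_DEBUG_MODIFY
print(zfs_flags)                      # 17
.fi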
1084
1085 .sp
1086 .ne 2
1087 .na
1088 \fBzfs_free_leak_on_eio\fR (int)
1089 .ad
1090 .RS 12n
1091 If destroy encounters an EIO while reading metadata (e.g. indirect
1092 blocks), space referenced by the missing metadata can not be freed.
1093 Normally this causes the background destroy to become "stalled", as
1094 it is unable to make forward progress. While in this stalled state,
1095 all remaining space to free from the error-encountering filesystem is
1096 "temporarily leaked". Set this flag to cause it to ignore the EIO,
1097 permanently leak the space from indirect blocks that can not be read,
1098 and continue to free everything else that it can.
1099
1100 The default, "stalling" behavior is useful if the storage partially
1101 fails (i.e. some but not all i/os fail), and then later recovers. In
1102 this case, we will be able to continue pool operations while it is
1103 partially failed, and when it recovers, we can continue to free the
1104 space, with no leaks. However, note that this case is actually
1105 fairly rare.
1106
1107 Typically pools either (a) fail completely (but perhaps temporarily,
1108 e.g. a top-level vdev going offline), or (b) have localized,
1109 permanent errors (e.g. disk returns the wrong data due to bit flip or
1110 firmware bug). In case (a), this setting does not matter because the
1111 pool will be suspended and the sync thread will not be able to make
1112 forward progress regardless. In case (b), because the error is
1113 permanent, the best we can do is leak the minimum amount of space,
1114 which is what setting this flag will do. Therefore, it is reasonable
1115 for this flag to normally be set, but we chose the more conservative
1116 approach of not setting it, so that there is no possibility of
1117 leaking space in the "partial temporary" failure case.
1118 .sp
1119 Default value: \fB0\fR.
1120 .RE
1121
1122 .sp
1123 .ne 2
1124 .na
1125 \fBzfs_free_min_time_ms\fR (int)
1126 .ad
1127 .RS 12n
1128 Min millisecs to free per txg
1129 .sp
1130 Default value: \fB1,000\fR.
1131 .RE
1132
1133 .sp
1134 .ne 2
1135 .na
1136 \fBzfs_immediate_write_sz\fR (long)
1137 .ad
1138 .RS 12n
1139 Largest data block to write to zil
1140 .sp
1141 Default value: \fB32,768\fR.
1142 .RE
1143
1144 .sp
1145 .ne 2
1146 .na
1147 \fBzfs_max_recordsize\fR (int)
1148 .ad
1149 .RS 12n
1150 We currently support block sizes from 512 bytes to 16MB. The benefits of
1151 larger blocks, and thus larger IO, need to be weighed against the cost of
1152 COWing a giant block to modify one byte. Additionally, very large blocks
1153 can have an impact on i/o latency, and also potentially on the memory
1154 allocator. Therefore, we do not allow the recordsize to be set larger than
1155 zfs_max_recordsize (default 1MB). Larger blocks can be created by changing
1156 this tunable, and pools with larger blocks can always be imported and used,
1157 regardless of this setting.
1158 .sp
1159 Default value: \fB1,048,576\fR.
1160 .RE
1161
1162 .sp
1163 .ne 2
1164 .na
1165 \fBzfs_mdcomp_disable\fR (int)
1166 .ad
1167 .RS 12n
1168 Disable meta data compression
1169 .sp
1170 Use \fB1\fR for yes and \fB0\fR for no (default).
1171 .RE
1172
1173 .sp
1174 .ne 2
1175 .na
1176 \fBzfs_metaslab_fragmentation_threshold\fR (int)
1177 .ad
1178 .RS 12n
1179 Allow metaslabs to keep their active state as long as their fragmentation
1180 percentage is less than or equal to this value. An active metaslab that
1181 exceeds this threshold will no longer keep its active status allowing
1182 better metaslabs to be selected.
1183 .sp
1184 Default value: \fB70\fR.
1185 .RE
1186
1187 .sp
1188 .ne 2
1189 .na
1190 \fBzfs_mg_fragmentation_threshold\fR (int)
1191 .ad
1192 .RS 12n
1193 Metaslab groups are considered eligible for allocations if their
1194 fragmentation metric (measured as a percentage) is less than or equal to
1195 this value. If a metaslab group exceeds this threshold then it will be
1196 skipped unless all metaslab groups within the metaslab class have also
1197 crossed this threshold.
1198 .sp
1199 Default value: \fB85\fR.
1200 .RE
1201
1202 .sp
1203 .ne 2
1204 .na
1205 \fBzfs_mg_noalloc_threshold\fR (int)
1206 .ad
1207 .RS 12n
1208 Defines a threshold at which metaslab groups should be eligible for
1209 allocations. The value is expressed as a percentage of free space
1210 beyond which a metaslab group is always eligible for allocations.
1211 If a metaslab group's free space is less than or equal to the
1212 threshold, the allocator will avoid allocating to that group
1213 unless all groups in the pool have reached the threshold. Once all
1214 groups have reached the threshold, all groups are allowed to accept
1215 allocations. The default value of 0 disables the feature and causes
1216 all metaslab groups to be eligible for allocations.
1217
1218 This parameter makes it possible to deal with pools having heavily imbalanced
1219 vdevs such as would be the case when a new vdev has been added.
1220 Setting the threshold to a non-zero percentage will stop allocations
1221 from being made to vdevs that aren't filled to the specified percentage
1222 and allow lesser filled vdevs to acquire more allocations than they
1223 otherwise would under the old \fBzfs_mg_alloc_failures\fR facility.
1224 .sp
1225 Default value: \fB0\fR.
1226 .RE
1227
1228 .sp
1229 .ne 2
1230 .na
1231 \fBzfs_no_scrub_io\fR (int)
1232 .ad
1233 .RS 12n
1234 Set for no scrub I/O
1235 .sp
1236 Use \fB1\fR for yes and \fB0\fR for no (default).
1237 .RE
1238
1239 .sp
1240 .ne 2
1241 .na
1242 \fBzfs_no_scrub_prefetch\fR (int)
1243 .ad
1244 .RS 12n
1245 Set for no scrub prefetching
1246 .sp
1247 Use \fB1\fR for yes and \fB0\fR for no (default).
1248 .RE
1249
1250 .sp
1251 .ne 2
1252 .na
1253 \fBzfs_nocacheflush\fR (int)
1254 .ad
1255 .RS 12n
1256 Disable cache flushes
1257 .sp
1258 Use \fB1\fR for yes and \fB0\fR for no (default).
1259 .RE
1260
1261 .sp
1262 .ne 2
1263 .na
1264 \fBzfs_nopwrite_enabled\fR (int)
1265 .ad
1266 .RS 12n
1267 Enable NOP writes
1268 .sp
1269 Use \fB1\fR for yes (default) and \fB0\fR to disable.
1270 .RE
1271
1272 .sp
1273 .ne 2
1274 .na
1275 \fBzfs_pd_bytes_max\fR (int)
1276 .ad
1277 .RS 12n
1278 The number of bytes which should be prefetched.
1279 .sp
1280 Default value: \fB52,428,800\fR.
1281 .RE
1282
1283 .sp
1284 .ne 2
1285 .na
1286 \fBzfs_prefetch_disable\fR (int)
1287 .ad
1288 .RS 12n
1289 This tunable disables predictive prefetch. Note that it leaves "prescient"
1290 prefetch (e.g. prefetch for zfs send) intact. Unlike predictive prefetch,
1291 prescient prefetch never issues i/os that end up not being needed, so it
1292 can't hurt performance.
1293 .sp
1294 Use \fB1\fR for yes and \fB0\fR for no (default).
1295 .RE
1296
1297 .sp
1298 .ne 2
1299 .na
1300 \fBzfs_read_chunk_size\fR (long)
1301 .ad
1302 .RS 12n
1303 Bytes to read per chunk
1304 .sp
1305 Default value: \fB1,048,576\fR.
1306 .RE
1307
1308 .sp
1309 .ne 2
1310 .na
1311 \fBzfs_read_history\fR (int)
1312 .ad
1313 .RS 12n
1314 Historic statistics for the last N reads
1315 .sp
1316 Default value: \fB0\fR.
1317 .RE
1318
1319 .sp
1320 .ne 2
1321 .na
1322 \fBzfs_read_history_hits\fR (int)
1323 .ad
1324 .RS 12n
1325 Include cache hits in read history
1326 .sp
1327 Use \fB1\fR for yes and \fB0\fR for no (default).
1328 .RE
1329
1330 .sp
1331 .ne 2
1332 .na
1333 \fBzfs_recover\fR (int)
1334 .ad
1335 .RS 12n
1336 Set to attempt to recover from fatal errors. This should only be used as a
1337 last resort, as it typically results in leaked space, or worse.
1338 .sp
1339 Use \fB1\fR for yes and \fB0\fR for no (default).
1340 .RE
1341
1342 .sp
1343 .ne 2
1344 .na
1345 \fBzfs_resilver_delay\fR (int)
1346 .ad
1347 .RS 12n
1348 Number of ticks to delay prior to issuing a resilver I/O operation when
1349 a non-resilver or non-scrub I/O operation has occurred within the past
1350 \fBzfs_scan_idle\fR ticks.
1351 .sp
1352 Default value: \fB2\fR.
1353 .RE
1354
1355 .sp
1356 .ne 2
1357 .na
1358 \fBzfs_resilver_min_time_ms\fR (int)
1359 .ad
1360 .RS 12n
1361 Min millisecs to resilver per txg
1362 .sp
1363 Default value: \fB3,000\fR.
1364 .RE
1365
1366 .sp
1367 .ne 2
1368 .na
1369 \fBzfs_scan_idle\fR (int)
1370 .ad
1371 .RS 12n
1372 Idle window in clock ticks. During a scrub or a resilver, if
1373 a non-scrub or non-resilver I/O operation has occurred during this
1374 window, the next scrub or resilver operation is delayed by
1375 \fBzfs_scrub_delay\fR or \fBzfs_resilver_delay\fR ticks, respectively.
1376 .sp
1377 Default value: \fB50\fR.
1378 .RE
1379
1380 .sp
1381 .ne 2
1382 .na
1383 \fBzfs_scan_min_time_ms\fR (int)
1384 .ad
1385 .RS 12n
1386 Min millisecs to scrub per txg
1387 .sp
1388 Default value: \fB1,000\fR.
1389 .RE
1390
1391 .sp
1392 .ne 2
1393 .na
1394 \fBzfs_scrub_delay\fR (int)
1395 .ad
1396 .RS 12n
1397 Number of ticks to delay prior to issuing a scrub I/O operation when
1398 a non-scrub or non-resilver I/O operation has occurred within the past
1399 \fBzfs_scan_idle\fR ticks.
1400 .sp
1401 Default value: \fB4\fR.
1402 .RE
1403
1404 .sp
1405 .ne 2
1406 .na
1407 \fBzfs_send_corrupt_data\fR (int)
1408 .ad
1409 .RS 12n
1410 Allow sending of corrupt data (ignore read/checksum errors when sending data)
1411 .sp
1412 Use \fB1\fR for yes and \fB0\fR for no (default).
1413 .RE
1414
1415 .sp
1416 .ne 2
1417 .na
1418 \fBzfs_sync_pass_deferred_free\fR (int)
1419 .ad
1420 .RS 12n
1421 Defer frees starting in this pass
1422 .sp
1423 Default value: \fB2\fR.
1424 .RE
1425
1426 .sp
1427 .ne 2
1428 .na
1429 \fBzfs_sync_pass_dont_compress\fR (int)
1430 .ad
1431 .RS 12n
1432 Don't compress starting in this pass
1433 .sp
1434 Default value: \fB5\fR.
1435 .RE
1436
1437 .sp
1438 .ne 2
1439 .na
1440 \fBzfs_sync_pass_rewrite\fR (int)
1441 .ad
1442 .RS 12n
1443 Rewrite new bps starting in this pass
1444 .sp
1445 Default value: \fB2\fR.
1446 .RE
1447
1448 .sp
1449 .ne 2
1450 .na
1451 \fBzfs_top_maxinflight\fR (int)
1452 .ad
1453 .RS 12n
1454 Max I/Os per top-level vdev during scrub or resilver operations.
1455 .sp
1456 Default value: \fB32\fR.
1457 .RE
1458
1459 .sp
1460 .ne 2
1461 .na
1462 \fBzfs_txg_history\fR (int)
1463 .ad
1464 .RS 12n
1465 Historic statistics for the last N txgs
1466 .sp
1467 Default value: \fB0\fR.
1468 .RE
1469
1470 .sp
1471 .ne 2
1472 .na
1473 \fBzfs_txg_timeout\fR (int)
1474 .ad
1475 .RS 12n
1476 Max seconds worth of delta per txg
1477 .sp
1478 Default value: \fB5\fR.
1479 .RE
1480
1481 .sp
1482 .ne 2
1483 .na
1484 \fBzfs_vdev_aggregation_limit\fR (int)
1485 .ad
1486 .RS 12n
1487 Max vdev I/O aggregation size
1488 .sp
1489 Default value: \fB131,072\fR.
1490 .RE
1491
1492 .sp
1493 .ne 2
1494 .na
1495 \fBzfs_vdev_cache_bshift\fR (int)
1496 .ad
1497 .RS 12n
1498 Shift size to inflate reads to
1499 .sp
1500 Default value: \fB16\fR.
1501 .RE
1502
1503 .sp
1504 .ne 2
1505 .na
1506 \fBzfs_vdev_cache_max\fR (int)
1507 .ad
1508 .RS 12n
1509 Inflate reads smaller than this value
1510 .RE
1511
1512 .sp
1513 .ne 2
1514 .na
1515 \fBzfs_vdev_cache_size\fR (int)
1516 .ad
1517 .RS 12n
1518 Total size of the per-disk cache
1519 .sp
1520 Default value: \fB0\fR.
1521 .RE
1522
1523 .sp
1524 .ne 2
1525 .na
1526 \fBzfs_vdev_mirror_switch_us\fR (int)
1527 .ad
1528 .RS 12n
1529 Switch mirrors every N usecs
1530 .sp
1531 Default value: \fB10,000\fR.
1532 .RE
1533
1534 .sp
1535 .ne 2
1536 .na
1537 \fBzfs_vdev_read_gap_limit\fR (int)
1538 .ad
1539 .RS 12n
1540 Aggregate read I/O over gap
1541 .sp
1542 Default value: \fB32,768\fR.
1543 .RE
1544
1545 .sp
1546 .ne 2
1547 .na
1548 \fBzfs_vdev_scheduler\fR (charp)
1549 .ad
1550 .RS 12n
1551 I/O scheduler
1552 .sp
1553 Default value: \fBnoop\fR.
1554 .RE
1555
1556 .sp
1557 .ne 2
1558 .na
1559 \fBzfs_vdev_write_gap_limit\fR (int)
1560 .ad
1561 .RS 12n
1562 Aggregate write I/O over gap
1563 .sp
1564 Default value: \fB4,096\fR.
1565 .RE
1566
1567 .sp
1568 .ne 2
1569 .na
1570 \fBzfs_zevent_cols\fR (int)
1571 .ad
1572 .RS 12n
1573 Max event column width
1574 .sp
1575 Default value: \fB80\fR.
1576 .RE
1577
1578 .sp
1579 .ne 2
1580 .na
1581 \fBzfs_zevent_console\fR (int)
1582 .ad
1583 .RS 12n
1584 Log events to the console
1585 .sp
1586 Use \fB1\fR for yes and \fB0\fR for no (default).
1587 .RE
1588
1589 .sp
1590 .ne 2
1591 .na
1592 \fBzfs_zevent_len_max\fR (int)
1593 .ad
1594 .RS 12n
1595 Max event queue length
1596 .sp
1597 Default value: \fB0\fR.
1598 .RE
1599
1600 .sp
1601 .ne 2
1602 .na
1603 \fBzil_replay_disable\fR (int)
1604 .ad
1605 .RS 12n
1606 Disable intent logging replay
1607 .sp
1608 Use \fB1\fR for yes and \fB0\fR for no (default).
1609 .RE
1610
1611 .sp
1612 .ne 2
1613 .na
1614 \fBzil_slog_limit\fR (ulong)
1615 .ad
1616 .RS 12n
1617 Max commit bytes to separate log device
1618 .sp
1619 Default value: \fB1,048,576\fR.
1620 .RE
1621
1622 .sp
1623 .ne 2
1624 .na
1625 \fBzio_delay_max\fR (int)
1626 .ad
1627 .RS 12n
1628 Max zio millisecond delay before posting event
1629 .sp
1630 Default value: \fB30,000\fR.
1631 .RE
1632
1633 .sp
1634 .ne 2
1635 .na
1636 \fBzio_requeue_io_start_cut_in_line\fR (int)
1637 .ad
1638 .RS 12n
1639 Prioritize requeued I/O
1640 .sp
1641 Default value: \fB0\fR.
1642 .RE
1643
1644 .sp
1645 .ne 2
1646 .na
1647 \fBzio_taskq_batch_pct\fR (uint)
1648 .ad
1649 .RS 12n
1650 Percentage of online CPUs (or CPU cores, etc) which will run a worker thread
1651 for IO. These workers are responsible for IO work such as compression and
1652 checksum calculations. A fractional number of CPUs will be rounded down.
1653 .sp
1654 The default value of 75 was chosen to avoid using all CPUs which can result in
1655 latency issues and inconsistent application performance, especially when high
1656 compression is enabled.
1657 .sp
1658 Default value: \fB75\fR.
1659 .RE
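.sp
A sketch of the resulting worker thread count on a given machine,
illustrating only the rounding described above:
.sp
.nf
# Number of IO worker threads: a percentage of online CPUs, rounded down.
import os

zio_taskq_batch_pct = 75              # default
ncpus = os.cpu_count() or 1
print(ncpus * zio_taskq_batch_pct // 100)
.fi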
1660
1661 .sp
1662 .ne 2
1663 .na
1664 \fBzvol_inhibit_dev\fR (uint)
1665 .ad
1666 .RS 12n
1667 Do not create zvol device nodes
1668 .sp
1669 Use \fB1\fR for yes and \fB0\fR for no (default).
1670 .RE
1671
1672 .sp
1673 .ne 2
1674 .na
1675 \fBzvol_major\fR (uint)
1676 .ad
1677 .RS 12n
1678 Major number for zvol device
1679 .sp
1680 Default value: \fB230\fR.
1681 .RE
1682
1683 .sp
1684 .ne 2
1685 .na
1686 \fBzvol_max_discard_blocks\fR (ulong)
1687 .ad
1688 .RS 12n
1689 Max number of blocks to discard at once
1690 .sp
1691 Default value: \fB16,384\fR.
1692 .RE
1693
1694 .sp
1695 .ne 2
1696 .na
1697 \fBzvol_prefetch_bytes\fR (uint)
1698 .ad
1699 .RS 12n
1700 When adding a zvol to the system, prefetch \fBzvol_prefetch_bytes\fR
1701 from the start and end of the volume. Prefetching these regions
1702 of the volume is desirable because they are likely to be accessed
1703 immediately by \fBblkid(8)\fR or by the kernel scanning for a partition
1704 table.
1705 .sp
1706 Default value: \fB131,072\fR.
1707 .RE
1708
1709 .SH ZFS I/O SCHEDULER
1710 ZFS issues I/O operations to leaf vdevs to satisfy and complete I/Os.
1711 The I/O scheduler determines when and in what order those operations are
1712 issued. The I/O scheduler divides operations into five I/O classes
1713 prioritized in the following order: sync read, sync write, async read,
1714 async write, and scrub/resilver. Each queue defines the minimum and
1715 maximum number of concurrent operations that may be issued to the
1716 device. In addition, the device has an aggregate maximum,
1717 \fBzfs_vdev_max_active\fR. Note that the sum of the per-queue minimums
1718 must not exceed the aggregate maximum. If the sum of the per-queue
1719 maximums exceeds the aggregate maximum, then the number of active I/Os
1720 may reach \fBzfs_vdev_max_active\fR, in which case no further I/Os will
1721 be issued regardless of whether all per-queue minimums have been met.
1722 .sp
1723 For many physical devices, throughput increases with the number of
1724 concurrent operations, but latency typically suffers. Further, physical
1725 devices typically have a limit at which more concurrent operations have no
1726 effect on throughput or can actually cause it to decrease.
1727 .sp
1728 The scheduler selects the next operation to issue by first looking for an
1729 I/O class whose minimum has not been satisfied. Once all are satisfied and
1730 the aggregate maximum has not been hit, the scheduler looks for classes
1731 whose maximum has not been satisfied. Iteration through the I/O classes is
1732 done in the order specified above. No further operations are issued if the
1733 aggregate maximum number of concurrent operations has been hit or if there
1734 are no operations queued for an I/O class that has not hit its maximum.
1735 Every time an I/O is queued or an operation completes, the I/O scheduler
1736 looks for new operations to issue.
1737 .sp
1738 In general, smaller max_active's will lead to lower latency of synchronous
1739 operations. Larger max_active's may lead to higher overall throughput,
1740 depending on underlying storage.
1741 .sp
1742 The ratio of the queues' max_actives determines the balance of performance
1743 between reads, writes, and scrubs. E.g., increasing
1744 \fBzfs_vdev_scrub_max_active\fR will cause the scrub or resilver to complete
1745 more quickly, but reads and writes to have higher latency and lower throughput.
1746 .sp
1747 All I/O classes have a fixed maximum number of outstanding operations
1748 except for the async write class. Asynchronous writes represent the data
1749 that is committed to stable storage during the syncing stage for
1750 transaction groups. Transaction groups enter the syncing state
1751 periodically so the number of queued async writes will quickly burst up
1752 and then bleed down to zero. Rather than servicing them as quickly as
1753 possible, the I/O scheduler changes the maximum number of active async
1754 write I/Os according to the amount of dirty data in the pool. Since
1755 both throughput and latency typically increase with the number of
1756 concurrent operations issued to physical devices, reducing the
1757 burstiness in the number of concurrent operations also stabilizes the
1758 response time of operations from other -- and in particular synchronous
1759 -- queues. In broad strokes, the I/O scheduler will issue more
1760 concurrent operations from the async write queue as there's more dirty
1761 data in the pool.
1762 .sp
1763 Async Writes
1764 .sp
1765 The number of concurrent operations issued for the async write I/O class
1766 follows a piece-wise linear function defined by a few adjustable points.
1767 .nf
1768
1769 | o---------| <-- zfs_vdev_async_write_max_active
1770 ^ | /^ |
1771 | | / | |
1772 active | / | |
1773 I/O | / | |
1774 count | / | |
1775 | / | |
1776 |-------o | | <-- zfs_vdev_async_write_min_active
1777 0|_______^______|_________|
1778 0% | | 100% of zfs_dirty_data_max
1779 | |
1780 | `-- zfs_vdev_async_write_active_max_dirty_percent
1781 `--------- zfs_vdev_async_write_active_min_dirty_percent
1782
1783 .fi
1784 Until the amount of dirty data exceeds a minimum percentage of the dirty
1785 data allowed in the pool, the I/O scheduler will limit the number of
1786 concurrent operations to the minimum. As that threshold is crossed, the
1787 number of concurrent operations issued increases linearly to the maximum at
1788 the specified maximum percentage of the dirty data allowed in the pool.
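.sp
The piece-wise linear function can be sketched as follows. This is an
illustration in Python, not the in-kernel code, and it assumes the default
values of the four tunables involved:
.sp
.nf
# Active async write limit as a function of the dirty data percentage.
def async_write_limit(dirty_pct, min_active=1, max_active=10,
                      min_dirty=30, max_dirty=60):
    if dirty_pct <= min_dirty:
        return min_active
    if dirty_pct >= max_dirty:
        return max_active
    slope = (max_active - min_active) / (max_dirty - min_dirty)
    return int(min_active + slope * (dirty_pct - min_dirty))

print([async_write_limit(p) for p in (10, 30, 45, 60, 90)])  # [1, 1, 5, 10, 10]
.fi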
1789 .sp
1790 Ideally, the amount of dirty data on a busy pool will stay in the sloped
1791 part of the function between \fBzfs_vdev_async_write_active_min_dirty_percent\fR
1792 and \fBzfs_vdev_async_write_active_max_dirty_percent\fR. If it exceeds the
1793 maximum percentage, this indicates that the rate of incoming data is
1794 greater than the rate that the backend storage can handle. In this case, we
1795 must further throttle incoming writes, as described in the next section.
1796
1797 .SH ZFS TRANSACTION DELAY
1798 We delay transactions when we've determined that the backend storage
1799 isn't able to accommodate the rate of incoming writes.
1800 .sp
1801 If there is already a transaction waiting, we delay relative to when
1802 that transaction will finish waiting. This way the calculated delay time
1803 is independent of the number of threads concurrently executing
1804 transactions.
1805 .sp
1806 If we are the only waiter, wait relative to when the transaction
1807 started, rather than the current time. This credits the transaction for
1808 "time already served", e.g. reading indirect blocks.
1809 .sp
1810 The minimum time for a transaction to take is calculated as:
1811 .nf
1812 min_time = zfs_delay_scale * (dirty - min) / (max - dirty)
1813 min_time is then capped at 100 milliseconds.
1814 .fi
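.sp
A worked example of this formula, interpreting \fBmin\fR as
\fBzfs_delay_min_dirty_percent\fR of \fBzfs_dirty_data_max\fR and \fBmax\fR
as \fBzfs_dirty_data_max\fR itself. The 1 GB \fBzfs_dirty_data_max\fR and
the 80% dirty figure are assumptions; the result is in nanoseconds, which
is consistent with the default \fBzfs_delay_scale\fR of 500,000 yielding
500us at the midpoint described below:
.sp
.nf
# min_time = zfs_delay_scale * (dirty - min) / (max - dirty), capped at 100ms
zfs_delay_scale = 500000                  # default
zfs_dirty_data_max = 10**9                # assumed: 1 GB of allowed dirty data
zfs_delay_min_dirty_percent = 60          # default
delay_min = zfs_dirty_data_max * zfs_delay_min_dirty_percent // 100
dirty = 800_000_000                       # assumed: 80% dirty, the midpoint

min_time = zfs_delay_scale * (dirty - delay_min) / (zfs_dirty_data_max - dirty)
min_time = min(min_time, 100_000_000)     # cap at 100 milliseconds
print(min_time / 1000, "microseconds")    # 500.0 at the midpoint
.fi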
1815 .sp
1816 The delay has two degrees of freedom that can be adjusted via tunables. The
1817 percentage of dirty data at which we start to delay is defined by
1818 \fBzfs_delay_min_dirty_percent\fR. This should typically be at or above
1819 \fBzfs_vdev_async_write_active_max_dirty_percent\fR so that we only start to
1820 delay after writing at full speed has failed to keep up with the incoming write
1821 rate. The scale of the curve is defined by \fBzfs_delay_scale\fR. Roughly speaking,
1822 this variable determines the amount of delay at the midpoint of the curve.
1823 .sp
1824 .nf
1825 delay
1826 10ms +-------------------------------------------------------------*+
1827 | *|
1828 9ms + *+
1829 | *|
1830 8ms + *+
1831 | * |
1832 7ms + * +
1833 | * |
1834 6ms + * +
1835 | * |
1836 5ms + * +
1837 | * |
1838 4ms + * +
1839 | * |
1840 3ms + * +
1841 | * |
1842 2ms + (midpoint) * +
1843 | | ** |
1844 1ms + v *** +
1845 | zfs_delay_scale ----------> ******** |
1846 0 +-------------------------------------*********----------------+
1847 0% <- zfs_dirty_data_max -> 100%
1848 .fi
1849 .sp
1850 Note that since the delay is added to the outstanding time remaining on the
1851 most recent transaction, the delay is effectively the inverse of IOPS.
1852 Here the midpoint of 500us translates to 2000 IOPS. The shape of the curve
1853 was chosen such that small changes in the amount of accumulated dirty data
1854 in the first 3/4 of the curve yield relatively small differences in the
1855 amount of delay.
1856 .sp
1857 The effects can be easier to understand when the amount of delay is
1858 represented on a log scale:
1859 .sp
1860 .nf
1861 delay
1862 100ms +-------------------------------------------------------------++
1863 + +
1864 | |
1865 + *+
1866 10ms + *+
1867 + ** +
1868 | (midpoint) ** |
1869 + | ** +
1870 1ms + v **** +
1871 + zfs_delay_scale ----------> ***** +
1872 | **** |
1873 + **** +
1874 100us + ** +
1875 + * +
1876 | * |
1877 + * +
1878 10us + * +
1879 + +
1880 | |
1881 + +
1882 +--------------------------------------------------------------+
1883 0% <- zfs_dirty_data_max -> 100%
1884 .fi
1885 .sp
1886 Note here that only as the amount of dirty data approaches its limit does
1887 the delay start to increase rapidly. The goal of a properly tuned system
1888 should be to keep the amount of dirty data out of that range by first
1889 ensuring that the appropriate limits are set for the I/O scheduler to reach
1890 optimal throughput on the backend storage, and then by changing the value
1891 of \fBzfs_delay_scale\fR to increase the steepness of the curve.