1 '\" te
2 .\" Copyright (c) 2013 by Turbo Fredriksson <turbo@bayour.com>. All rights reserved.
3 .\" The contents of this file are subject to the terms of the Common Development
4 .\" and Distribution License (the "License"). You may not use this file except
5 .\" in compliance with the License. You can obtain a copy of the license at
6 .\" usr/src/OPENSOLARIS.LICENSE or http://www.opensolaris.org/os/licensing.
7 .\"
8 .\" See the License for the specific language governing permissions and
9 .\" limitations under the License. When distributing Covered Code, include this
10 .\" CDDL HEADER in each file and include the License file at
11 .\" usr/src/OPENSOLARIS.LICENSE. If applicable, add the following below this
12 .\" CDDL HEADER, with the fields enclosed by brackets "[]" replaced with your
13 .\" own identifying information:
14 .\" Portions Copyright [yyyy] [name of copyright owner]
15 .TH ZFS-MODULE-PARAMETERS 5 "Nov 16, 2013"
16 .SH NAME
17 zfs\-module\-parameters \- ZFS module parameters
18 .SH DESCRIPTION
19 .sp
20 .LP
21 Description of the different parameters to the ZFS module.
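.sp
On Linux, most of these tunables are exposed as files under
\fB/sys/module/zfs/parameters\fR and many of them may be changed at
runtime; persistent settings are normally placed in a file under
\fB/etc/modprobe.d\fR. The short sketch below is purely illustrative
(it is not part of ZFS or its utilities) and shows one way to read and
set a writable parameter from Python on a system with the zfs module
loaded.
.sp
.nf
# Illustrative helper, not shipped with ZFS: read or set a ZFS module
# parameter through sysfs.  Writing requires root privileges.
from pathlib import Path

PARAMS = Path("/sys/module/zfs/parameters")

def get_param(name):
    return (PARAMS / name).read_text().strip()

def set_param(name, value):
    (PARAMS / name).write_text(str(value))

if __name__ == "__main__":
    print("zfs_txg_timeout =", get_param("zfs_txg_timeout"))
.fi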
22
23 .SS "Module parameters"
24 .sp
25 .LP
26
27 .sp
28 .ne 2
29 .na
30 \fBl2arc_feed_again\fR (int)
31 .ad
32 .RS 12n
33 Turbo L2ARC warmup
34 .sp
35 Use \fB1\fR for yes (default) and \fB0\fR to disable.
36 .RE
37
38 .sp
39 .ne 2
40 .na
41 \fBl2arc_feed_min_ms\fR (ulong)
42 .ad
43 .RS 12n
44 Min feed interval in milliseconds
45 .sp
46 Default value: \fB200\fR.
47 .RE
48
49 .sp
50 .ne 2
51 .na
52 \fBl2arc_feed_secs\fR (ulong)
53 .ad
54 .RS 12n
55 Seconds between L2ARC writing
56 .sp
57 Default value: \fB1\fR.
58 .RE
59
60 .sp
61 .ne 2
62 .na
63 \fBl2arc_headroom\fR (ulong)
64 .ad
65 .RS 12n
66 Number of max device writes to precache
67 .sp
68 Default value: \fB2\fR.
69 .RE
70
71 .sp
72 .ne 2
73 .na
74 \fBl2arc_headroom_boost\fR (ulong)
75 .ad
76 .RS 12n
77 Compressed l2arc_headroom multiplier
78 .sp
79 Default value: \fB200\fR.
80 .RE
81
82 .sp
83 .ne 2
84 .na
85 \fBl2arc_nocompress\fR (int)
86 .ad
87 .RS 12n
88 Skip compressing L2ARC buffers
89 .sp
90 Use \fB1\fR for yes and \fB0\fR for no (default).
91 .RE
92
93 .sp
94 .ne 2
95 .na
96 \fBl2arc_noprefetch\fR (int)
97 .ad
98 .RS 12n
99 Skip caching prefetched buffers
100 .sp
101 Use \fB1\fR for yes (default) and \fB0\fR to disable.
102 .RE
103
104 .sp
105 .ne 2
106 .na
107 \fBl2arc_norw\fR (int)
108 .ad
109 .RS 12n
110 No reads during writes
111 .sp
112 Use \fB1\fR for yes and \fB0\fR for no (default).
113 .RE
114
115 .sp
116 .ne 2
117 .na
118 \fBl2arc_write_boost\fR (ulong)
119 .ad
120 .RS 12n
121 Extra write bytes during device warmup
122 .sp
123 Default value: \fB8,388,608\fR.
124 .RE
125
126 .sp
127 .ne 2
128 .na
129 \fBl2arc_write_max\fR (ulong)
130 .ad
131 .RS 12n
132 Max write bytes per interval
133 .sp
134 Default value: \fB8,388,608\fR.
135 .RE
136
137 .sp
138 .ne 2
139 .na
140 \fBmetaslab_aliquot\fR (ulong)
141 .ad
142 .RS 12n
143 Metaslab granularity, in bytes. This is roughly similar to what would be
144 referred to as the "stripe size" in traditional RAID arrays. In normal
145 operation, ZFS will try to write this amount of data to a top-level vdev
146 before moving on to the next one.
147 .sp
148 Default value: \fB524,288\fR.
149 .RE
150
151 .sp
152 .ne 2
153 .na
154 \fBmetaslab_bias_enabled\fR (int)
155 .ad
156 .RS 12n
157 Enable metaslab group biasing based on its vdev's over- or under-utilization
158 relative to the pool.
159 .sp
160 Use \fB1\fR for yes (default) and \fB0\fR for no.
161 .RE
162
163 .sp
164 .ne 2
165 .na
166 \fBmetaslab_debug_load\fR (int)
167 .ad
168 .RS 12n
169 Load all metaslabs during pool import.
170 .sp
171 Use \fB1\fR for yes and \fB0\fR for no (default).
172 .RE
173
174 .sp
175 .ne 2
176 .na
177 \fBmetaslab_debug_unload\fR (int)
178 .ad
179 .RS 12n
180 Prevent metaslabs from being unloaded.
181 .sp
182 Use \fB1\fR for yes and \fB0\fR for no (default).
183 .RE
184
185 .sp
186 .ne 2
187 .na
188 \fBmetaslab_fragmentation_factor_enabled\fR (int)
189 .ad
190 .RS 12n
191 Enable use of the fragmentation metric in computing metaslab weights.
192 .sp
193 Use \fB1\fR for yes (default) and \fB0\fR for no.
194 .RE
195
196 .sp
197 .ne 2
198 .na
199 \fBmetaslabs_per_vdev\fR (int)
200 .ad
201 .RS 12n
202 When a vdev is added, it will be divided into approximately (but no more than) this number of metaslabs.
203 .sp
204 Default value: \fB200\fR.
205 .RE
206
207 .sp
208 .ne 2
209 .na
210 \fBmetaslab_preload_enabled\fR (int)
211 .ad
212 .RS 12n
213 Enable metaslab group preloading.
214 .sp
215 Use \fB1\fR for yes (default) and \fB0\fR for no.
216 .RE
217
218 .sp
219 .ne 2
220 .na
221 \fBmetaslab_lba_weighting_enabled\fR (int)
222 .ad
223 .RS 12n
224 Give more weight to metaslabs with lower LBAs, assuming they have
225 greater bandwidth as is typically the case on a modern constant
226 angular velocity disk drive.
227 .sp
228 Use \fB1\fR for yes (default) and \fB0\fR for no.
229 .RE
230
231 .sp
232 .ne 2
233 .na
234 \fBspa_config_path\fR (charp)
235 .ad
236 .RS 12n
237 SPA config file
238 .sp
239 Default value: \fB/etc/zfs/zpool.cache\fR.
240 .RE
241
242 .sp
243 .ne 2
244 .na
245 \fBspa_asize_inflation\fR (int)
246 .ad
247 .RS 12n
248 Multiplication factor used to estimate actual disk consumption from the
249 size of data being written. The default value is a worst case estimate,
250 but lower values may be valid for a given pool depending on its
251 configuration. Pool administrators who understand the factors involved
252 may wish to specify a more realistic inflation factor, particularly if
253 they operate close to quota or capacity limits.
254 .sp
255 Default value: \fB24\fR.
256 .RE
257
258 .sp
259 .ne 2
260 .na
261 \fBspa_load_verify_data\fR (int)
262 .ad
263 .RS 12n
264 Whether to traverse data blocks during an "extreme rewind" (\fB-X\fR)
265 import. Use 0 to disable and 1 to enable.
266
267 An extreme rewind import normally performs a full traversal of all
268 blocks in the pool for verification. If this parameter is set to 0,
269 the traversal skips non-metadata blocks. It can be toggled once the
270 import has started to stop or start the traversal of non-metadata blocks.
271 .sp
272 Default value: \fB1\fR.
273 .RE
274
275 .sp
276 .ne 2
277 .na
278 \fBspa_load_verify_metadata\fR (int)
279 .ad
280 .RS 12n
281 Whether to traverse blocks during an "extreme rewind" (\fB-X\fR)
282 pool import. Use 0 to disable and 1 to enable.
283
284 An extreme rewind import normally performs a full traversal of all
285 blocks in the pool for verification. If this parameter is set to 0,
286 the traversal is not performed. It can be toggled once the import has
287 started to stop or start the traversal.
288 .sp
289 Default value: \fB1\fR.
290 .RE
291
292 .sp
293 .ne 2
294 .na
295 \fBspa_load_verify_maxinflight\fR (int)
296 .ad
297 .RS 12n
298 Maximum concurrent I/Os during the traversal performed during an "extreme
299 rewind" (\fB-X\fR) pool import.
300 .sp
301 Default value: \fB10,000\fR.
302 .RE
303
304 .sp
305 .ne 2
306 .na
307 \fBspa_slop_shift\fR (int)
308 .ad
309 .RS 12n
310 Normally, we don't allow the last 3.2% (1/(2^spa_slop_shift)) of space
311 in the pool to be consumed. This ensures that we don't run the pool
312 completely out of space, due to unaccounted changes (e.g. to the MOS).
313 It also limits the worst-case time to allocate space. If we have
314 less than this amount of free space, most ZPL operations (e.g. write,
315 create) will return ENOSPC.
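.sp
As an illustration of the relationship above (not part of the module
interface), the reserved fraction is simply 1/(2^\fBspa_slop_shift\fR):
.sp
.nf
# Illustrative arithmetic: fraction of pool space reserved as slop
# for a given spa_slop_shift (reserved fraction = 1 / 2**shift).
for shift in (4, 5, 6):
    print(f"spa_slop_shift={shift}: reserve 1/{2**shift} "
          f"({100 / 2**shift:.2f}% of pool space)")
.fi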
316 .sp
317 Default value: \fB5\fR.
318 .RE
319
320 .sp
321 .ne 2
322 .na
323 \fBzfetch_array_rd_sz\fR (ulong)
324 .ad
325 .RS 12n
326 If prefetching is enabled, disable prefetching for reads larger than this size.
327 .sp
328 Default value: \fB1,048,576\fR.
329 .RE
330
331 .sp
332 .ne 2
333 .na
334 \fBzfetch_max_distance\fR (uint)
335 .ad
336 .RS 12n
337 Max bytes to prefetch per stream (default 8MB).
338 .sp
339 Default value: \fB8,388,608\fR.
340 .RE
341
342 .sp
343 .ne 2
344 .na
345 \fBzfetch_max_streams\fR (uint)
346 .ad
347 .RS 12n
348 Max number of streams per zfetch (prefetch streams per file).
349 .sp
350 Default value: \fB8\fR.
351 .RE
352
353 .sp
354 .ne 2
355 .na
356 \fBzfetch_min_sec_reap\fR (uint)
357 .ad
358 .RS 12n
359 Min time before an active prefetch stream can be reclaimed
360 .sp
361 Default value: \fB2\fR.
362 .RE
363
364 .sp
365 .ne 2
366 .na
367 \fBzfs_arc_average_blocksize\fR (int)
368 .ad
369 .RS 12n
370 The ARC's buffer hash table is sized based on the assumption of an average
371 block size of \fBzfs_arc_average_blocksize\fR (default 8K). This works out
372 to roughly 1MB of hash table per 1GB of physical memory with 8-byte pointers.
373 For configurations with a known larger average block size this value can be
374 increased to reduce the memory footprint.
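.sp
A rough, illustrative version of that estimate (the in-kernel sizing also
rounds the bucket count up to a power of two):
.sp
.nf
# Rough estimate only: about one 8-byte pointer per
# zfs_arc_average_blocksize bytes of physical memory.
def arc_hash_table_bytes(physmem_bytes, average_blocksize=8192, ptr_size=8):
    return physmem_bytes // average_blocksize * ptr_size

gib = 1024**3
print(arc_hash_table_bytes(1 * gib))           # about 1 MiB per GiB at 8K
print(arc_hash_table_bytes(64 * gib, 65536))   # larger blocks shrink the table
.fi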
375
376 .sp
377 Default value: \fB8192\fR.
378 .RE
379
380 .sp
381 .ne 2
382 .na
383 \fBzfs_arc_evict_batch_limit\fR (int)
384 .ad
385 .RS 12n
386 Number of ARC headers to evict per sub-list before proceeding to another sub-list.
387 This batch-style operation prevents entire sub-lists from being evicted at once
388 but comes at a cost of additional unlocking and locking.
389 .sp
390 Default value: \fB10\fR.
391 .RE
392
393 .sp
394 .ne 2
395 .na
396 \fBzfs_arc_grow_retry\fR (int)
397 .ad
398 .RS 12n
399 Seconds before growing arc size
400 .sp
401 Default value: \fB5\fR.
402 .RE
403
404 .sp
405 .ne 2
406 .na
407 \fBzfs_arc_lotsfree_percent\fR (int)
408 .ad
409 .RS 12n
410 Throttle I/O when free system memory drops below this percentage of total
411 system memory. Setting this value to 0 will disable the throttle.
412 .sp
413 Default value: \fB10\fR.
414 .RE
415
416 .sp
417 .ne 2
418 .na
419 \fBzfs_arc_max\fR (ulong)
420 .ad
421 .RS 12n
422 Max arc size
423 .sp
424 Default value: \fB0\fR.
425 .RE
426
427 .sp
428 .ne 2
429 .na
430 \fBzfs_arc_meta_limit\fR (ulong)
431 .ad
432 .RS 12n
433 The maximum allowed size in bytes that meta data buffers are allowed to
434 consume in the ARC. When this limit is reached meta data buffers will
435 be reclaimed even if the overall arc_c_max has not been reached. This
436 value defaults to 0 which indicates that 3/4 of the ARC may be used
437 for meta data.
438 .sp
439 Default value: \fB0\fR.
440 .RE
441
442 .sp
443 .ne 2
444 .na
445 \fBzfs_arc_meta_min\fR (ulong)
446 .ad
447 .RS 12n
448 The minimum allowed size in bytes that meta data buffers may consume in
449 the ARC. This value defaults to 0 which disables a floor on the amount
450 of the ARC devoted to meta data.
451 .sp
452 Default value: \fB0\fR.
453 .RE
454
455 .sp
456 .ne 2
457 .na
458 \fBzfs_arc_meta_prune\fR (int)
459 .ad
460 .RS 12n
461 The number of dentries and inodes to be scanned looking for entries
462 which can be dropped. This may be required when the ARC reaches the
463 \fBzfs_arc_meta_limit\fR because dentries and inodes can pin buffers
464 in the ARC. Increasing this value will cause the dentry and inode caches
465 to be pruned more aggressively. Setting this value to 0 will disable
466 pruning the inode and dentry caches.
467 .sp
468 Default value: \fB10,000\fR.
469 .RE
470
471 .sp
472 .ne 2
473 .na
474 \fBzfs_arc_meta_adjust_restarts\fR (ulong)
475 .ad
476 .RS 12n
477 The number of restart passes to make while scanning the ARC attempting
478 to free buffers in order to stay below the \fBzfs_arc_meta_limit\fR.
479 This value should not need to be tuned but is available to facilitate
480 performance analysis.
481 .sp
482 Default value: \fB4096\fR.
483 .RE
484
485 .sp
486 .ne 2
487 .na
488 \fBzfs_arc_min\fR (ulong)
489 .ad
490 .RS 12n
491 Min arc size
492 .sp
493 Default value: \fB100\fR.
494 .RE
495
496 .sp
497 .ne 2
498 .na
499 \fBzfs_arc_min_prefetch_lifespan\fR (int)
500 .ad
501 .RS 12n
502 Min life of prefetch block
503 .sp
504 Default value: \fB100\fR.
505 .RE
506
507 .sp
508 .ne 2
509 .na
510 \fBzfs_arc_num_sublists_per_state\fR (int)
511 .ad
512 .RS 12n
513 To allow more fine-grained locking, each ARC state contains a series
514 of lists for both data and meta data objects. Locking is performed at
515 the level of these "sub-lists". This parameter controls the number of
516 sub-lists per ARC state.
517 .sp
518 Default value: \fB1\fR or the number of online CPUs, whichever is greater.
519 .RE
520
521 .sp
522 .ne 2
523 .na
524 \fBzfs_arc_overflow_shift\fR (int)
525 .ad
526 .RS 12n
527 The ARC size is considered to be overflowing if it exceeds the current
528 ARC target size (arc_c) by a threshold determined by this parameter.
529 The threshold is calculated as a fraction of arc_c using the formula
530 "arc_c >> \fBzfs_arc_overflow_shift\fR".
531
532 The default value of 8 causes the ARC to be considered to be overflowing
533 if it exceeds the target size by 1/256th (about 0.4%) of the target size.
534
535 When the ARC is overflowing, new buffer allocations are stalled until
536 the reclaim thread catches up and the overflow condition no longer exists.
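.sp
An illustrative evaluation of that threshold:
.sp
.nf
# Illustrative: overflow threshold = arc_c >> zfs_arc_overflow_shift.
def arc_overflow_threshold(arc_c, zfs_arc_overflow_shift=8):
    return arc_c >> zfs_arc_overflow_shift

arc_c = 8 * 1024**3                     # example 8 GiB ARC target size
print(arc_overflow_threshold(arc_c))    # 33554432 bytes (32 MiB, 1/256th)
.fi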
537 .sp
538 Default value: \fB8\fR.
539 .RE
540
541 .sp
542 .ne 2
543 .na
545 \fBzfs_arc_p_min_shift\fR (int)
546 .ad
547 .RS 12n
548 arc_c shift to calc min/max arc_p
549 .sp
550 Default value: \fB4\fR.
551 .RE
552
553 .sp
554 .ne 2
555 .na
556 \fBzfs_arc_p_aggressive_disable\fR (int)
557 .ad
558 .RS 12n
559 Disable aggressive arc_p growth
560 .sp
561 Use \fB1\fR for yes (default) and \fB0\fR to disable.
562 .RE
563
564 .sp
565 .ne 2
566 .na
567 \fBzfs_arc_p_dampener_disable\fR (int)
568 .ad
569 .RS 12n
570 Disable arc_p adapt dampener
571 .sp
572 Use \fB1\fR for yes (default) and \fB0\fR to disable.
573 .RE
574
575 .sp
576 .ne 2
577 .na
578 \fBzfs_arc_shrink_shift\fR (int)
579 .ad
580 .RS 12n
581 log2(fraction of arc to reclaim)
582 .sp
583 Default value: \fB5\fR.
584 .RE
585
586 .sp
587 .ne 2
588 .na
589 \fBzfs_arc_sys_free\fR (ulong)
590 .ad
591 .RS 12n
592 The target number of bytes the ARC should leave as free memory on the system.
593 Defaults to the larger of 1/64 of physical memory or 512K. Setting this
594 option to a non-zero value will override the default.
595 .sp
596 Default value: \fB0\fR.
597 .RE
598
599 .sp
600 .ne 2
601 .na
602 \fBzfs_autoimport_disable\fR (int)
603 .ad
604 .RS 12n
605 Disable pool import at module load by ignoring the cache file (typically \fB/etc/zfs/zpool.cache\fR).
606 .sp
607 Use \fB1\fR for yes (default) and \fB0\fR for no.
608 .RE
609
610 .sp
611 .ne 2
612 .na
613 \fBzfs_dbgmsg_enable\fR (int)
614 .ad
615 .RS 12n
616 Internally ZFS keeps a small log to facilitate debugging. By default the log
617 is disabled, to enable it set this option to 1. The contents of the log can
618 be accessed by reading the /proc/spl/kstat/zfs/dbgmsg file. Writing 0 to
619 this proc file clears the log.
620 .sp
621 Default value: \fB0\fR.
622 .RE
623
624 .sp
625 .ne 2
626 .na
627 \fBzfs_dbgmsg_maxsize\fR (int)
628 .ad
629 .RS 12n
630 The maximum size in bytes of the internal ZFS debug log.
631 .sp
632 Default value: \fB4M\fR.
633 .RE
634
635 .sp
636 .ne 2
637 .na
638 \fBzfs_dbuf_state_index\fR (int)
639 .ad
640 .RS 12n
641 Calculate arc header index
642 .sp
643 Default value: \fB0\fR.
644 .RE
645
646 .sp
647 .ne 2
648 .na
649 \fBzfs_deadman_enabled\fR (int)
650 .ad
651 .RS 12n
652 Enable deadman timer
653 .sp
654 Use \fB1\fR for yes (default) and \fB0\fR to disable.
655 .RE
656
657 .sp
658 .ne 2
659 .na
660 \fBzfs_deadman_synctime_ms\fR (ulong)
661 .ad
662 .RS 12n
663 Expiration time in milliseconds. This value has two meanings. First it is
664 used to determine when the spa_deadman() logic should fire. By default the
665 spa_deadman() will fire if spa_sync() has not completed in 1000 seconds.
666 Secondly, the value determines if an I/O is considered "hung". Any I/O that
667 has not completed in zfs_deadman_synctime_ms is considered "hung" resulting
668 in a zevent being logged.
669 .sp
670 Default value: \fB1,000,000\fR.
671 .RE
672
673 .sp
674 .ne 2
675 .na
676 \fBzfs_dedup_prefetch\fR (int)
677 .ad
678 .RS 12n
679 Enable prefetching dedup-ed blocks
680 .sp
681 Use \fB1\fR for yes and \fB0\fR to disable (default).
682 .RE
683
684 .sp
685 .ne 2
686 .na
687 \fBzfs_delay_min_dirty_percent\fR (int)
688 .ad
689 .RS 12n
690 Start to delay each transaction once there is this amount of dirty data,
691 expressed as a percentage of \fBzfs_dirty_data_max\fR.
692 This value should be >= zfs_vdev_async_write_active_max_dirty_percent.
693 See the section "ZFS TRANSACTION DELAY".
694 .sp
695 Default value: \fB60\fR.
696 .RE
697
698 .sp
699 .ne 2
700 .na
701 \fBzfs_delay_scale\fR (int)
702 .ad
703 .RS 12n
704 This controls how quickly the transaction delay approaches infinity.
705 Larger values cause longer delays for a given amount of dirty data.
706 .sp
707 For the smoothest delay, this value should be about 1 billion divided
708 by the maximum number of operations per second. This will smoothly
709 handle between 10x and 1/10th this number.
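.sp
An illustrative form of that sizing rule:
.sp
.nf
# Illustrative: for the smoothest delay, zfs_delay_scale is roughly
# 1,000,000,000 divided by the backend's peak operations per second.
def suggested_delay_scale(max_ops_per_second):
    return 1_000_000_000 // max_ops_per_second

print(suggested_delay_scale(2000))    # 500000, the default
print(suggested_delay_scale(20000))   # 50000 for a much faster backend
.fi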
710 .sp
711 See the section "ZFS TRANSACTION DELAY".
712 .sp
713 Note: \fBzfs_delay_scale\fR * \fBzfs_dirty_data_max\fR must be < 2^64.
714 .sp
715 Default value: \fB500,000\fR.
716 .RE
717
718 .sp
719 .ne 2
720 .na
721 \fBzfs_dirty_data_max\fR (int)
722 .ad
723 .RS 12n
724 Determines the dirty space limit in bytes. Once this limit is exceeded, new
725 writes are halted until space frees up. This parameter takes precedence
726 over \fBzfs_dirty_data_max_percent\fR.
727 See the section "ZFS TRANSACTION DELAY".
728 .sp
729 Default value: 10 percent of all memory, capped at \fBzfs_dirty_data_max_max\fR.
730 .RE
731
732 .sp
733 .ne 2
734 .na
735 \fBzfs_dirty_data_max_max\fR (int)
736 .ad
737 .RS 12n
738 Maximum allowable value of \fBzfs_dirty_data_max\fR, expressed in bytes.
739 This limit is only enforced at module load time, and will be ignored if
740 \fBzfs_dirty_data_max\fR is later changed. This parameter takes
741 precedence over \fBzfs_dirty_data_max_max_percent\fR. See the section
742 "ZFS TRANSACTION DELAY".
743 .sp
744 Default value: 25% of physical RAM.
745 .RE
746
747 .sp
748 .ne 2
749 .na
750 \fBzfs_dirty_data_max_max_percent\fR (int)
751 .ad
752 .RS 12n
753 Maximum allowable value of \fBzfs_dirty_data_max\fR, expressed as a
754 percentage of physical RAM. This limit is only enforced at module load
755 time, and will be ignored if \fBzfs_dirty_data_max\fR is later changed.
756 The parameter \fBzfs_dirty_data_max_max\fR takes precedence over this
757 one. See the section "ZFS TRANSACTION DELAY".
758 .sp
759 Default value: \fB25\fR.
760 .RE
761
762 .sp
763 .ne 2
764 .na
765 \fBzfs_dirty_data_max_percent\fR (int)
766 .ad
767 .RS 12n
768 Determines the dirty space limit, expressed as a percentage of all
769 memory. Once this limit is exceeded, new writes are halted until space frees
770 up. The parameter \fBzfs_dirty_data_max\fR takes precedence over this
771 one. See the section "ZFS TRANSACTION DELAY".
772 .sp
773 Default value: 10%, subject to \fBzfs_dirty_data_max_max\fR.
774 .RE
775
776 .sp
777 .ne 2
778 .na
779 \fBzfs_dirty_data_sync\fR (int)
780 .ad
781 .RS 12n
782 Start syncing out a transaction group if there is at least this much dirty data.
783 .sp
784 Default value: \fB67,108,864\fR.
785 .RE
786
787 .sp
788 .ne 2
789 .na
790 \fBzfs_free_max_blocks\fR (ulong)
791 .ad
792 .RS 12n
793 Maximum number of blocks freed in a single txg.
794 .sp
795 Default value: \fB100,000\fR.
796 .RE
797
798 .sp
799 .ne 2
800 .na
801 \fBzfs_vdev_async_read_max_active\fR (int)
802 .ad
803 .RS 12n
804 Maximum asynchronous read I/Os active to each device.
805 See the section "ZFS I/O SCHEDULER".
806 .sp
807 Default value: \fB3\fR.
808 .RE
809
810 .sp
811 .ne 2
812 .na
813 \fBzfs_vdev_async_read_min_active\fR (int)
814 .ad
815 .RS 12n
816 Minimum asynchronous read I/Os active to each device.
817 See the section "ZFS I/O SCHEDULER".
818 .sp
819 Default value: \fB1\fR.
820 .RE
821
822 .sp
823 .ne 2
824 .na
825 \fBzfs_vdev_async_write_active_max_dirty_percent\fR (int)
826 .ad
827 .RS 12n
828 When the pool has more than
829 \fBzfs_vdev_async_write_active_max_dirty_percent\fR dirty data, use
830 \fBzfs_vdev_async_write_max_active\fR to limit active async writes. If
831 the dirty data is between min and max, the active I/O limit is linearly
832 interpolated. See the section "ZFS I/O SCHEDULER".
833 .sp
834 Default value: \fB60\fR.
835 .RE
836
837 .sp
838 .ne 2
839 .na
840 \fBzfs_vdev_async_write_active_min_dirty_percent\fR (int)
841 .ad
842 .RS 12n
843 When the pool has less than
844 \fBzfs_vdev_async_write_active_min_dirty_percent\fR dirty data, use
845 \fBzfs_vdev_async_write_min_active\fR to limit active async writes. If
846 the dirty data is between min and max, the active I/O limit is linearly
847 interpolated. See the section "ZFS I/O SCHEDULER".
848 .sp
849 Default value: \fB30\fR.
850 .RE
851
852 .sp
853 .ne 2
854 .na
855 \fBzfs_vdev_async_write_max_active\fR (int)
856 .ad
857 .RS 12n
858 Maximum asynchronous write I/Os active to each device.
859 See the section "ZFS I/O SCHEDULER".
860 .sp
861 Default value: \fB10\fR.
862 .RE
863
864 .sp
865 .ne 2
866 .na
867 \fBzfs_vdev_async_write_min_active\fR (int)
868 .ad
869 .RS 12n
870 Minimum asynchronous write I/Os active to each device.
871 See the section "ZFS I/O SCHEDULER".
872 .sp
873 Default value: \fB1\fR.
874 .RE
875
876 .sp
877 .ne 2
878 .na
879 \fBzfs_vdev_max_active\fR (int)
880 .ad
881 .RS 12n
882 The maximum number of I/Os active to each device. Ideally, this will be >=
883 the sum of each queue's max_active. It must be at least the sum of each
884 queue's min_active. See the section "ZFS I/O SCHEDULER".
885 .sp
886 Default value: \fB1,000\fR.
887 .RE
888
889 .sp
890 .ne 2
891 .na
892 \fBzfs_vdev_scrub_max_active\fR (int)
893 .ad
894 .RS 12n
895 Maximum scrub I/Os active to each device.
896 See the section "ZFS I/O SCHEDULER".
897 .sp
898 Default value: \fB2\fR.
899 .RE
900
901 .sp
902 .ne 2
903 .na
904 \fBzfs_vdev_scrub_min_active\fR (int)
905 .ad
906 .RS 12n
907 Minimum scrub I/Os active to each device.
908 See the section "ZFS I/O SCHEDULER".
909 .sp
910 Default value: \fB1\fR.
911 .RE
912
913 .sp
914 .ne 2
915 .na
916 \fBzfs_vdev_sync_read_max_active\fR (int)
917 .ad
918 .RS 12n
919 Maximum synchronous read I/Os active to each device.
920 See the section "ZFS I/O SCHEDULER".
921 .sp
922 Default value: \fB10\fR.
923 .RE
924
925 .sp
926 .ne 2
927 .na
928 \fBzfs_vdev_sync_read_min_active\fR (int)
929 .ad
930 .RS 12n
931 Minimum synchronous read I/Os active to each device.
932 See the section "ZFS I/O SCHEDULER".
933 .sp
934 Default value: \fB10\fR.
935 .RE
936
937 .sp
938 .ne 2
939 .na
940 \fBzfs_vdev_sync_write_max_active\fR (int)
941 .ad
942 .RS 12n
943 Maximum synchronous write I/Os active to each device.
944 See the section "ZFS I/O SCHEDULER".
945 .sp
946 Default value: \fB10\fR.
947 .RE
948
949 .sp
950 .ne 2
951 .na
952 \fBzfs_vdev_sync_write_min_active\fR (int)
953 .ad
954 .RS 12n
955 Minimum synchronous write I/Os active to each device.
956 See the section "ZFS I/O SCHEDULER".
957 .sp
958 Default value: \fB10\fR.
959 .RE
960
961 .sp
962 .ne 2
963 .na
964 \fBzfs_disable_dup_eviction\fR (int)
965 .ad
966 .RS 12n
967 Disable duplicate buffer eviction
968 .sp
969 Use \fB1\fR for yes and \fB0\fR for no (default).
970 .RE
971
972 .sp
973 .ne 2
974 .na
975 \fBzfs_expire_snapshot\fR (int)
976 .ad
977 .RS 12n
978 Seconds to expire .zfs/snapshot
979 .sp
980 Default value: \fB300\fR.
981 .RE
982
983 .sp
984 .ne 2
985 .na
986 \fBzfs_admin_snapshot\fR (int)
987 .ad
988 .RS 12n
989 Allow the creation, removal, or renaming of entries in the .zfs/snapshot
990 directory to cause the creation, destruction, or renaming of snapshots.
991 When enabled this functionality works both locally and over NFS exports
992 which have the 'no_root_squash' option set. This functionality is disabled
993 by default.
994 .sp
995 Use \fB1\fR for yes and \fB0\fR for no (default).
996 .RE
997
998 .sp
999 .ne 2
1000 .na
1001 \fBzfs_flags\fR (int)
1002 .ad
1003 .RS 12n
1004 Set additional debugging flags. The following flags may be bitwise-or'd
1005 together.
1006 .sp
1007 .TS
1008 box;
1009 rB lB
1010 lB lB
1011 r l.
1012 Value Symbolic Name
1013 Description
1014 _
1015 1 ZFS_DEBUG_DPRINTF
1016 Enable dprintf entries in the debug log.
1017 _
1018 2 ZFS_DEBUG_DBUF_VERIFY *
1019 Enable extra dbuf verifications.
1020 _
1021 4 ZFS_DEBUG_DNODE_VERIFY *
1022 Enable extra dnode verifications.
1023 _
1024 8 ZFS_DEBUG_SNAPNAMES
1025 Enable snapshot name verification.
1026 _
1027 16 ZFS_DEBUG_MODIFY
1028 Check for illegally modified ARC buffers.
1029 _
1030 32 ZFS_DEBUG_SPA
1031 Enable spa_dbgmsg entries in the debug log.
1032 _
1033 64 ZFS_DEBUG_ZIO_FREE
1034 Enable verification of block frees.
1035 _
1036 128 ZFS_DEBUG_HISTOGRAM_VERIFY
1037 Enable extra spacemap histogram verifications.
1038 .TE
1039 .sp
1040 * Requires debug build.
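.sp
For example (illustrative only), the flag values combine with a bitwise
OR, so enabling both dprintf and spa_dbgmsg entries means setting
\fBzfs_flags\fR to 33:
.sp
.nf
# Illustrative: combining zfs_flags values with bitwise OR.
ZFS_DEBUG_DPRINTF = 1
ZFS_DEBUG_SPA = 32

zfs_flags = ZFS_DEBUG_DPRINTF | ZFS_DEBUG_SPA
print(zfs_flags)    # 33
.fi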
1041 .sp
1042 Default value: \fB0\fR.
1043 .RE
1044
1045 .sp
1046 .ne 2
1047 .na
1048 \fBzfs_free_leak_on_eio\fR (int)
1049 .ad
1050 .RS 12n
1051 If destroy encounters an EIO while reading metadata (e.g. indirect
1052 blocks), space referenced by the missing metadata can not be freed.
1053 Normally this causes the background destroy to become "stalled", as
1054 it is unable to make forward progress. While in this stalled state,
1055 all remaining space to free from the error-encountering filesystem is
1056 "temporarily leaked". Set this flag to cause it to ignore the EIO,
1057 permanently leak the space from indirect blocks that can not be read,
1058 and continue to free everything else that it can.
1059
1060 The default, "stalling" behavior is useful if the storage partially
1061 fails (i.e. some but not all i/os fail), and then later recovers. In
1062 this case, we will be able to continue pool operations while it is
1063 partially failed, and when it recovers, we can continue to free the
1064 space, with no leaks. However, note that this case is actually
1065 fairly rare.
1066
1067 Typically pools either (a) fail completely (but perhaps temporarily,
1068 e.g. a top-level vdev going offline), or (b) have localized,
1069 permanent errors (e.g. disk returns the wrong data due to bit flip or
1070 firmware bug). In case (a), this setting does not matter because the
1071 pool will be suspended and the sync thread will not be able to make
1072 forward progress regardless. In case (b), because the error is
1073 permanent, the best we can do is leak the minimum amount of space,
1074 which is what setting this flag will do. Therefore, it is reasonable
1075 for this flag to normally be set, but we chose the more conservative
1076 approach of not setting it, so that there is no possibility of
1077 leaking space in the "partial temporary" failure case.
1078 .sp
1079 Default value: \fB0\fR.
1080 .RE
1081
1082 .sp
1083 .ne 2
1084 .na
1085 \fBzfs_free_min_time_ms\fR (int)
1086 .ad
1087 .RS 12n
1088 Min millisecs to free per txg
1089 .sp
1090 Default value: \fB1,000\fR.
1091 .RE
1092
1093 .sp
1094 .ne 2
1095 .na
1096 \fBzfs_immediate_write_sz\fR (long)
1097 .ad
1098 .RS 12n
1099 Largest data block to write to zil
1100 .sp
1101 Default value: \fB32,768\fR.
1102 .RE
1103
1104 .sp
1105 .ne 2
1106 .na
1107 \fBzfs_max_recordsize\fR (int)
1108 .ad
1109 .RS 12n
1110 We currently support block sizes from 512 bytes to 16MB. The benefits of
1111 larger blocks, and thus larger IO, need to be weighed against the cost of
1112 COWing a giant block to modify one byte. Additionally, very large blocks
1113 can have an impact on i/o latency, and also potentially on the memory
1114 allocator. Therefore, we do not allow the recordsize to be set larger than
1115 zfs_max_recordsize (default 1MB). Larger blocks can be created by changing
1116 this tunable, and pools with larger blocks can always be imported and used,
1117 regardless of this setting.
1118 .sp
1119 Default value: \fB1,048,576\fR.
1120 .RE
1121
1122 .sp
1123 .ne 2
1124 .na
1125 \fBzfs_mdcomp_disable\fR (int)
1126 .ad
1127 .RS 12n
1128 Disable meta data compression
1129 .sp
1130 Use \fB1\fR for yes and \fB0\fR for no (default).
1131 .RE
1132
1133 .sp
1134 .ne 2
1135 .na
1136 \fBzfs_metaslab_fragmentation_threshold\fR (int)
1137 .ad
1138 .RS 12n
1139 Allow metaslabs to keep their active state as long as their fragmentation
1140 percentage is less than or equal to this value. An active metaslab that
1141 exceeds this threshold will no longer keep its active status allowing
1142 better metaslabs to be selected.
1143 .sp
1144 Default value: \fB70\fR.
1145 .RE
1146
1147 .sp
1148 .ne 2
1149 .na
1150 \fBzfs_mg_fragmentation_threshold\fR (int)
1151 .ad
1152 .RS 12n
1153 Metaslab groups are considered eligible for allocations if their
1154 fragmentation metric (measured as a percentage) is less than or equal to
1155 this value. If a metaslab group exceeds this threshold then it will be
1156 skipped unless all metaslab groups within the metaslab class have also
1157 crossed this threshold.
1158 .sp
1159 Default value: \fB85\fR.
1160 .RE
1161
1162 .sp
1163 .ne 2
1164 .na
1165 \fBzfs_mg_noalloc_threshold\fR (int)
1166 .ad
1167 .RS 12n
1168 Defines a threshold at which metaslab groups should be eligible for
1169 allocations. The value is expressed as a percentage of free space
1170 beyond which a metaslab group is always eligible for allocations.
1171 If a metaslab group's free space is less than or equal to the
1172 threshold, the allocator will avoid allocating to that group
1173 unless all groups in the pool have reached the threshold. Once all
1174 groups have reached the threshold, all groups are allowed to accept
1175 allocations. The default value of 0 disables the feature and causes
1176 all metaslab groups to be eligible for allocations.
1177
1178 This parameter makes it possible to deal with pools having heavily imbalanced
1179 vdevs such as would be the case when a new vdev has been added.
1180 Setting the threshold to a non-zero percentage will stop allocations
1181 from being made to vdevs that aren't filled to the specified percentage
1182 and allow lesser filled vdevs to acquire more allocations than they
1183 otherwise would under the old \fBzfs_mg_alloc_failures\fR facility.
1184 .sp
1185 Default value: \fB0\fR.
1186 .RE
1187
1188 .sp
1189 .ne 2
1190 .na
1191 \fBzfs_no_scrub_io\fR (int)
1192 .ad
1193 .RS 12n
1194 Set for no scrub I/O
1195 .sp
1196 Use \fB1\fR for yes and \fB0\fR for no (default).
1197 .RE
1198
1199 .sp
1200 .ne 2
1201 .na
1202 \fBzfs_no_scrub_prefetch\fR (int)
1203 .ad
1204 .RS 12n
1205 Set for no scrub prefetching
1206 .sp
1207 Use \fB1\fR for yes and \fB0\fR for no (default).
1208 .RE
1209
1210 .sp
1211 .ne 2
1212 .na
1213 \fBzfs_nocacheflush\fR (int)
1214 .ad
1215 .RS 12n
1216 Disable cache flushes
1217 .sp
1218 Use \fB1\fR for yes and \fB0\fR for no (default).
1219 .RE
1220
1221 .sp
1222 .ne 2
1223 .na
1224 \fBzfs_nopwrite_enabled\fR (int)
1225 .ad
1226 .RS 12n
1227 Enable NOP writes
1228 .sp
1229 Use \fB1\fR for yes (default) and \fB0\fR to disable.
1230 .RE
1231
1232 .sp
1233 .ne 2
1234 .na
1235 \fBzfs_pd_bytes_max\fR (int)
1236 .ad
1237 .RS 12n
1238 The number of bytes which should be prefetched during a pool traversal (e.g. by \fBzfs send\fR or other data crawling operations).
1239 .sp
1240 Default value: \fB52,428,800\fR.
1241 .RE
1242
1243 .sp
1244 .ne 2
1245 .na
1246 \fBzfs_prefetch_disable\fR (int)
1247 .ad
1248 .RS 12n
1249 This tunable disables predictive prefetch. Note that it leaves "prescient"
1250 prefetch (e.g. prefetch for zfs send) intact. Unlike predictive prefetch,
1251 prescient prefetch never issues i/os that end up not being needed, so it
1252 can't hurt performance.
1253 .sp
1254 Use \fB1\fR for yes and \fB0\fR for no (default).
1255 .RE
1256
1257 .sp
1258 .ne 2
1259 .na
1260 \fBzfs_read_chunk_size\fR (long)
1261 .ad
1262 .RS 12n
1263 Bytes to read per chunk
1264 .sp
1265 Default value: \fB1,048,576\fR.
1266 .RE
1267
1268 .sp
1269 .ne 2
1270 .na
1271 \fBzfs_read_history\fR (int)
1272 .ad
1273 .RS 12n
1274 Historic statistics for the last N reads
1275 .sp
1276 Default value: \fB0\fR.
1277 .RE
1278
1279 .sp
1280 .ne 2
1281 .na
1282 \fBzfs_read_history_hits\fR (int)
1283 .ad
1284 .RS 12n
1285 Include cache hits in read history
1286 .sp
1287 Use \fB1\fR for yes and \fB0\fR for no (default).
1288 .RE
1289
1290 .sp
1291 .ne 2
1292 .na
1293 \fBzfs_recover\fR (int)
1294 .ad
1295 .RS 12n
1296 Set to attempt to recover from fatal errors. This should only be used as a
1297 last resort, as it typically results in leaked space, or worse.
1298 .sp
1299 Use \fB1\fR for yes and \fB0\fR for no (default).
1300 .RE
1301
1302 .sp
1303 .ne 2
1304 .na
1305 \fBzfs_resilver_delay\fR (int)
1306 .ad
1307 .RS 12n
1308 Number of ticks to delay prior to issuing a resilver I/O operation when
1309 a non-resilver or non-scrub I/O operation has occurred within the past
1310 \fBzfs_scan_idle\fR ticks.
1311 .sp
1312 Default value: \fB2\fR.
1313 .RE
1314
1315 .sp
1316 .ne 2
1317 .na
1318 \fBzfs_resilver_min_time_ms\fR (int)
1319 .ad
1320 .RS 12n
1321 Min millisecs to resilver per txg
1322 .sp
1323 Default value: \fB3,000\fR.
1324 .RE
1325
1326 .sp
1327 .ne 2
1328 .na
1329 \fBzfs_scan_idle\fR (int)
1330 .ad
1331 .RS 12n
1332 Idle window in clock ticks. During a scrub or a resilver, if
1333 a non-scrub or non-resilver I/O operation has occurred during this
1334 window, the next scrub or resilver operation is delayed by
1335 \fBzfs_scrub_delay\fR or \fBzfs_resilver_delay\fR ticks, respectively.
1336 .sp
1337 Default value: \fB50\fR.
1338 .RE
1339
1340 .sp
1341 .ne 2
1342 .na
1343 \fBzfs_scan_min_time_ms\fR (int)
1344 .ad
1345 .RS 12n
1346 Min millisecs to scrub per txg
1347 .sp
1348 Default value: \fB1,000\fR.
1349 .RE
1350
1351 .sp
1352 .ne 2
1353 .na
1354 \fBzfs_scrub_delay\fR (int)
1355 .ad
1356 .RS 12n
1357 Number of ticks to delay prior to issuing a scrub I/O operation when
1358 a non-scrub or non-resilver I/O operation has occurred within the past
1359 \fBzfs_scan_idle\fR ticks.
1360 .sp
1361 Default value: \fB4\fR.
1362 .RE
1363
1364 .sp
1365 .ne 2
1366 .na
1367 \fBzfs_send_corrupt_data\fR (int)
1368 .ad
1369 .RS 12n
1370 Allow sending of corrupt data (ignore read/checksum errors when sending data)
1371 .sp
1372 Use \fB1\fR for yes and \fB0\fR for no (default).
1373 .RE
1374
1375 .sp
1376 .ne 2
1377 .na
1378 \fBzfs_sync_pass_deferred_free\fR (int)
1379 .ad
1380 .RS 12n
1381 Defer frees starting in this pass
1382 .sp
1383 Default value: \fB2\fR.
1384 .RE
1385
1386 .sp
1387 .ne 2
1388 .na
1389 \fBzfs_sync_pass_dont_compress\fR (int)
1390 .ad
1391 .RS 12n
1392 Don't compress starting in this pass
1393 .sp
1394 Default value: \fB5\fR.
1395 .RE
1396
1397 .sp
1398 .ne 2
1399 .na
1400 \fBzfs_sync_pass_rewrite\fR (int)
1401 .ad
1402 .RS 12n
1403 Rewrite new bps starting in this pass
1404 .sp
1405 Default value: \fB2\fR.
1406 .RE
1407
1408 .sp
1409 .ne 2
1410 .na
1411 \fBzfs_top_maxinflight\fR (int)
1412 .ad
1413 .RS 12n
1414 Max I/Os per top-level vdev during scrub or resilver operations.
1415 .sp
1416 Default value: \fB32\fR.
1417 .RE
1418
1419 .sp
1420 .ne 2
1421 .na
1422 \fBzfs_txg_history\fR (int)
1423 .ad
1424 .RS 12n
1425 Historic statistics for the last N txgs
1426 .sp
1427 Default value: \fB0\fR.
1428 .RE
1429
1430 .sp
1431 .ne 2
1432 .na
1433 \fBzfs_txg_timeout\fR (int)
1434 .ad
1435 .RS 12n
1436 Max seconds worth of delta per txg
1437 .sp
1438 Default value: \fB5\fR.
1439 .RE
1440
1441 .sp
1442 .ne 2
1443 .na
1444 \fBzfs_vdev_aggregation_limit\fR (int)
1445 .ad
1446 .RS 12n
1447 Max vdev I/O aggregation size
1448 .sp
1449 Default value: \fB131,072\fR.
1450 .RE
1451
1452 .sp
1453 .ne 2
1454 .na
1455 \fBzfs_vdev_cache_bshift\fR (int)
1456 .ad
1457 .RS 12n
1458 Shift size to inflate reads to
1459 .sp
1460 Default value: \fB16\fR.
1461 .RE
1462
1463 .sp
1464 .ne 2
1465 .na
1466 \fBzfs_vdev_cache_max\fR (int)
1467 .ad
1468 .RS 12n
1469 Inflate reads smaller than this value
1470 .RE
1471
1472 .sp
1473 .ne 2
1474 .na
1475 \fBzfs_vdev_cache_size\fR (int)
1476 .ad
1477 .RS 12n
1478 Total size of the per-disk cache
1479 .sp
1480 Default value: \fB0\fR.
1481 .RE
1482
1483 .sp
1484 .ne 2
1485 .na
1486 \fBzfs_vdev_mirror_switch_us\fR (int)
1487 .ad
1488 .RS 12n
1489 Switch mirrors every N usecs
1490 .sp
1491 Default value: \fB10,000\fR.
1492 .RE
1493
1494 .sp
1495 .ne 2
1496 .na
1497 \fBzfs_vdev_read_gap_limit\fR (int)
1498 .ad
1499 .RS 12n
1500 Aggregate read I/O over gap
1501 .sp
1502 Default value: \fB32,768\fR.
1503 .RE
1504
1505 .sp
1506 .ne 2
1507 .na
1508 \fBzfs_vdev_scheduler\fR (charp)
1509 .ad
1510 .RS 12n
1511 Set the Linux I/O scheduler on whole disk vdevs while importing a pool.
1512 .sp
1513 Default value: \fBnoop\fR.
1514 .RE
1515
1516 .sp
1517 .ne 2
1518 .na
1519 \fBzfs_vdev_write_gap_limit\fR (int)
1520 .ad
1521 .RS 12n
1522 Aggregate write I/O over gap
1523 .sp
1524 Default value: \fB4,096\fR.
1525 .RE
1526
1527 .sp
1528 .ne 2
1529 .na
1530 \fBzfs_zevent_cols\fR (int)
1531 .ad
1532 .RS 12n
1533 Max event column width
1534 .sp
1535 Default value: \fB80\fR.
1536 .RE
1537
1538 .sp
1539 .ne 2
1540 .na
1541 \fBzfs_zevent_console\fR (int)
1542 .ad
1543 .RS 12n
1544 Log events to the console
1545 .sp
1546 Use \fB1\fR for yes and \fB0\fR for no (default).
1547 .RE
1548
1549 .sp
1550 .ne 2
1551 .na
1552 \fBzfs_zevent_len_max\fR (int)
1553 .ad
1554 .RS 12n
1555 Max event queue length
1556 .sp
1557 Default value: \fB0\fR.
1558 .RE
1559
1560 .sp
1561 .ne 2
1562 .na
1563 \fBzil_replay_disable\fR (int)
1564 .ad
1565 .RS 12n
1566 Disable intent logging replay
1567 .sp
1568 Use \fB1\fR for yes and \fB0\fR for no (default).
1569 .RE
1570
1571 .sp
1572 .ne 2
1573 .na
1574 \fBzil_slog_limit\fR (ulong)
1575 .ad
1576 .RS 12n
1577 Max commit bytes to separate log device
1578 .sp
1579 Default value: \fB1,048,576\fR.
1580 .RE
1581
1582 .sp
1583 .ne 2
1584 .na
1585 \fBzio_delay_max\fR (int)
1586 .ad
1587 .RS 12n
1588 Max zio millisecond delay before posting event
1589 .sp
1590 Default value: \fB30,000\fR.
1591 .RE
1592
1593 .sp
1594 .ne 2
1595 .na
1596 \fBzio_requeue_io_start_cut_in_line\fR (int)
1597 .ad
1598 .RS 12n
1599 Prioritize requeued I/O
1600 .sp
1601 Default value: \fB0\fR.
1602 .RE
1603
1604 .sp
1605 .ne 2
1606 .na
1607 \fBzio_taskq_batch_pct\fR (uint)
1608 .ad
1609 .RS 12n
1610 Percentage of online CPUs (or CPU cores, etc) which will run a worker thread
1611 for IO. These workers are responsible for IO work such as compression and
1612 checksum calculations. A fractional number of CPUs will be rounded down.
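.sp
An illustrative version of that calculation:
.sp
.nf
# Illustrative arithmetic for the percentage described above:
# IO worker threads as a share of online CPUs, rounded down.
def io_workers(online_cpus, zio_taskq_batch_pct=75):
    return online_cpus * zio_taskq_batch_pct // 100

print(io_workers(8))     # 6 of 8 CPUs
print(io_workers(32))    # 24 of 32 CPUs
.fi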
1613 .sp
1614 The default value of 75 was chosen to avoid using all CPUs which can result in
1615 latency issues and inconsistent application performance, especially when high
1616 compression is enabled.
1617 .sp
1618 Default value: \fB75\fR.
1619 .RE
1620
1621 .sp
1622 .ne 2
1623 .na
1624 \fBzvol_inhibit_dev\fR (uint)
1625 .ad
1626 .RS 12n
1627 Do not create zvol device nodes
1628 .sp
1629 Use \fB1\fR for yes and \fB0\fR for no (default).
1630 .RE
1631
1632 .sp
1633 .ne 2
1634 .na
1635 \fBzvol_major\fR (uint)
1636 .ad
1637 .RS 12n
1638 Major number for zvol device
1639 .sp
1640 Default value: \fB230\fR.
1641 .RE
1642
1643 .sp
1644 .ne 2
1645 .na
1646 \fBzvol_max_discard_blocks\fR (ulong)
1647 .ad
1648 .RS 12n
1649 Max number of blocks to discard at once
1650 .sp
1651 Default value: \fB16,384\fR.
1652 .RE
1653
1654 .sp
1655 .ne 2
1656 .na
1657 \fBzvol_prefetch_bytes\fR (uint)
1658 .ad
1659 .RS 12n
1660 When adding a zvol to the system, prefetch \fBzvol_prefetch_bytes\fR
1661 from the start and end of the volume. Prefetching these regions
1662 of the volume is desirable because they are likely to be accessed
1663 immediately by \fBblkid(8)\fR or by the kernel scanning for a partition
1664 table.
1665 .sp
1666 Default value: \fB131,072\fR.
1667 .RE
1668
1669 .SH ZFS I/O SCHEDULER
1670 ZFS issues I/O operations to leaf vdevs to satisfy and complete I/Os.
1671 The I/O scheduler determines when and in what order those operations are
1672 issued. The I/O scheduler divides operations into five I/O classes
1673 prioritized in the following order: sync read, sync write, async read,
1674 async write, and scrub/resilver. Each queue defines the minimum and
1675 maximum number of concurrent operations that may be issued to the
1676 device. In addition, the device has an aggregate maximum,
1677 \fBzfs_vdev_max_active\fR. Note that the sum of the per-queue minimums
1678 must not exceed the aggregate maximum. If the sum of the per-queue
1679 maximums exceeds the aggregate maximum, then the number of active I/Os
1680 may reach \fBzfs_vdev_max_active\fR, in which case no further I/Os will
1681 be issued regardless of whether all per-queue minimums have been met.
1682 .sp
1683 For many physical devices, throughput increases with the number of
1684 concurrent operations, but latency typically suffers. Further, physical
1685 devices typically have a limit at which more concurrent operations have no
1686 effect on throughput or can actually cause it to decrease.
1687 .sp
1688 The scheduler selects the next operation to issue by first looking for an
1689 I/O class whose minimum has not been satisfied. Once all are satisfied and
1690 the aggregate maximum has not been hit, the scheduler looks for classes
1691 whose maximum has not been satisfied. Iteration through the I/O classes is
1692 done in the order specified above. No further operations are issued if the
1693 aggregate maximum number of concurrent operations has been hit or if there
1694 are no operations queued for an I/O class that has not hit its maximum.
1695 Every time an I/O is queued or an operation completes, the I/O scheduler
1696 looks for new operations to issue.
1697 .sp
1698 In general, smaller max_active's will lead to lower latency of synchronous
1699 operations. Larger max_active's may lead to higher overall throughput,
1700 depending on underlying storage.
1701 .sp
1702 The ratio of the queues' max_actives determines the balance of performance
1703 between reads, writes, and scrubs. E.g., increasing
1704 \fBzfs_vdev_scrub_max_active\fR will cause the scrub or resilver to complete
1705 more quickly, but reads and writes to have higher latency and lower throughput.
1706 .sp
1707 All I/O classes have a fixed maximum number of outstanding operations
1708 except for the async write class. Asynchronous writes represent the data
1709 that is committed to stable storage during the syncing stage for
1710 transaction groups. Transaction groups enter the syncing state
1711 periodically so the number of queued async writes will quickly burst up
1712 and then bleed down to zero. Rather than servicing them as quickly as
1713 possible, the I/O scheduler changes the maximum number of active async
1714 write I/Os according to the amount of dirty data in the pool. Since
1715 both throughput and latency typically increase with the number of
1716 concurrent operations issued to physical devices, reducing the
1717 burstiness in the number of concurrent operations also stabilizes the
1718 response time of operations from other -- and in particular synchronous
1719 -- queues. In broad strokes, the I/O scheduler will issue more
1720 concurrent operations from the async write queue as there's more dirty
1721 data in the pool.
1722 .sp
1723 Async Writes
1724 .sp
1725 The number of concurrent operations issued for the async write I/O class
1726 follows a piece-wise linear function defined by a few adjustable points.
1727 .nf
1728
1729 | o---------| <-- zfs_vdev_async_write_max_active
1730 ^ | /^ |
1731 | | / | |
1732 active | / | |
1733 I/O | / | |
1734 count | / | |
1735 | / | |
1736 |-------o | | <-- zfs_vdev_async_write_min_active
1737 0|_______^______|_________|
1738 0% | | 100% of zfs_dirty_data_max
1739 | |
1740 | `-- zfs_vdev_async_write_active_max_dirty_percent
1741 `--------- zfs_vdev_async_write_active_min_dirty_percent
1742
1743 .fi
1744 Until the amount of dirty data exceeds a minimum percentage of the dirty
1745 data allowed in the pool, the I/O scheduler will limit the number of
1746 concurrent operations to the minimum. As that threshold is crossed, the
1747 number of concurrent operations issued increases linearly to the maximum at
1748 the specified maximum percentage of the dirty data allowed in the pool.
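.sp
A minimal sketch of that piece-wise linear function, assuming the default
values of the four tunables shown in the diagram (this is an illustration,
not the exact in-kernel integer arithmetic):
.sp
.nf
# Sketch: active async-write limit as a function of pool dirty data.
def async_write_max_active(dirty, dirty_max,
                           min_active=1, max_active=10,
                           min_pct=30, max_pct=60):
    pct = 100.0 * dirty / dirty_max
    if pct <= min_pct:
        return min_active
    if pct >= max_pct:
        return max_active
    frac = (pct - min_pct) / (max_pct - min_pct)
    return int(min_active + frac * (max_active - min_active))

for pct in (10, 30, 45, 60, 90):
    print(pct, async_write_max_active(pct, 100))
.fi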
1749 .sp
1750 Ideally, the amount of dirty data on a busy pool will stay in the sloped
1751 part of the function between \fBzfs_vdev_async_write_active_min_dirty_percent\fR
1752 and \fBzfs_vdev_async_write_active_max_dirty_percent\fR. If it exceeds the
1753 maximum percentage, this indicates that the rate of incoming data is
1754 greater than the rate that the backend storage can handle. In this case, we
1755 must further throttle incoming writes, as described in the next section.
1756
1757 .SH ZFS TRANSACTION DELAY
1758 We delay transactions when we've determined that the backend storage
1759 isn't able to accommodate the rate of incoming writes.
1760 .sp
1761 If there is already a transaction waiting, we delay relative to when
1762 that transaction will finish waiting. This way the calculated delay time
1763 is independent of the number of threads concurrently executing
1764 transactions.
1765 .sp
1766 If we are the only waiter, wait relative to when the transaction
1767 started, rather than the current time. This credits the transaction for
1768 "time already served", e.g. reading indirect blocks.
1769 .sp
1770 The minimum time for a transaction to take is calculated as:
1771 .nf
1772 min_time = zfs_delay_scale * (dirty - min) / (max - dirty)
1773 min_time is then capped at 100 milliseconds.
1774 .fi
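.sp
An illustrative evaluation of this formula with the default tunables
(\fBzfs_delay_scale\fR=500,000 and \fBzfs_delay_min_dirty_percent\fR=60),
assuming a hypothetical \fBzfs_dirty_data_max\fR of 4 GiB:
.sp
.nf
# Illustrative only: delay (in nanoseconds) added per transaction.
def tx_delay_ns(dirty, dirty_max, delay_scale=500000, min_dirty_pct=60):
    lo = dirty_max * min_dirty_pct // 100
    if dirty <= lo:
        return 0
    ns = delay_scale * (dirty - lo) / (dirty_max - dirty)
    return min(ns, 100_000_000)       # capped at 100 milliseconds

dirty_max = 4 * 1024**3               # hypothetical zfs_dirty_data_max
for pct in (60, 80, 95, 99):
    dirty = dirty_max * pct // 100
    print(pct, round(tx_delay_ns(dirty, dirty_max) / 1000), "us")
.fi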
1775 .sp
1776 The delay has two degrees of freedom that can be adjusted via tunables. The
1777 percentage of dirty data at which we start to delay is defined by
1778 \fBzfs_delay_min_dirty_percent\fR. This should typically be at or above
1779 \fBzfs_vdev_async_write_active_max_dirty_percent\fR so that we only start to
1780 delay after writing at full speed has failed to keep up with the incoming write
1781 rate. The scale of the curve is defined by \fBzfs_delay_scale\fR. Roughly speaking,
1782 this variable determines the amount of delay at the midpoint of the curve.
1783 .sp
1784 .nf
1785 delay
1786 10ms +-------------------------------------------------------------*+
1787 | *|
1788 9ms + *+
1789 | *|
1790 8ms + *+
1791 | * |
1792 7ms + * +
1793 | * |
1794 6ms + * +
1795 | * |
1796 5ms + * +
1797 | * |
1798 4ms + * +
1799 | * |
1800 3ms + * +
1801 | * |
1802 2ms + (midpoint) * +
1803 | | ** |
1804 1ms + v *** +
1805 | zfs_delay_scale ----------> ******** |
1806 0 +-------------------------------------*********----------------+
1807 0% <- zfs_dirty_data_max -> 100%
1808 .fi
1809 .sp
1810 Note that since the delay is added to the outstanding time remaining on the
1811 most recent transaction, the delay is effectively the inverse of IOPS.
1812 Here the midpoint of 500us translates to 2000 IOPS. The shape of the curve
1813 was chosen such that small changes in the amount of accumulated dirty data
1814 in the first 3/4 of the curve yield relatively small differences in the
1815 amount of delay.
1816 .sp
1817 The effects can be easier to understand when the amount of delay is
1818 represented on a log scale:
1819 .sp
1820 .nf
1821 delay
1822 100ms +-------------------------------------------------------------++
1823 + +
1824 | |
1825 + *+
1826 10ms + *+
1827 + ** +
1828 | (midpoint) ** |
1829 + | ** +
1830 1ms + v **** +
1831 + zfs_delay_scale ----------> ***** +
1832 | **** |
1833 + **** +
1834 100us + ** +
1835 + * +
1836 | * |
1837 + * +
1838 10us + * +
1839 + +
1840 | |
1841 + +
1842 +--------------------------------------------------------------+
1843 0% <- zfs_dirty_data_max -> 100%
1844 .fi
1845 .sp
1846 Note here that only as the amount of dirty data approaches its limit does
1847 the delay start to increase rapidly. The goal of a properly tuned system
1848 should be to keep the amount of dirty data out of that range by first
1849 ensuring that the appropriate limits are set for the I/O scheduler to reach
1850 optimal throughput on the backend storage, and then by changing the value
1851 of \fBzfs_delay_scale\fR to increase the steepness of the curve.