1 '\" te
2 .\" Copyright (c) 2013 by Turbo Fredriksson <turbo@bayour.com>. All rights reserved.
3 .\" The contents of this file are subject to the terms of the Common Development
4 .\" and Distribution License (the "License"). You may not use this file except
5 .\" in compliance with the License. You can obtain a copy of the license at
6 .\" usr/src/OPENSOLARIS.LICENSE or http://www.opensolaris.org/os/licensing.
7 .\"
8 .\" See the License for the specific language governing permissions and
9 .\" limitations under the License. When distributing Covered Code, include this
10 .\" CDDL HEADER in each file and include the License file at
11 .\" usr/src/OPENSOLARIS.LICENSE. If applicable, add the following below this
12 .\" CDDL HEADER, with the fields enclosed by brackets "[]" replaced with your
13 .\" own identifying information:
14 .\" Portions Copyright [yyyy] [name of copyright owner]
15 .TH ZFS-MODULE-PARAMETERS 5 "Nov 16, 2013"
16 .SH NAME
17 zfs\-module\-parameters \- ZFS module parameters
18 .SH DESCRIPTION
19 .sp
20 .LP
21 Description of the different parameters to the ZFS module.
22
23 .SS "Module parameters"
24 .sp
25 .LP
26
27 .sp
28 .ne 2
29 .na
30 \fBl2arc_feed_again\fR (int)
31 .ad
32 .RS 12n
33 Turbo L2ARC warmup
34 .sp
35 Use \fB1\fR for yes (default) and \fB0\fR to disable.
36 .RE
37
38 .sp
39 .ne 2
40 .na
41 \fBl2arc_feed_min_ms\fR (ulong)
42 .ad
43 .RS 12n
44 Min feed interval in milliseconds
45 .sp
46 Default value: \fB200\fR.
47 .RE
48
49 .sp
50 .ne 2
51 .na
52 \fBl2arc_feed_secs\fR (ulong)
53 .ad
54 .RS 12n
55 Seconds between L2ARC writing
56 .sp
57 Default value: \fB1\fR.
58 .RE
59
60 .sp
61 .ne 2
62 .na
63 \fBl2arc_headroom\fR (ulong)
64 .ad
65 .RS 12n
66 Number of max device writes to precache
67 .sp
68 Default value: \fB2\fR.
69 .RE
70
71 .sp
72 .ne 2
73 .na
74 \fBl2arc_headroom_boost\fR (ulong)
75 .ad
76 .RS 12n
77 Compressed l2arc_headroom multiplier
78 .sp
79 Default value: \fB200\fR.
80 .RE
81
82 .sp
83 .ne 2
84 .na
85 \fBl2arc_nocompress\fR (int)
86 .ad
87 .RS 12n
88 Skip compressing L2ARC buffers
89 .sp
90 Use \fB1\fR for yes and \fB0\fR for no (default).
91 .RE
92
93 .sp
94 .ne 2
95 .na
96 \fBl2arc_noprefetch\fR (int)
97 .ad
98 .RS 12n
99 Skip caching prefetched buffers
100 .sp
101 Use \fB1\fR for yes (default) and \fB0\fR to disable.
102 .RE
103
104 .sp
105 .ne 2
106 .na
107 \fBl2arc_norw\fR (int)
108 .ad
109 .RS 12n
110 No reads during writes
111 .sp
112 Use \fB1\fR for yes and \fB0\fR for no (default).
113 .RE
114
115 .sp
116 .ne 2
117 .na
118 \fBl2arc_write_boost\fR (ulong)
119 .ad
120 .RS 12n
121 Extra write bytes during device warmup
122 .sp
123 Default value: \fB8,388,608\fR.
124 .RE
125
126 .sp
127 .ne 2
128 .na
129 \fBl2arc_write_max\fR (ulong)
130 .ad
131 .RS 12n
132 Max write bytes per interval
133 .sp
134 Default value: \fB8,388,608\fR.
135 .RE
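.sp
.LP
Taken together, the L2ARC feed tunables above control how much each feed
pass may write and how far ahead of the ARC tail it scans. The following
Python sketch is an approximation for illustration only, not the in-kernel
logic.
.sp
.nf
# Approximate L2ARC feed sizing (illustrative only, not the kernel code).
def l2arc_feed_sizes(warmup, compressed_l2arc,
                     l2arc_write_max=8388608, l2arc_write_boost=8388608,
                     l2arc_headroom=2, l2arc_headroom_boost=200):
    # While the cache device is warming up, each pass may write extra bytes.
    write_size = l2arc_write_max + (l2arc_write_boost if warmup else 0)
    # The feed thread scans roughly this many bytes ahead of the ARC tail,
    # boosted (as a percentage) when compressed L2ARC is in use.
    headroom = write_size * l2arc_headroom
    if compressed_l2arc:
        headroom = headroom * l2arc_headroom_boost // 100
    return write_size, headroom

print(l2arc_feed_sizes(warmup=True, compressed_l2arc=True))
.fi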
136
137 .sp
138 .ne 2
139 .na
140 \fBmetaslab_bias_enabled\fR (int)
141 .ad
142 .RS 12n
143 Enable metaslab group biasing based on its vdev's over- or under-utilization
144 relative to the pool.
145 .sp
146 Use \fB1\fR for yes (default) and \fB0\fR for no.
147 .RE
148
149 .sp
150 .ne 2
151 .na
152 \fBmetaslab_debug_load\fR (int)
153 .ad
154 .RS 12n
155 Load all metaslabs during pool import.
156 .sp
157 Use \fB1\fR for yes and \fB0\fR for no (default).
158 .RE
159
160 .sp
161 .ne 2
162 .na
163 \fBmetaslab_debug_unload\fR (int)
164 .ad
165 .RS 12n
166 Prevent metaslabs from being unloaded.
167 .sp
168 Use \fB1\fR for yes and \fB0\fR for no (default).
169 .RE
170
171 .sp
172 .ne 2
173 .na
174 \fBmetaslab_fragmentation_factor_enabled\fR (int)
175 .ad
176 .RS 12n
177 Enable use of the fragmentation metric in computing metaslab weights.
178 .sp
179 Use \fB1\fR for yes (default) and \fB0\fR for no.
180 .RE
181
182 .sp
183 .ne 2
184 .na
185 \fBmetaslabs_per_vdev\fR (int)
186 .ad
187 .RS 12n
188 When a vdev is added, it will be divided into approximately (but no more than) this number of metaslabs.
189 .sp
190 Default value: \fB200\fR.
191 .RE
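.sp
.LP
As a rough, hedged approximation (the real code rounds the metaslab size to
a power of two; the helper below is illustrative only), the resulting
metaslab size can be pictured as:
.sp
.nf
# Rough approximation of metaslab sizing (illustrative only).
def approx_metaslab_size(vdev_size_bytes, metaslabs_per_vdev=200):
    # The size is rounded up to a power of two, so the resulting count
    # is approximately, but never more than, metaslabs_per_vdev.
    target = vdev_size_bytes // metaslabs_per_vdev
    size = 1
    while size < target:
        size *= 2
    return size

# e.g. a 2 TiB vdev gets roughly 16 GiB metaslabs (128 of them)
print(approx_metaslab_size(2 * 2**40) / 2**30, "GiB")
.fi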
192
193 .sp
194 .ne 2
195 .na
196 \fBmetaslab_preload_enabled\fR (int)
197 .ad
198 .RS 12n
199 Enable metaslab group preloading.
200 .sp
201 Use \fB1\fR for yes (default) and \fB0\fR for no.
202 .RE
203
204 .sp
205 .ne 2
206 .na
207 \fBmetaslab_lba_weighting_enabled\fR (int)
208 .ad
209 .RS 12n
210 Give more weight to metaslabs with lower LBAs, assuming they have
211 greater bandwidth as is typically the case on a modern constant
212 angular velocity disk drive.
213 .sp
214 Use \fB1\fR for yes (default) and \fB0\fR for no.
215 .RE
216
217 .sp
218 .ne 2
219 .na
220 \fBspa_config_path\fR (charp)
221 .ad
222 .RS 12n
223 SPA config file
224 .sp
225 Default value: \fB/etc/zfs/zpool.cache\fR.
226 .RE
227
228 .sp
229 .ne 2
230 .na
231 \fBspa_asize_inflation\fR (int)
232 .ad
233 .RS 12n
234 Multiplication factor used to estimate actual disk consumption from the
235 size of data being written. The default value is a worst case estimate,
236 but lower values may be valid for a given pool depending on its
237 configuration. Pool administrators who understand the factors involved
238 may wish to specify a more realistic inflation factor, particularly if
239 they operate close to quota or capacity limits.
240 .sp
241 Default value: \fB24\fR.
242 .RE
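.sp
.LP
For illustration, a hedged sketch of how such a worst-case estimate could be
applied when checking quotas and reservations (this is not the in-kernel
function; the helper name is hypothetical):
.sp
.nf
# Illustrative worst-case space estimate (not the in-kernel function).
def worst_case_asize(logical_bytes, spa_asize_inflation=24):
    # A logical write may be inflated by extra copies, RAID-Z or mirror
    # overhead, indirect blocks, etc.; the tunable caps the multiplier.
    return logical_bytes * spa_asize_inflation

# Writing 1 MiB may be charged as up to 24 MiB against a quota.
print(worst_case_asize(1 << 20) >> 20, "MiB")
.fi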
243
244 .sp
245 .ne 2
246 .na
247 \fBspa_load_verify_data\fR (int)
248 .ad
249 .RS 12n
250 Whether to traverse data blocks during an "extreme rewind" (\fB-X\fR)
251 import. Use 0 to disable and 1 to enable.
252
253 An extreme rewind import normally performs a full traversal of all
254 blocks in the pool for verification. If this parameter is set to 0,
255 the traversal skips non-metadata blocks. It can be toggled once the
256 import has started to stop or start the traversal of non-metadata blocks.
257 .sp
258 Default value: \fB1\fR.
259 .RE
260
261 .sp
262 .ne 2
263 .na
264 \fBspa_load_verify_metadata\fR (int)
265 .ad
266 .RS 12n
267 Whether to traverse blocks during an "extreme rewind" (\fB-X\fR)
268 pool import. Use 0 to disable and 1 to enable.
269
270 An extreme rewind import normally performs a full traversal of all
271 blocks in the pool for verification. If this parameter is set to 0,
272 the traversal is not performed. It can be toggled once the import has
273 started to stop or start the traversal.
274 .sp
275 Default value: \fB1\fR.
276 .RE
277
278 .sp
279 .ne 2
280 .na
281 \fBspa_load_verify_maxinflight\fR (int)
282 .ad
283 .RS 12n
284 Maximum concurrent I/Os during the traversal performed during an "extreme
285 rewind" (\fB-X\fR) pool import.
286 .sp
287 Default value: \fB10,000\fR.
288 .RE
289
290 .sp
291 .ne 2
292 .na
293 \fBzfetch_array_rd_sz\fR (ulong)
294 .ad
295 .RS 12n
296 If prefetching is enabled, disable prefetching for reads larger than this size.
297 .sp
298 Default value: \fB1,048,576\fR.
299 .RE
300
301 .sp
302 .ne 2
303 .na
304 \fBzfetch_block_cap\fR (uint)
305 .ad
306 .RS 12n
307 Max number of blocks to prefetch at a time
308 .sp
309 Default value: \fB256\fR.
310 .RE
311
312 .sp
313 .ne 2
314 .na
315 \fBzfetch_max_streams\fR (uint)
316 .ad
317 .RS 12n
318 Max number of streams per zfetch (prefetch streams per file).
319 .sp
320 Default value: \fB8\fR.
321 .RE
322
323 .sp
324 .ne 2
325 .na
326 \fBzfetch_min_sec_reap\fR (uint)
327 .ad
328 .RS 12n
329 Min time before an active prefetch stream can be reclaimed
330 .sp
331 Default value: \fB2\fR.
332 .RE
333
334 .sp
335 .ne 2
336 .na
337 \fBzfs_arc_average_blocksize\fR (int)
338 .ad
339 .RS 12n
340 The ARC's buffer hash table is sized based on the assumption of an average
341 block size of \fBzfs_arc_average_blocksize\fR (default 8K). This works out
342 to roughly 1MB of hash table per 1GB of physical memory with 8-byte pointers.
343 For configurations with a known larger average block size this value can be
344 increased to reduce the memory footprint.
345
346 .sp
347 Default value: \fB8192\fR.
348 .RE
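.sp
.LP
The sizing rule above can be checked with a short sketch (illustrative only;
the real code also rounds the bucket count to a power of two):
.sp
.nf
# Illustrative ARC hash table sizing (8-byte pointers assumed).
def arc_hash_table_bytes(physmem_bytes, zfs_arc_average_blocksize=8192):
    buckets = physmem_bytes // zfs_arc_average_blocksize
    return buckets * 8                  # one pointer per bucket

# Roughly 1 MiB of hash table per 1 GiB of physical memory.
print(arc_hash_table_bytes(1 << 30) / 2**20, "MiB")
.fi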
349
350 .sp
351 .ne 2
352 .na
353 \fBzfs_arc_grow_retry\fR (int)
354 .ad
355 .RS 12n
356 Seconds before growing arc size
357 .sp
358 Default value: \fB5\fR.
359 .RE
360
361 .sp
362 .ne 2
363 .na
364 \fBzfs_arc_max\fR (ulong)
365 .ad
366 .RS 12n
367 Max arc size
368 .sp
369 Default value: \fB0\fR.
370 .RE
371
372 .sp
373 .ne 2
374 .na
375 \fBzfs_arc_memory_throttle_disable\fR (int)
376 .ad
377 .RS 12n
378 Disable memory throttle
379 .sp
380 Use \fB1\fR for yes (default) and \fB0\fR to disable.
381 .RE
382
383 .sp
384 .ne 2
385 .na
386 \fBzfs_arc_meta_limit\fR (ulong)
387 .ad
388 .RS 12n
389 The maximum allowed size in bytes that meta data buffers are allowed to
390 consume in the ARC. When this limit is reached meta data buffers will
391 be reclaimed even if the overall arc_c_max has not been reached. This
392 value defaults to 0 which indicates that 3/4 of the ARC may be used
393 for meta data.
394 .sp
395 Default value: \fB0\fR.
396 .RE
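.sp
.LP
A minimal sketch of the default behaviour described above (illustrative
only):
.sp
.nf
# Illustrative only: effective metadata limit in the ARC.
def effective_meta_limit(arc_c_max, zfs_arc_meta_limit=0):
    # 0 means "use 3/4 of the maximum ARC size".
    if zfs_arc_meta_limit:
        return zfs_arc_meta_limit
    return arc_c_max * 3 // 4

print(effective_meta_limit(arc_c_max=8 << 30) >> 30, "GiB")   # 6 GiB
.fi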
397
398 .sp
399 .ne 2
400 .na
401 \fBzfs_arc_meta_prune\fR (int)
402 .ad
403 .RS 12n
404 The number of dentries and inodes to be scanned looking for entries
405 which can be dropped. This may be required when the ARC reaches the
406 \fBzfs_arc_meta_limit\fR because dentries and inodes can pin buffers
407 in the ARC. Increasing this value will cause the dentry and inode caches
408 to be pruned more aggressively. Setting this value to 0 will disable
409 pruning the inode and dentry caches.
410 .sp
411 Default value: \fB10,000\fR.
412 .RE
413
414 .sp
415 .ne 2
416 .na
417 \fBzfs_arc_meta_adjust_restarts\fR (ulong)
418 .ad
419 .RS 12n
420 The number of restart passes to make while scanning the ARC, attempting
421 to free buffers in order to stay below the \fBzfs_arc_meta_limit\fR.
422 This value should not need to be tuned but is available to facilitate
423 performance analysis.
424 .sp
425 Default value: \fB4096\fR.
426 .RE
427
428 .sp
429 .ne 2
430 .na
431 \fBzfs_arc_min\fR (ulong)
432 .ad
433 .RS 12n
434 Min arc size
435 .sp
436 Default value: \fB100\fR.
437 .RE
438
439 .sp
440 .ne 2
441 .na
442 \fBzfs_arc_min_prefetch_lifespan\fR (int)
443 .ad
444 .RS 12n
445 Minimum lifetime of a prefetched block, in clock ticks
446 .sp
447 Default value: \fB100\fR.
448 .RE
449
450 .sp
451 .ne 2
452 .na
453 \fBzfs_arc_p_aggressive_disable\fR (int)
454 .ad
455 .RS 12n
456 Disable aggressive arc_p growth
457 .sp
458 Use \fB1\fR for yes (default) and \fB0\fR to disable.
459 .RE
460
461 .sp
462 .ne 2
463 .na
464 \fBzfs_arc_p_dampener_disable\fR (int)
465 .ad
466 .RS 12n
467 Disable arc_p adapt dampener
468 .sp
469 Use \fB1\fR for yes (default) and \fB0\fR to disable.
470 .RE
471
472 .sp
473 .ne 2
474 .na
475 \fBzfs_arc_shrink_shift\fR (int)
476 .ad
477 .RS 12n
478 log2(fraction of arc to reclaim)
479 .sp
480 Default value: \fB5\fR.
481 .RE
482
483 .sp
484 .ne 2
485 .na
486 \fBzfs_autoimport_disable\fR (int)
487 .ad
488 .RS 12n
489 Disable pool import at module load by ignoring the cache file (typically \fB/etc/zfs/zpool.cache\fR).
490 .sp
491 Use \fB1\fR for yes (default) and \fB0\fR for no.
492 .RE
493
494 .sp
495 .ne 2
496 .na
497 \fBzfs_dbuf_state_index\fR (int)
498 .ad
499 .RS 12n
500 Calculate arc header index
501 .sp
502 Default value: \fB0\fR.
503 .RE
504
505 .sp
506 .ne 2
507 .na
508 \fBzfs_deadman_enabled\fR (int)
509 .ad
510 .RS 12n
511 Enable deadman timer
512 .sp
513 Use \fB1\fR for yes (default) and \fB0\fR to disable.
514 .RE
515
516 .sp
517 .ne 2
518 .na
519 \fBzfs_deadman_synctime_ms\fR (ulong)
520 .ad
521 .RS 12n
522 Expiration time in milliseconds. This value has two meanings. First it is
523 used to determine when the spa_deadman() logic should fire. By default the
524 spa_deadman() will fire if spa_sync() has not completed in 1000 seconds.
525 Secondly, the value determines if an I/O is considered "hung". Any I/O that
526 has not completed in zfs_deadman_synctime_ms is considered "hung" resulting
527 in a zevent being logged.
528 .sp
529 Default value: \fB1,000,000\fR.
530 .RE
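.sp
.LP
Both meanings of the tunable can be pictured with a hedged sketch
(illustrative only; the time bookkeeping below is hypothetical, not the
kernel implementation):
.sp
.nf
# Illustrative deadman checks (names and structure are hypothetical).
import time

def io_is_hung(zio_start, now, zfs_deadman_synctime_ms=1000000):
    # An outstanding I/O older than the expiration time is considered
    # "hung" and results in a zevent being logged.
    return (now - zio_start) * 1000 > zfs_deadman_synctime_ms

def spa_deadman_should_fire(sync_start, now,
                            zfs_deadman_synctime_ms=1000000):
    # spa_deadman() fires if spa_sync() has not completed in time
    # (1,000,000 ms = 1000 seconds by default).
    return (now - sync_start) * 1000 > zfs_deadman_synctime_ms

now = time.time()
print(io_is_hung(zio_start=now - 1500, now=now))    # True, older than 1000 s
.fi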
531
532 .sp
533 .ne 2
534 .na
535 \fBzfs_dedup_prefetch\fR (int)
536 .ad
537 .RS 12n
538 Enable prefetching dedup-ed blks
539 .sp
540 Use \fB1\fR for yes and \fB0\fR to disable (default).
541 .RE
542
543 .sp
544 .ne 2
545 .na
546 \fBzfs_delay_min_dirty_percent\fR (int)
547 .ad
548 .RS 12n
549 Start to delay each transaction once there is this amount of dirty data,
550 expressed as a percentage of \fBzfs_dirty_data_max\fR.
551 This value should be >= \fBzfs_vdev_async_write_active_max_dirty_percent\fR.
552 See the section "ZFS TRANSACTION DELAY".
553 .sp
554 Default value: \fB60\fR.
555 .RE
556
557 .sp
558 .ne 2
559 .na
560 \fBzfs_delay_scale\fR (int)
561 .ad
562 .RS 12n
563 This controls how quickly the transaction delay approaches infinity.
564 Larger values cause longer delays for a given amount of dirty data.
565 .sp
566 For the smoothest delay, this value should be about 1 billion divided
567 by the maximum number of operations per second. This will smoothly
568 handle between 10x and 1/10th this number.
569 .sp
570 See the section "ZFS TRANSACTION DELAY".
571 .sp
572 Note: \fBzfs_delay_scale\fR * \fBzfs_dirty_data_max\fR must be < 2^64.
573 .sp
574 Default value: \fB500,000\fR.
575 .RE
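.sp
.LP
The guidance above (about one billion divided by the pool's maximum number
of operations per second) and the 2^64 constraint can be checked with a
small sketch (illustrative only, not part of ZFS):
.sp
.nf
# Illustrative helpers for choosing zfs_delay_scale.
def suggested_delay_scale(max_ops_per_second):
    # About 1 billion divided by the maximum operations per second
    # gives the smoothest delay curve.
    return 1000000000 // max_ops_per_second

def delay_scale_is_safe(zfs_delay_scale, zfs_dirty_data_max):
    # The product must stay below 2^64 to avoid overflow.
    return zfs_delay_scale * zfs_dirty_data_max < 2**64

scale = suggested_delay_scale(2000)     # a pool that peaks at 2000 IOPS
print(scale, delay_scale_is_safe(scale, 4 << 30))   # 500000 True
.fi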
576
577 .sp
578 .ne 2
579 .na
580 \fBzfs_dirty_data_max\fR (int)
581 .ad
582 .RS 12n
583 Determines the dirty space limit in bytes. Once this limit is exceeded, new
584 writes are halted until space frees up. This parameter takes precedence
585 over \fBzfs_dirty_data_max_percent\fR.
586 See the section "ZFS TRANSACTION DELAY".
587 .sp
588 Default value: 10 percent of all memory, capped at \fBzfs_dirty_data_max_max\fR.
589 .RE
590
591 .sp
592 .ne 2
593 .na
594 \fBzfs_dirty_data_max_max\fR (int)
595 .ad
596 .RS 12n
597 Maximum allowable value of \fBzfs_dirty_data_max\fR, expressed in bytes.
598 This limit is only enforced at module load time, and will be ignored if
599 \fBzfs_dirty_data_max\fR is later changed. This parameter takes
600 precedence over \fBzfs_dirty_data_max_max_percent\fR. See the section
601 "ZFS TRANSACTION DELAY".
602 .sp
603 Default value: 25% of physical RAM.
604 .RE
605
606 .sp
607 .ne 2
608 .na
609 \fBzfs_dirty_data_max_max_percent\fR (int)
610 .ad
611 .RS 12n
612 Maximum allowable value of \fBzfs_dirty_data_max\fR, expressed as a
613 percentage of physical RAM. This limit is only enforced at module load
614 time, and will be ignored if \fBzfs_dirty_data_max\fR is later changed.
615 The parameter \fBzfs_dirty_data_max_max\fR takes precedence over this
616 one. See the section "ZFS TRANSACTION DELAY".
617 .sp
618 Default value: \fB25\fR.
619 .RE
620
621 .sp
622 .ne 2
623 .na
624 \fBzfs_dirty_data_max_percent\fR (int)
625 .ad
626 .RS 12n
627 Determines the dirty space limit, expressed as a percentage of all
628 memory. Once this limit is exceeded, new writes are halted until space frees
629 up. The parameter \fBzfs_dirty_data_max\fR takes precedence over this
630 one. See the section "ZFS TRANSACTION DELAY".
631 .sp
632 Default value: 10%, subject to \fBzfs_dirty_data_max_max\fR.
633 .RE
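.sp
.LP
A hedged sketch of how the four dirty-data tunables above interact at module
load time (illustrative only; the exact clamping in the kernel may differ):
.sp
.nf
# Illustrative resolution of the dirty data limit at module load time.
def resolve_dirty_data_max(physmem,
                           zfs_dirty_data_max=0,         # bytes, 0 = unset
                           zfs_dirty_data_max_percent=10,
                           zfs_dirty_data_max_max_percent=25):
    # An explicitly set zfs_dirty_data_max takes precedence.
    if zfs_dirty_data_max:
        return zfs_dirty_data_max
    # Otherwise derive the limit from the percentage, capped by
    # zfs_dirty_data_max_max (by default 25% of physical RAM).
    cap = physmem * zfs_dirty_data_max_max_percent // 100
    return min(physmem * zfs_dirty_data_max_percent // 100, cap)

print(resolve_dirty_data_max(physmem=16 << 30) >> 20, "MiB")   # 1638 MiB
.fi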
634
635 .sp
636 .ne 2
637 .na
638 \fBzfs_dirty_data_sync\fR (int)
639 .ad
640 .RS 12n
641 Start syncing out a transaction group if there is at least this much dirty data.
642 .sp
643 Default value: \fB67,108,864\fR.
644 .RE
645
646 .sp
647 .ne 2
648 .na
649 \fBzfs_free_max_blocks\fR (ulong)
650 .ad
651 .RS 12n
652 Maximum number of blocks freed in a single txg.
653 .sp
654 Default value: \fB100,000\fR.
655 .RE
656
657 .sp
658 .ne 2
659 .na
660 \fBzfs_vdev_async_read_max_active\fR (int)
661 .ad
662 .RS 12n
663 Maximum asynchronous read I/Os active to each device.
664 See the section "ZFS I/O SCHEDULER".
665 .sp
666 Default value: \fB3\fR.
667 .RE
668
669 .sp
670 .ne 2
671 .na
672 \fBzfs_vdev_async_read_min_active\fR (int)
673 .ad
674 .RS 12n
675 Minimum asynchronous read I/Os active to each device.
676 See the section "ZFS I/O SCHEDULER".
677 .sp
678 Default value: \fB1\fR.
679 .RE
680
681 .sp
682 .ne 2
683 .na
684 \fBzfs_vdev_async_write_active_max_dirty_percent\fR (int)
685 .ad
686 .RS 12n
687 When the pool has more than
688 \fBzfs_vdev_async_write_active_max_dirty_percent\fR dirty data, use
689 \fBzfs_vdev_async_write_max_active\fR to limit active async writes. If
690 the dirty data is between min and max, the active I/O limit is linearly
691 interpolated. See the section "ZFS I/O SCHEDULER".
692 .sp
693 Default value: \fB60\fR.
694 .RE
695
696 .sp
697 .ne 2
698 .na
699 \fBzfs_vdev_async_write_active_min_dirty_percent\fR (int)
700 .ad
701 .RS 12n
702 When the pool has less than
703 \fBzfs_vdev_async_write_active_min_dirty_percent\fR dirty data, use
704 \fBzfs_vdev_async_write_min_active\fR to limit active async writes. If
705 the dirty data is between min and max, the active I/O limit is linearly
706 interpolated. See the section "ZFS I/O SCHEDULER".
707 .sp
708 Default value: \fB30\fR.
709 .RE
710
711 .sp
712 .ne 2
713 .na
714 \fBzfs_vdev_async_write_max_active\fR (int)
715 .ad
716 .RS 12n
717 Maximum asynchronous write I/Os active to each device.
718 See the section "ZFS I/O SCHEDULER".
719 .sp
720 Default value: \fB10\fR.
721 .RE
722
723 .sp
724 .ne 2
725 .na
726 \fBzfs_vdev_async_write_min_active\fR (int)
727 .ad
728 .RS 12n
729 Minimum asynchronous write I/Os active to each device.
730 See the section "ZFS I/O SCHEDULER".
731 .sp
732 Default value: \fB1\fR.
733 .RE
734
735 .sp
736 .ne 2
737 .na
738 \fBzfs_vdev_max_active\fR (int)
739 .ad
740 .RS 12n
741 The maximum number of I/Os active to each device. Ideally, this will be >=
742 the sum of each queue's max_active. It must be at least the sum of each
743 queue's min_active. See the section "ZFS I/O SCHEDULER".
744 .sp
745 Default value: \fB1,000\fR.
746 .RE
747
748 .sp
749 .ne 2
750 .na
751 \fBzfs_vdev_scrub_max_active\fR (int)
752 .ad
753 .RS 12n
754 Maximum scrub I/Os active to each device.
755 See the section "ZFS I/O SCHEDULER".
756 .sp
757 Default value: \fB2\fR.
758 .RE
759
760 .sp
761 .ne 2
762 .na
763 \fBzfs_vdev_scrub_min_active\fR (int)
764 .ad
765 .RS 12n
766 Minimum scrub I/Os active to each device.
767 See the section "ZFS I/O SCHEDULER".
768 .sp
769 Default value: \fB1\fR.
770 .RE
771
772 .sp
773 .ne 2
774 .na
775 \fBzfs_vdev_sync_read_max_active\fR (int)
776 .ad
777 .RS 12n
778 Maximum synchronous read I/Os active to each device.
779 See the section "ZFS I/O SCHEDULER".
780 .sp
781 Default value: \fB10\fR.
782 .RE
783
784 .sp
785 .ne 2
786 .na
787 \fBzfs_vdev_sync_read_min_active\fR (int)
788 .ad
789 .RS 12n
790 Minimum synchronous read I/Os active to each device.
791 See the section "ZFS I/O SCHEDULER".
792 .sp
793 Default value: \fB10\fR.
794 .RE
795
796 .sp
797 .ne 2
798 .na
799 \fBzfs_vdev_sync_write_max_active\fR (int)
800 .ad
801 .RS 12n
802 Maximum synchronous write I/Os active to each device.
803 See the section "ZFS I/O SCHEDULER".
804 .sp
805 Default value: \fB10\fR.
806 .RE
807
808 .sp
809 .ne 2
810 .na
811 \fBzfs_vdev_sync_write_min_active\fR (int)
812 .ad
813 .RS 12n
814 Minimum synchronous write I/Os active to each device.
815 See the section "ZFS I/O SCHEDULER".
816 .sp
817 Default value: \fB10\fR.
818 .RE
819
820 .sp
821 .ne 2
822 .na
823 \fBzfs_disable_dup_eviction\fR (int)
824 .ad
825 .RS 12n
826 Disable duplicate buffer eviction
827 .sp
828 Use \fB1\fR for yes and \fB0\fR for no (default).
829 .RE
830
831 .sp
832 .ne 2
833 .na
834 \fBzfs_expire_snapshot\fR (int)
835 .ad
836 .RS 12n
837 Seconds to expire .zfs/snapshot
838 .sp
839 Default value: \fB300\fR.
840 .RE
841
842 .sp
843 .ne 2
844 .na
845 \fBzfs_flags\fR (int)
846 .ad
847 .RS 12n
848 Set additional debugging flags. The following flags may be bitwise-or'd
849 together.
850 .sp
851 .TS
852 box;
853 rB lB
854 lB lB
855 r l.
856 Value Symbolic Name
857 Description
858 _
859 1 ZFS_DEBUG_DPRINTF
860 Enable dprintf entries in the debug log.
861 _
862 2 ZFS_DEBUG_DBUF_VERIFY *
863 Enable extra dbuf verifications.
864 _
865 4 ZFS_DEBUG_DNODE_VERIFY *
866 Enable extra dnode verifications.
867 _
868 8 ZFS_DEBUG_SNAPNAMES
869 Enable snapshot name verification.
870 _
871 16 ZFS_DEBUG_MODIFY
872 Check for illegally modified ARC buffers.
873 _
874 32 ZFS_DEBUG_SPA
875 Enable spa_dbgmsg entries in the debug log.
876 _
877 64 ZFS_DEBUG_ZIO_FREE
878 Enable verification of block frees.
879 _
880 128 ZFS_DEBUG_HISTOGRAM_VERIFY
881 Enable extra spacemap histogram verifications.
882 .TE
883 .sp
884 * Requires debug build.
885 .sp
886 Default value: \fB0\fR.
887 .RE
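.sp
.LP
For example, enabling dprintf entries, ARC modification checks and
verification of block frees at the same time means OR-ing the values from
the table above:
.sp
.nf
# Combining zfs_flags values with bitwise OR (values from the table above).
ZFS_DEBUG_DPRINTF  = 1
ZFS_DEBUG_MODIFY   = 16
ZFS_DEBUG_ZIO_FREE = 64

zfs_flags = ZFS_DEBUG_DPRINTF | ZFS_DEBUG_MODIFY | ZFS_DEBUG_ZIO_FREE
print(zfs_flags)                        # 81
.fi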
888
889 .sp
890 .ne 2
891 .na
892 \fBzfs_free_leak_on_eio\fR (int)
893 .ad
894 .RS 12n
895 If destroy encounters an EIO while reading metadata (e.g. indirect
896 blocks), space referenced by the missing metadata can not be freed.
897 Normally this causes the background destroy to become "stalled", as
898 it is unable to make forward progress. While in this stalled state,
899 all remaining space to free from the error-encountering filesystem is
900 "temporarily leaked". Set this flag to cause it to ignore the EIO,
901 permanently leak the space from indirect blocks that can not be read,
902 and continue to free everything else that it can.
903
904 The default, "stalling" behavior is useful if the storage partially
905 fails (i.e. some but not all i/os fail), and then later recovers. In
906 this case, we will be able to continue pool operations while it is
907 partially failed, and when it recovers, we can continue to free the
908 space, with no leaks. However, note that this case is actually
909 fairly rare.
910
911 Typically pools either (a) fail completely (but perhaps temporarily,
912 e.g. a top-level vdev going offline), or (b) have localized,
913 permanent errors (e.g. disk returns the wrong data due to bit flip or
914 firmware bug). In case (a), this setting does not matter because the
915 pool will be suspended and the sync thread will not be able to make
916 forward progress regardless. In case (b), because the error is
917 permanent, the best we can do is leak the minimum amount of space,
918 which is what setting this flag will do. Therefore, it is reasonable
919 for this flag to normally be set, but we chose the more conservative
920 approach of not setting it, so that there is no possibility of
921 leaking space in the "partial temporary" failure case.
922 .sp
923 Default value: \fB0\fR.
924 .RE
925
926 .sp
927 .ne 2
928 .na
929 \fBzfs_free_min_time_ms\fR (int)
930 .ad
931 .RS 12n
932 Minimum time in milliseconds to spend freeing blocks in a txg
933 .sp
934 Default value: \fB1,000\fR.
935 .RE
936
937 .sp
938 .ne 2
939 .na
940 \fBzfs_immediate_write_sz\fR (long)
941 .ad
942 .RS 12n
943 Largest data block to write to zil
944 .sp
945 Default value: \fB32,768\fR.
946 .RE
947
948 .sp
949 .ne 2
950 .na
951 \fBzfs_mdcomp_disable\fR (int)
952 .ad
953 .RS 12n
954 Disable meta data compression
955 .sp
956 Use \fB1\fR for yes and \fB0\fR for no (default).
957 .RE
958
959 .sp
960 .ne 2
961 .na
962 \fBzfs_metaslab_fragmentation_threshold\fR (int)
963 .ad
964 .RS 12n
965 Allow metaslabs to keep their active state as long as their fragmentation
966 percentage is less than or equal to this value. An active metaslab that
967 exceeds this threshold will no longer keep its active status allowing
968 better metaslabs to be selected.
969 .sp
970 Default value: \fB70\fR.
971 .RE
972
973 .sp
974 .ne 2
975 .na
976 \fBzfs_mg_fragmentation_threshold\fR (int)
977 .ad
978 .RS 12n
979 Metaslab groups are considered eligible for allocations if their
980 fragmentation metric (measured as a percentage) is less than or equal to
981 this value. If a metaslab group exceeds this threshold then it will be
982 skipped unless all metaslab groups within the metaslab class have also
983 crossed this threshold.
984 .sp
985 Default value: \fB85\fR.
986 .RE
987
988 .sp
989 .ne 2
990 .na
991 \fBzfs_mg_noalloc_threshold\fR (int)
992 .ad
993 .RS 12n
994 Defines a threshold at which metaslab groups should be eligible for
995 allocations. The value is expressed as a percentage of free space
996 beyond which a metaslab group is always eligible for allocations.
997 If a metaslab group's free space is less than or equal to the
998 threshold, the allocator will avoid allocating to that group
999 unless all groups in the pool have reached the threshold. Once all
1000 groups have reached the threshold, all groups are allowed to accept
1001 allocations. The default value of 0 disables the feature and causes
1002 all metaslab groups to be eligible for allocations.
1003
1004 This parameter makes it possible to deal with pools having heavily imbalanced
1005 vdevs such as would be the case when a new vdev has been added.
1006 Setting the threshold to a non-zero percentage will stop allocations
1007 from being made to vdevs that aren't filled to the specified percentage
1008 and allow lesser filled vdevs to acquire more allocations than they
1009 otherwise would under the old \fBzfs_mg_alloc_failures\fR facility.
1010 .sp
1011 Default value: \fB0\fR.
1012 .RE
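.sp
.LP
A hedged sketch of the eligibility rule described above (illustrative only,
not the allocator code):
.sp
.nf
# Illustrative metaslab group eligibility check.
def group_eligible(free_pct, all_free_pcts, zfs_mg_noalloc_threshold=0):
    # A group above the threshold is always eligible; groups at or below
    # it are skipped unless every group in the pool is at or below it.
    if free_pct > zfs_mg_noalloc_threshold:
        return True
    return all(p <= zfs_mg_noalloc_threshold for p in all_free_pcts)

# With a 30% threshold only the freshly added (90% free) vdev is used.
print([group_eligible(p, [10, 15, 90], 30) for p in [10, 15, 90]])
.fi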
1013
1014 .sp
1015 .ne 2
1016 .na
1017 \fBzfs_no_scrub_io\fR (int)
1018 .ad
1019 .RS 12n
1020 Set for no scrub I/O
1021 .sp
1022 Use \fB1\fR for yes and \fB0\fR for no (default).
1023 .RE
1024
1025 .sp
1026 .ne 2
1027 .na
1028 \fBzfs_no_scrub_prefetch\fR (int)
1029 .ad
1030 .RS 12n
1031 Set for no scrub prefetching
1032 .sp
1033 Use \fB1\fR for yes and \fB0\fR for no (default).
1034 .RE
1035
1036 .sp
1037 .ne 2
1038 .na
1039 \fBzfs_nocacheflush\fR (int)
1040 .ad
1041 .RS 12n
1042 Disable cache flushes
1043 .sp
1044 Use \fB1\fR for yes and \fB0\fR for no (default).
1045 .RE
1046
1047 .sp
1048 .ne 2
1049 .na
1050 \fBzfs_nopwrite_enabled\fR (int)
1051 .ad
1052 .RS 12n
1053 Enable NOP writes
1054 .sp
1055 Use \fB1\fR for yes (default) and \fB0\fR to disable.
1056 .RE
1057
1058 .sp
1059 .ne 2
1060 .na
1061 \fBzfs_pd_bytes_max\fR (int)
1062 .ad
1063 .RS 12n
1064 The number of bytes which should be prefetched.
1065 .sp
1066 Default value: \fB52,428,800\fR.
1067 .RE
1068
1069 .sp
1070 .ne 2
1071 .na
1072 \fBzfs_prefetch_disable\fR (int)
1073 .ad
1074 .RS 12n
1075 Disable all ZFS prefetching
1076 .sp
1077 Use \fB1\fR for yes and \fB0\fR for no (default).
1078 .RE
1079
1080 .sp
1081 .ne 2
1082 .na
1083 \fBzfs_read_chunk_size\fR (long)
1084 .ad
1085 .RS 12n
1086 Bytes to read per chunk
1087 .sp
1088 Default value: \fB1,048,576\fR.
1089 .RE
1090
1091 .sp
1092 .ne 2
1093 .na
1094 \fBzfs_read_history\fR (int)
1095 .ad
1096 .RS 12n
1097 Historic statistics for the last N reads
1098 .sp
1099 Default value: \fB0\fR.
1100 .RE
1101
1102 .sp
1103 .ne 2
1104 .na
1105 \fBzfs_read_history_hits\fR (int)
1106 .ad
1107 .RS 12n
1108 Include cache hits in read history
1109 .sp
1110 Use \fB1\fR for yes and \fB0\fR for no (default).
1111 .RE
1112
1113 .sp
1114 .ne 2
1115 .na
1116 \fBzfs_recover\fR (int)
1117 .ad
1118 .RS 12n
1119 Set to attempt to recover from fatal errors. This should only be used as a
1120 last resort, as it typically results in leaked space, or worse.
1121 .sp
1122 Use \fB1\fR for yes and \fB0\fR for no (default).
1123 .RE
1124
1125 .sp
1126 .ne 2
1127 .na
1128 \fBzfs_resilver_delay\fR (int)
1129 .ad
1130 .RS 12n
1131 Number of ticks to delay prior to issuing a resilver I/O operation when
1132 a non-resilver or non-scrub I/O operation has occurred within the past
1133 \fBzfs_scan_idle\fR ticks.
1134 .sp
1135 Default value: \fB2\fR.
1136 .RE
1137
1138 .sp
1139 .ne 2
1140 .na
1141 \fBzfs_resilver_min_time_ms\fR (int)
1142 .ad
1143 .RS 12n
1144 Minimum time in milliseconds to spend resilvering in a txg
1145 .sp
1146 Default value: \fB3,000\fR.
1147 .RE
1148
1149 .sp
1150 .ne 2
1151 .na
1152 \fBzfs_scan_idle\fR (int)
1153 .ad
1154 .RS 12n
1155 Idle window in clock ticks. During a scrub or a resilver, if
1156 a non-scrub or non-resilver I/O operation has occurred during this
1157 window, the next scrub or resilver operation is delayed by
1158 \fBzfs_scrub_delay\fR or \fBzfs_resilver_delay\fR ticks, respectively.
1159 .sp
1160 Default value: \fB50\fR.
1161 .RE
1162
1163 .sp
1164 .ne 2
1165 .na
1166 \fBzfs_scan_min_time_ms\fR (int)
1167 .ad
1168 .RS 12n
1169 Minimum time in milliseconds to spend scrubbing in a txg
1170 .sp
1171 Default value: \fB1,000\fR.
1172 .RE
1173
1174 .sp
1175 .ne 2
1176 .na
1177 \fBzfs_scrub_delay\fR (int)
1178 .ad
1179 .RS 12n
1180 Number of ticks to delay prior to issuing a scrub I/O operation when
1181 a non-scrub or non-resilver I/O operation has occurred within the past
1182 \fBzfs_scan_idle\fR ticks.
1183 .sp
1184 Default value: \fB4\fR.
1185 .RE
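.sp
.LP
The interaction of \fBzfs_scan_idle\fR, \fBzfs_scrub_delay\fR and
\fBzfs_resilver_delay\fR can be sketched as follows (illustrative only; the
tick bookkeeping is hypothetical):
.sp
.nf
# Illustrative scrub/resilver throttling decision (not the kernel code).
def scan_io_delay_ticks(now_tick, last_other_io_tick, resilvering,
                        zfs_scan_idle=50, zfs_scrub_delay=4,
                        zfs_resilver_delay=2):
    # If ordinary (non-scrub, non-resilver) I/O happened recently, delay
    # the next scan I/O so that it does not compete with normal traffic.
    if now_tick - last_other_io_tick < zfs_scan_idle:
        return zfs_resilver_delay if resilvering else zfs_scrub_delay
    return 0

print(scan_io_delay_ticks(now_tick=120, last_other_io_tick=100,
                          resilvering=False))          # 4 ticks
.fi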
1186
1187 .sp
1188 .ne 2
1189 .na
1190 \fBzfs_send_corrupt_data\fR (int)
1191 .ad
1192 .RS 12n
1193 Allow sending of corrupt data (ignore read/checksum errors when sending data)
1194 .sp
1195 Use \fB1\fR for yes and \fB0\fR for no (default).
1196 .RE
1197
1198 .sp
1199 .ne 2
1200 .na
1201 \fBzfs_sync_pass_deferred_free\fR (int)
1202 .ad
1203 .RS 12n
1204 Defer frees starting in this pass
1205 .sp
1206 Default value: \fB2\fR.
1207 .RE
1208
1209 .sp
1210 .ne 2
1211 .na
1212 \fBzfs_sync_pass_dont_compress\fR (int)
1213 .ad
1214 .RS 12n
1215 Don't compress starting in this pass
1216 .sp
1217 Default value: \fB5\fR.
1218 .RE
1219
1220 .sp
1221 .ne 2
1222 .na
1223 \fBzfs_sync_pass_rewrite\fR (int)
1224 .ad
1225 .RS 12n
1226 Rewrite new block pointers starting in this pass
1227 .sp
1228 Default value: \fB2\fR.
1229 .RE
1230
1231 .sp
1232 .ne 2
1233 .na
1234 \fBzfs_top_maxinflight\fR (int)
1235 .ad
1236 .RS 12n
1237 Max I/Os per top-level vdev during scrub or resilver operations.
1238 .sp
1239 Default value: \fB32\fR.
1240 .RE
1241
1242 .sp
1243 .ne 2
1244 .na
1245 \fBzfs_txg_history\fR (int)
1246 .ad
1247 .RS 12n
1248 Historic statistics for the last N txgs
1249 .sp
1250 Default value: \fB0\fR.
1251 .RE
1252
1253 .sp
1254 .ne 2
1255 .na
1256 \fBzfs_txg_timeout\fR (int)
1257 .ad
1258 .RS 12n
1259 Max seconds worth of delta per txg
1260 .sp
1261 Default value: \fB5\fR.
1262 .RE
1263
1264 .sp
1265 .ne 2
1266 .na
1267 \fBzfs_vdev_aggregation_limit\fR (int)
1268 .ad
1269 .RS 12n
1270 Max vdev I/O aggregation size
1271 .sp
1272 Default value: \fB131,072\fR.
1273 .RE
1274
1275 .sp
1276 .ne 2
1277 .na
1278 \fBzfs_vdev_cache_bshift\fR (int)
1279 .ad
1280 .RS 12n
1281 Shift size to inflate reads to
1282 .sp
1283 Default value: \fB16\fR.
1284 .RE
1285
1286 .sp
1287 .ne 2
1288 .na
1289 \fBzfs_vdev_cache_max\fR (int)
1290 .ad
1291 .RS 12n
1292 Inflate reads smaller than this value
1293 .RE
1294
1295 .sp
1296 .ne 2
1297 .na
1298 \fBzfs_vdev_cache_size\fR (int)
1299 .ad
1300 .RS 12n
1301 Total size of the per-disk cache
1302 .sp
1303 Default value: \fB0\fR.
1304 .RE
1305
1306 .sp
1307 .ne 2
1308 .na
1309 \fBzfs_vdev_mirror_switch_us\fR (int)
1310 .ad
1311 .RS 12n
1312 Switch mirrors every N usecs
1313 .sp
1314 Default value: \fB10,000\fR.
1315 .RE
1316
1317 .sp
1318 .ne 2
1319 .na
1320 \fBzfs_vdev_read_gap_limit\fR (int)
1321 .ad
1322 .RS 12n
1323 Aggregate read I/O over gap
1324 .sp
1325 Default value: \fB32,768\fR.
1326 .RE
1327
1328 .sp
1329 .ne 2
1330 .na
1331 \fBzfs_vdev_scheduler\fR (charp)
1332 .ad
1333 .RS 12n
1334 I/O scheduler
1335 .sp
1336 Default value: \fBnoop\fR.
1337 .RE
1338
1339 .sp
1340 .ne 2
1341 .na
1342 \fBzfs_vdev_write_gap_limit\fR (int)
1343 .ad
1344 .RS 12n
1345 Aggregate write I/O over gap
1346 .sp
1347 Default value: \fB4,096\fR.
1348 .RE
1349
1350 .sp
1351 .ne 2
1352 .na
1353 \fBzfs_zevent_cols\fR (int)
1354 .ad
1355 .RS 12n
1356 Max event column width
1357 .sp
1358 Default value: \fB80\fR.
1359 .RE
1360
1361 .sp
1362 .ne 2
1363 .na
1364 \fBzfs_zevent_console\fR (int)
1365 .ad
1366 .RS 12n
1367 Log events to the console
1368 .sp
1369 Use \fB1\fR for yes and \fB0\fR for no (default).
1370 .RE
1371
1372 .sp
1373 .ne 2
1374 .na
1375 \fBzfs_zevent_len_max\fR (int)
1376 .ad
1377 .RS 12n
1378 Max event queue length
1379 .sp
1380 Default value: \fB0\fR.
1381 .RE
1382
1383 .sp
1384 .ne 2
1385 .na
1386 \fBzil_replay_disable\fR (int)
1387 .ad
1388 .RS 12n
1389 Disable intent logging replay
1390 .sp
1391 Use \fB1\fR for yes and \fB0\fR for no (default).
1392 .RE
1393
1394 .sp
1395 .ne 2
1396 .na
1397 \fBzil_slog_limit\fR (ulong)
1398 .ad
1399 .RS 12n
1400 Max commit bytes to separate log device
1401 .sp
1402 Default value: \fB1,048,576\fR.
1403 .RE
1404
1405 .sp
1406 .ne 2
1407 .na
1408 \fBzio_delay_max\fR (int)
1409 .ad
1410 .RS 12n
1411 Maximum zio delay in milliseconds before posting an event
1412 .sp
1413 Default value: \fB30,000\fR.
1414 .RE
1415
1416 .sp
1417 .ne 2
1418 .na
1419 \fBzio_requeue_io_start_cut_in_line\fR (int)
1420 .ad
1421 .RS 12n
1422 Prioritize requeued I/O
1423 .sp
1424 Default value: \fB0\fR.
1425 .RE
1426
1427 .sp
1428 .ne 2
1429 .na
1430 \fBzvol_inhibit_dev\fR (uint)
1431 .ad
1432 .RS 12n
1433 Do not create zvol device nodes
1434 .sp
1435 Use \fB1\fR for yes and \fB0\fR for no (default).
1436 .RE
1437
1438 .sp
1439 .ne 2
1440 .na
1441 \fBzvol_major\fR (uint)
1442 .ad
1443 .RS 12n
1444 Major number for zvol device
1445 .sp
1446 Default value: \fB230\fR.
1447 .RE
1448
1449 .sp
1450 .ne 2
1451 .na
1452 \fBzvol_max_discard_blocks\fR (ulong)
1453 .ad
1454 .RS 12n
1455 Max number of blocks to discard at once
1456 .sp
1457 Default value: \fB16,384\fR.
1458 .RE
1459
1460 .sp
1461 .ne 2
1462 .na
1463 \fBzvol_threads\fR (uint)
1464 .ad
1465 .RS 12n
1466 Number of threads for zvol device
1467 .sp
1468 Default value: \fB32\fR.
1469 .RE
1470
1471 .SH ZFS I/O SCHEDULER
1472 ZFS issues I/O operations to leaf vdevs to satisfy and complete I/Os.
1473 The I/O scheduler determines when and in what order those operations are
1474 issued. The I/O scheduler divides operations into five I/O classes
1475 prioritized in the following order: sync read, sync write, async read,
1476 async write, and scrub/resilver. Each queue defines the minimum and
1477 maximum number of concurrent operations that may be issued to the
1478 device. In addition, the device has an aggregate maximum,
1479 \fBzfs_vdev_max_active\fR. Note that the sum of the per-queue minimums
1480 must not exceed the aggregate maximum. If the sum of the per-queue
1481 maximums exceeds the aggregate maximum, then the number of active I/Os
1482 may reach \fBzfs_vdev_max_active\fR, in which case no further I/Os will
1483 be issued regardless of whether all per-queue minimums have been met.
1484 .sp
1485 For many physical devices, throughput increases with the number of
1486 concurrent operations, but latency typically suffers. Further, physical
1487 devices typically have a limit at which more concurrent operations have no
1488 effect on throughput or can actually cause it to decrease.
1489 .sp
1490 The scheduler selects the next operation to issue by first looking for an
1491 I/O class whose minimum has not been satisfied. Once all are satisfied and
1492 the aggregate maximum has not been hit, the scheduler looks for classes
1493 whose maximum has not been satisfied. Iteration through the I/O classes is
1494 done in the order specified above. No further operations are issued if the
1495 aggregate maximum number of concurrent operations has been hit or if there
1496 are no operations queued for an I/O class that has not hit its maximum.
1497 Every time an I/O is queued or an operation completes, the I/O scheduler
1498 looks for new operations to issue.
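.sp
In rough Python terms, the selection rule reads as follows (an illustrative
sketch; the queue bookkeeping is hypothetical, not the kernel scheduler):
.sp
.nf
# Illustrative vdev I/O class selection (not the kernel scheduler).
CLASSES = ["sync_read", "sync_write", "async_read", "async_write", "scrub"]

def next_class(active, queued, min_active, max_active,
               zfs_vdev_max_active=1000):
    if sum(active.values()) >= zfs_vdev_max_active:
        return None                     # aggregate maximum reached
    # First serve any class that has not met its minimum ...
    for c in CLASSES:                   # priority order
        if queued[c] and active[c] < min_active[c]:
            return c
    # ... then any class that has not reached its maximum.
    for c in CLASSES:
        if queued[c] and active[c] < max_active[c]:
            return c
    return None

print(next_class(active={c: 0 for c in CLASSES},
                 queued={c: 1 for c in CLASSES},
                 min_active={c: 1 for c in CLASSES},
                 max_active={c: 10 for c in CLASSES}))   # sync_read
.fi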
1499 .sp
1500 In general, smaller max_active's will lead to lower latency of synchronous
1501 operations. Larger max_active's may lead to higher overall throughput,
1502 depending on underlying storage.
1503 .sp
1504 The ratio of the queues' max_actives determines the balance of performance
1505 between reads, writes, and scrubs. E.g., increasing
1506 \fBzfs_vdev_scrub_max_active\fR will cause the scrub or resilver to complete
1507 more quickly, but will also cause reads and writes to have higher latency and lower throughput.
1508 .sp
1509 All I/O classes have a fixed maximum number of outstanding operations
1510 except for the async write class. Asynchronous writes represent the data
1511 that is committed to stable storage during the syncing stage for
1512 transaction groups. Transaction groups enter the syncing state
1513 periodically so the number of queued async writes will quickly burst up
1514 and then bleed down to zero. Rather than servicing them as quickly as
1515 possible, the I/O scheduler changes the maximum number of active async
1516 write I/Os according to the amount of dirty data in the pool. Since
1517 both throughput and latency typically increase with the number of
1518 concurrent operations issued to physical devices, reducing the
1519 burstiness in the number of concurrent operations also stabilizes the
1520 response time of operations from other -- and in particular synchronous
1521 -- queues. In broad strokes, the I/O scheduler will issue more
1522 concurrent operations from the async write queue as there's more dirty
1523 data in the pool.
1524 .sp
1525 Async Writes
1526 .sp
1527 The number of concurrent operations issued for the async write I/O class
1528 follows a piece-wise linear function defined by a few adjustable points.
1529 .nf
1530
1531 | o---------| <-- zfs_vdev_async_write_max_active
1532 ^ | /^ |
1533 | | / | |
1534 active | / | |
1535 I/O | / | |
1536 count | / | |
1537 | / | |
1538 |-------o | | <-- zfs_vdev_async_write_min_active
1539 0|_______^______|_________|
1540 0% | | 100% of zfs_dirty_data_max
1541 | |
1542 | `-- zfs_vdev_async_write_active_max_dirty_percent
1543 `--------- zfs_vdev_async_write_active_min_dirty_percent
1544
1545 .fi
1546 Until the amount of dirty data exceeds a minimum percentage of the dirty
1547 data allowed in the pool, the I/O scheduler will limit the number of
1548 concurrent operations to the minimum. As that threshold is crossed, the
1549 number of concurrent operations issued increases linearly to the maximum at
1550 the specified maximum percentage of the dirty data allowed in the pool.
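.sp
A hedged sketch of the piece-wise linear function pictured above, using the
documented default break points (illustrative only):
.sp
.nf
# Illustrative piece-wise linear scaling of async write max_active.
def async_write_max_active(dirty, zfs_dirty_data_max,
                           min_active=1, max_active=10,
                           min_pct=30, max_pct=60):
    pct = 100 * dirty / zfs_dirty_data_max
    if pct <= min_pct:
        return min_active
    if pct >= max_pct:
        return max_active
    # Linear interpolation between the two break points.
    frac = (pct - min_pct) / (max_pct - min_pct)
    return round(min_active + frac * (max_active - min_active))

# Half-way between 30% and 60% dirty gives roughly the mid value.
print(async_write_max_active(dirty=45, zfs_dirty_data_max=100))   # ~6
.fi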
1551 .sp
1552 Ideally, the amount of dirty data on a busy pool will stay in the sloped
1553 part of the function between \fBzfs_vdev_async_write_active_min_dirty_percent\fR
1554 and \fBzfs_vdev_async_write_active_max_dirty_percent\fR. If it exceeds the
1555 maximum percentage, this indicates that the rate of incoming data is
1556 greater than the rate that the backend storage can handle. In this case, we
1557 must further throttle incoming writes, as described in the next section.
1558
1559 .SH ZFS TRANSACTION DELAY
1560 We delay transactions when we've determined that the backend storage
1561 isn't able to accommodate the rate of incoming writes.
1562 .sp
1563 If there is already a transaction waiting, we delay relative to when
1564 that transaction will finish waiting. This way the calculated delay time
1565 is independent of the number of threads concurrently executing
1566 transactions.
1567 .sp
1568 If we are the only waiter, wait relative to when the transaction
1569 started, rather than the current time. This credits the transaction for
1570 "time already served", e.g. reading indirect blocks.
1571 .sp
1572 The minimum time for a transaction to take is calculated as:
1573 .nf
1574 min_time = zfs_delay_scale * (dirty - min) / (max - dirty)
1575 min_time is then capped at 100 milliseconds.
1576 .fi
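.sp
In Python terms (an illustrative sketch of the formula above, with the delay
returned in nanoseconds):
.nf
# Illustrative transaction delay, mirroring the formula above.
def tx_delay_ns(dirty, zfs_dirty_data_max,
                zfs_delay_min_dirty_percent=60, zfs_delay_scale=500000):
    cap_ns = 100 * 1000 * 1000          # capped at 100 milliseconds
    dmin = zfs_dirty_data_max * zfs_delay_min_dirty_percent // 100
    if dirty <= dmin:
        return 0                        # no delay below the threshold
    if dirty >= zfs_dirty_data_max:
        return cap_ns
    delay = zfs_delay_scale * (dirty - dmin) // (zfs_dirty_data_max - dirty)
    return min(delay, cap_ns)

# At the midpoint of the curve the delay is 500 us, i.e. about 2000 IOPS.
print(tx_delay_ns(dirty=80, zfs_dirty_data_max=100))   # 500000 ns
.fi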
1577 .sp
1578 The delay has two degrees of freedom that can be adjusted via tunables. The
1579 percentage of dirty data at which we start to delay is defined by
1580 \fBzfs_delay_min_dirty_percent\fR. This should typically be at or above
1581 \fBzfs_vdev_async_write_active_max_dirty_percent\fR so that we only start to
1582 delay after writing at full speed has failed to keep up with the incoming write
1583 rate. The scale of the curve is defined by \fBzfs_delay_scale\fR. Roughly speaking,
1584 this variable determines the amount of delay at the midpoint of the curve.
1585 .sp
1586 .nf
1587 delay
1588 10ms +-------------------------------------------------------------*+
1589 | *|
1590 9ms + *+
1591 | *|
1592 8ms + *+
1593 | * |
1594 7ms + * +
1595 | * |
1596 6ms + * +
1597 | * |
1598 5ms + * +
1599 | * |
1600 4ms + * +
1601 | * |
1602 3ms + * +
1603 | * |
1604 2ms + (midpoint) * +
1605 | | ** |
1606 1ms + v *** +
1607 | zfs_delay_scale ----------> ******** |
1608 0 +-------------------------------------*********----------------+
1609 0% <- zfs_dirty_data_max -> 100%
1610 .fi
1611 .sp
1612 Note that since the delay is added to the outstanding time remaining on the
1613 most recent transaction, the delay is effectively the inverse of IOPS.
1614 Here the midpoint of 500us translates to 2000 IOPS. The shape of the curve
1615 was chosen such that small changes in the amount of accumulated dirty data
1616 in the first 3/4 of the curve yield relatively small differences in the
1617 amount of delay.
1618 .sp
1619 The effects can be easier to understand when the amount of delay is
1620 represented on a log scale:
1621 .sp
1622 .nf
1623 delay
1624 100ms +-------------------------------------------------------------++
1625 + +
1626 | |
1627 + *+
1628 10ms + *+
1629 + ** +
1630 | (midpoint) ** |
1631 + | ** +
1632 1ms + v **** +
1633 + zfs_delay_scale ----------> ***** +
1634 | **** |
1635 + **** +
1636 100us + ** +
1637 + * +
1638 | * |
1639 + * +
1640 10us + * +
1641 + +
1642 | |
1643 + +
1644 +--------------------------------------------------------------+
1645 0% <- zfs_dirty_data_max -> 100%
1646 .fi
1647 .sp
1648 Note here that only as the amount of dirty data approaches its limit does
1649 the delay start to increase rapidly. The goal of a properly tuned system
1650 should be to keep the amount of dirty data out of that range by first
1651 ensuring that the appropriate limits are set for the I/O scheduler to reach
1652 optimal throughput on the backend storage, and then by changing the value
1653 of \fBzfs_delay_scale\fR to increase the steepness of the curve.