'\" te
.\" Copyright (c) 2013 by Turbo Fredriksson <turbo@bayour.com>. All rights reserved.
.\" The contents of this file are subject to the terms of the Common Development
.\" and Distribution License (the "License"). You may not use this file except
.\" in compliance with the License. You can obtain a copy of the license at
.\" usr/src/OPENSOLARIS.LICENSE or http://www.opensolaris.org/os/licensing.
.\"
.\" See the License for the specific language governing permissions and
.\" limitations under the License. When distributing Covered Code, include this
.\" CDDL HEADER in each file and include the License file at
.\" usr/src/OPENSOLARIS.LICENSE. If applicable, add the following below this
.\" CDDL HEADER, with the fields enclosed by brackets "[]" replaced with your
.\" own identifying information:
.\" Portions Copyright [yyyy] [name of copyright owner]
.TH ZFS-MODULE-PARAMETERS 5 "Nov 16, 2013"
.SH NAME
zfs\-module\-parameters \- ZFS module parameters
.SH DESCRIPTION
.sp
.LP
Description of the different parameters to the ZFS module.

.SS "Module parameters"
.sp
.LP

.sp
.ne 2
.na
\fBl2arc_feed_again\fR (int)
.ad
.RS 12n
Turbo L2ARC warmup
.sp
Use \fB1\fR for yes (default) and \fB0\fR to disable.
.RE

.sp
.ne 2
.na
\fBl2arc_feed_min_ms\fR (ulong)
.ad
.RS 12n
Min feed interval in milliseconds
.sp
Default value: \fB200\fR.
.RE

.sp
.ne 2
.na
\fBl2arc_feed_secs\fR (ulong)
.ad
.RS 12n
Seconds between L2ARC writing
.sp
Default value: \fB1\fR.
.RE

.sp
.ne 2
.na
\fBl2arc_headroom\fR (ulong)
.ad
.RS 12n
Number of max device writes to precache
.sp
Default value: \fB2\fR.
.RE

.sp
.ne 2
.na
\fBl2arc_headroom_boost\fR (ulong)
.ad
.RS 12n
Compressed l2arc_headroom multiplier
.sp
Default value: \fB200\fR.
.RE

.sp
.ne 2
.na
\fBl2arc_nocompress\fR (int)
.ad
.RS 12n
Skip compressing L2ARC buffers
.sp
Use \fB1\fR for yes and \fB0\fR for no (default).
.RE

.sp
.ne 2
.na
\fBl2arc_noprefetch\fR (int)
.ad
.RS 12n
Skip caching prefetched buffers
.sp
Use \fB1\fR for yes (default) and \fB0\fR to disable.
.RE

.sp
.ne 2
.na
\fBl2arc_norw\fR (int)
.ad
.RS 12n
No reads during writes
.sp
Use \fB1\fR for yes and \fB0\fR for no (default).
.RE

.sp
.ne 2
.na
\fBl2arc_write_boost\fR (ulong)
.ad
.RS 12n
Extra write bytes during device warmup
.sp
Default value: \fB8,388,608\fR.
.RE

.sp
.ne 2
.na
\fBl2arc_write_max\fR (ulong)
.ad
.RS 12n
Max write bytes per interval
.sp
Default value: \fB8,388,608\fR.
.RE

.sp
.ne 2
.na
\fBmetaslab_aliquot\fR (ulong)
.ad
.RS 12n
Metaslab granularity, in bytes. This is roughly similar to what would be
referred to as the "stripe size" in traditional RAID arrays. In normal
operation, ZFS will try to write this amount of data to a top-level vdev
before moving on to the next one.
.sp
Default value: \fB524,288\fR.
.RE

.sp
.ne 2
.na
\fBmetaslab_bias_enabled\fR (int)
.ad
.RS 12n
Enable metaslab group biasing based on its vdev's over- or under-utilization
relative to the pool.
.sp
Use \fB1\fR for yes (default) and \fB0\fR for no.
.RE

.sp
.ne 2
.na
\fBmetaslab_debug_load\fR (int)
.ad
.RS 12n
Load all metaslabs during pool import.
.sp
Use \fB1\fR for yes and \fB0\fR for no (default).
.RE

.sp
.ne 2
.na
\fBmetaslab_debug_unload\fR (int)
.ad
.RS 12n
Prevent metaslabs from being unloaded.
.sp
Use \fB1\fR for yes and \fB0\fR for no (default).
.RE

.sp
.ne 2
.na
\fBmetaslab_fragmentation_factor_enabled\fR (int)
.ad
.RS 12n
Enable use of the fragmentation metric in computing metaslab weights.
.sp
Use \fB1\fR for yes (default) and \fB0\fR for no.
.RE

.sp
.ne 2
.na
\fBmetaslabs_per_vdev\fR (int)
.ad
.RS 12n
When a vdev is added, it will be divided into approximately (but no more
than) this number of metaslabs.
.sp
Default value: \fB200\fR.
.RE

.sp
.ne 2
.na
\fBmetaslab_preload_enabled\fR (int)
.ad
.RS 12n
Enable metaslab group preloading.
.sp
Use \fB1\fR for yes (default) and \fB0\fR for no.
.RE

.sp
.ne 2
.na
\fBmetaslab_lba_weighting_enabled\fR (int)
.ad
.RS 12n
Give more weight to metaslabs with lower LBAs, assuming they have
greater bandwidth, as is typically the case on a modern constant
angular velocity disk drive.
.sp
Use \fB1\fR for yes (default) and \fB0\fR for no.
.RE

.sp
.ne 2
.na
\fBspa_config_path\fR (charp)
.ad
.RS 12n
SPA config file
.sp
Default value: \fB/etc/zfs/zpool.cache\fR.
.RE

.sp
.ne 2
.na
\fBspa_asize_inflation\fR (int)
.ad
.RS 12n
Multiplication factor used to estimate actual disk consumption from the
size of data being written. The default value is a worst case estimate,
but lower values may be valid for a given pool depending on its
configuration. Pool administrators who understand the factors involved
may wish to specify a more realistic inflation factor, particularly if
they operate close to quota or capacity limits.
.sp
Default value: \fB24\fR.
.RE

.sp
.ne 2
.na
\fBspa_load_verify_data\fR (int)
.ad
.RS 12n
Whether to traverse data blocks during an "extreme rewind" (\fB-X\fR)
import. Use \fB0\fR to disable and \fB1\fR to enable.

An extreme rewind import normally performs a full traversal of all
blocks in the pool for verification. If this parameter is set to 0,
the traversal skips non-metadata blocks. It can be toggled once the
import has started to stop or start the traversal of non-metadata blocks.
.sp
Default value: \fB1\fR.
.RE

.sp
.ne 2
.na
\fBspa_load_verify_metadata\fR (int)
.ad
.RS 12n
Whether to traverse blocks during an "extreme rewind" (\fB-X\fR)
pool import. Use \fB0\fR to disable and \fB1\fR to enable.

An extreme rewind import normally performs a full traversal of all
blocks in the pool for verification. If this parameter is set to 0,
the traversal is not performed. It can be toggled once the import has
started to stop or start the traversal.
.sp
Default value: \fB1\fR.
.RE

.sp
.ne 2
.na
\fBspa_load_verify_maxinflight\fR (int)
.ad
.RS 12n
Maximum number of concurrent I/Os during the traversal performed during an
"extreme rewind" (\fB-X\fR) pool import.
.sp
Default value: \fB10,000\fR.
.RE

.sp
.ne 2
.na
\fBspa_slop_shift\fR (int)
.ad
.RS 12n
Normally, we don't allow the last 3.1% (1/(2^spa_slop_shift)) of space
in the pool to be consumed. This ensures that we don't run the pool
completely out of space, due to unaccounted changes (e.g. to the MOS).
It also limits the worst-case time to allocate space. If we have
less than this amount of free space, most ZPL operations (e.g. write,
create) will return ENOSPC.
.sp
Default value: \fB5\fR.
.RE

29714574 TF |
320 | .sp |
321 | .ne 2 | |
322 | .na | |
323 | \fBzfetch_array_rd_sz\fR (ulong) | |
324 | .ad | |
325 | .RS 12n | |
27b293be | 326 | If prefetching is enabled, disable prefetching for reads larger than this size. |
29714574 TF |
327 | .sp |
328 | Default value: \fB1,048,576\fR. | |
329 | .RE | |
330 | ||
331 | .sp | |
332 | .ne 2 | |
333 | .na | |
334 | \fBzfetch_block_cap\fR (uint) | |
335 | .ad | |
336 | .RS 12n | |
27b293be | 337 | Max number of blocks to prefetch at a time |
29714574 TF |
338 | .sp |
339 | Default value: \fB256\fR. | |
340 | .RE | |
341 | ||
342 | .sp | |
343 | .ne 2 | |
344 | .na | |
345 | \fBzfetch_max_streams\fR (uint) | |
346 | .ad | |
347 | .RS 12n | |
27b293be | 348 | Max number of streams per zfetch (prefetch streams per file). |
29714574 TF |
349 | .sp |
350 | Default value: \fB8\fR. | |
351 | .RE | |
352 | ||
353 | .sp | |
354 | .ne 2 | |
355 | .na | |
356 | \fBzfetch_min_sec_reap\fR (uint) | |
357 | .ad | |
358 | .RS 12n | |
27b293be | 359 | Min time before an active prefetch stream can be reclaimed |
29714574 TF |
360 | .sp |
361 | Default value: \fB2\fR. | |
362 | .RE | |
363 | ||
49ddb315 MA |
364 | .sp |
365 | .ne 2 | |
366 | .na | |
367 | \fBzfs_arc_average_blocksize\fR (int) | |
368 | .ad | |
369 | .RS 12n | |
370 | The ARC's buffer hash table is sized based on the assumption of an average | |
371 | block size of \fBzfs_arc_average_blocksize\fR (default 8K). This works out | |
372 | to roughly 1MB of hash table per 1GB of physical memory with 8-byte pointers. | |
373 | For configurations with a known larger average block size this value can be | |
374 | increased to reduce the memory footprint. | |
375 | ||
376 | .sp | |
377 | Default value: \fB8192\fR. | |
378 | .RE | |
379 | ||
.sp
.ne 2
.na
\fBzfs_arc_evict_batch_limit\fR (int)
.ad
.RS 12n
Number of ARC headers to evict per sub-list before proceeding to another
sub-list. This batch-style operation prevents entire sub-lists from being
evicted at once but comes at a cost of additional unlocking and locking.
.sp
Default value: \fB10\fR.
.RE

.sp
.ne 2
.na
\fBzfs_arc_grow_retry\fR (int)
.ad
.RS 12n
Seconds before growing arc size
.sp
Default value: \fB5\fR.
.RE

.sp
.ne 2
.na
\fBzfs_arc_lotsfree_percent\fR (int)
.ad
.RS 12n
Throttle I/O when free system memory drops below this percentage of total
system memory. Setting this value to 0 will disable the throttle.
.sp
Default value: \fB10\fR.
.RE

.sp
.ne 2
.na
\fBzfs_arc_max\fR (ulong)
.ad
.RS 12n
Max arc size
.sp
Default value: \fB0\fR.
.RE

.sp
.ne 2
.na
\fBzfs_arc_meta_limit\fR (ulong)
.ad
.RS 12n
The maximum allowed size in bytes that meta data buffers may consume in
the ARC. When this limit is reached meta data buffers will be reclaimed
even if the overall arc_c_max has not been reached. This value defaults
to 0 which indicates that 3/4 of the ARC may be used for meta data.
.sp
Default value: \fB0\fR.
.RE

.sp
.ne 2
.na
\fBzfs_arc_meta_min\fR (ulong)
.ad
.RS 12n
The minimum allowed size in bytes that meta data buffers may consume in
the ARC. This value defaults to 0 which disables a floor on the amount
of the ARC devoted to meta data.
.sp
Default value: \fB0\fR.
.RE

.sp
.ne 2
.na
\fBzfs_arc_meta_prune\fR (int)
.ad
.RS 12n
The number of dentries and inodes to be scanned looking for entries
which can be dropped. This may be required when the ARC reaches the
\fBzfs_arc_meta_limit\fR because dentries and inodes can pin buffers
in the ARC. Increasing this value will cause the dentry and inode caches
to be pruned more aggressively. Setting this value to 0 will disable
pruning the inode and dentry caches.
.sp
Default value: \fB10,000\fR.
.RE

.sp
.ne 2
.na
\fBzfs_arc_meta_adjust_restarts\fR (ulong)
.ad
.RS 12n
The number of restart passes to make while scanning the ARC attempting
to free buffers in order to stay below the \fBzfs_arc_meta_limit\fR.
This value should not need to be tuned but is available to facilitate
performance analysis.
.sp
Default value: \fB4096\fR.
.RE

.sp
.ne 2
.na
\fBzfs_arc_min\fR (ulong)
.ad
.RS 12n
Min arc size
.sp
Default value: \fB100\fR.
.RE

.sp
.ne 2
.na
\fBzfs_arc_min_prefetch_lifespan\fR (int)
.ad
.RS 12n
Min life of prefetch block
.sp
Default value: \fB100\fR.
.RE

.sp
.ne 2
.na
\fBzfs_arc_num_sublists_per_state\fR (int)
.ad
.RS 12n
To allow more fine-grained locking, each ARC state contains a series
of lists for both data and meta data objects. Locking is performed at
the level of these "sub-lists". This parameter controls the number of
sub-lists per ARC state.
.sp
Default value: \fB1\fR or the number of online CPUs, whichever is greater.
.RE

.sp
.ne 2
.na
\fBzfs_arc_overflow_shift\fR (int)
.ad
.RS 12n
The ARC size is considered to be overflowing if it exceeds the current
ARC target size (arc_c) by a threshold determined by this parameter.
The threshold is calculated as a fraction of arc_c using the formula
"arc_c >> \fBzfs_arc_overflow_shift\fR".

The default value of 8 causes the ARC to be considered to be overflowing
if it exceeds the target size by 1/256th (0.4%) of the target size.

When the ARC is overflowing, new buffer allocations are stalled until
the reclaim thread catches up and the overflow condition no longer exists.
.sp
Default value: \fB8\fR.
.RE

.sp
.ne 2
.na
\fBzfs_arc_p_min_shift\fR (int)
.ad
.RS 12n
arc_c shift to calc min/max arc_p
.sp
Default value: \fB4\fR.
.RE

.sp
.ne 2
.na
\fBzfs_arc_p_aggressive_disable\fR (int)
.ad
.RS 12n
Disable aggressive arc_p growth
.sp
Use \fB1\fR for yes (default) and \fB0\fR to disable.
.RE

.sp
.ne 2
.na
\fBzfs_arc_p_dampener_disable\fR (int)
.ad
.RS 12n
Disable arc_p adapt dampener
.sp
Use \fB1\fR for yes (default) and \fB0\fR to disable.
.RE

.sp
.ne 2
.na
\fBzfs_arc_shrink_shift\fR (int)
.ad
.RS 12n
log2(fraction of arc to reclaim)
.sp
Default value: \fB5\fR.
.RE

.sp
.ne 2
.na
\fBzfs_arc_sys_free\fR (ulong)
.ad
.RS 12n
The target number of bytes the ARC should leave as free memory on the system.
Defaults to the larger of 1/64 of physical memory or 512K. Setting this
option to a non-zero value will override the default.
.sp
Default value: \fB0\fR.
.RE

.sp
.ne 2
.na
\fBzfs_autoimport_disable\fR (int)
.ad
.RS 12n
Disable pool import at module load by ignoring the cache file (typically
\fB/etc/zfs/zpool.cache\fR).
.sp
Use \fB1\fR for yes (default) and \fB0\fR for no.
.RE

.sp
.ne 2
.na
\fBzfs_dbgmsg_enable\fR (int)
.ad
.RS 12n
Internally ZFS keeps a small log to facilitate debugging. By default the log
is disabled; to enable it set this option to 1. The contents of the log can
be accessed by reading the /proc/spl/kstat/zfs/dbgmsg file. Writing 0 to
this proc file clears the log.
.sp
Default value: \fB0\fR.
.RE

.sp
.ne 2
.na
\fBzfs_dbgmsg_maxsize\fR (int)
.ad
.RS 12n
The maximum size in bytes of the internal ZFS debug log.
.sp
Default value: \fB4M\fR.
.RE

.sp
.ne 2
.na
\fBzfs_dbuf_state_index\fR (int)
.ad
.RS 12n
Calculate arc header index
.sp
Default value: \fB0\fR.
.RE

.sp
.ne 2
.na
\fBzfs_deadman_enabled\fR (int)
.ad
.RS 12n
Enable deadman timer
.sp
Use \fB1\fR for yes (default) and \fB0\fR to disable.
.RE

.sp
.ne 2
.na
\fBzfs_deadman_synctime_ms\fR (ulong)
.ad
.RS 12n
Expiration time in milliseconds. This value has two meanings. First it is
used to determine when the spa_deadman() logic should fire. By default the
spa_deadman() will fire if spa_sync() has not completed in 1000 seconds.
Secondly, the value determines if an I/O is considered "hung". Any I/O that
has not completed in zfs_deadman_synctime_ms is considered "hung" resulting
in a zevent being logged.
.sp
Default value: \fB1,000,000\fR.
.RE

.sp
.ne 2
.na
\fBzfs_dedup_prefetch\fR (int)
.ad
.RS 12n
Enable prefetching dedup-ed blocks
.sp
Use \fB1\fR for yes and \fB0\fR to disable (default).
.RE

.sp
.ne 2
.na
\fBzfs_delay_min_dirty_percent\fR (int)
.ad
.RS 12n
Start to delay each transaction once there is this amount of dirty data,
expressed as a percentage of \fBzfs_dirty_data_max\fR.
This value should be >= \fBzfs_vdev_async_write_active_max_dirty_percent\fR.
See the section "ZFS TRANSACTION DELAY".
.sp
Default value: \fB60\fR.
.RE

.sp
.ne 2
.na
\fBzfs_delay_scale\fR (int)
.ad
.RS 12n
This controls how quickly the transaction delay approaches infinity.
Larger values cause longer delays for a given amount of dirty data.
.sp
For the smoothest delay, this value should be about 1 billion divided
by the maximum number of operations per second. This will smoothly
handle between 10x and 1/10th this number.
.sp
See the section "ZFS TRANSACTION DELAY".
.sp
Note: \fBzfs_delay_scale\fR * \fBzfs_dirty_data_max\fR must be < 2^64.
.sp
Default value: \fB500,000\fR.
.RE

.sp
.ne 2
.na
\fBzfs_dirty_data_max\fR (int)
.ad
.RS 12n
Determines the dirty space limit in bytes. Once this limit is exceeded, new
writes are halted until space frees up. This parameter takes precedence
over \fBzfs_dirty_data_max_percent\fR.
See the section "ZFS TRANSACTION DELAY".
.sp
Default value: 10 percent of all memory, capped at \fBzfs_dirty_data_max_max\fR.
.RE

.sp
.ne 2
.na
\fBzfs_dirty_data_max_max\fR (int)
.ad
.RS 12n
Maximum allowable value of \fBzfs_dirty_data_max\fR, expressed in bytes.
This limit is only enforced at module load time, and will be ignored if
\fBzfs_dirty_data_max\fR is later changed. This parameter takes
precedence over \fBzfs_dirty_data_max_max_percent\fR. See the section
"ZFS TRANSACTION DELAY".
.sp
Default value: 25% of physical RAM.
.RE

.sp
.ne 2
.na
\fBzfs_dirty_data_max_max_percent\fR (int)
.ad
.RS 12n
Maximum allowable value of \fBzfs_dirty_data_max\fR, expressed as a
percentage of physical RAM. This limit is only enforced at module load
time, and will be ignored if \fBzfs_dirty_data_max\fR is later changed.
The parameter \fBzfs_dirty_data_max_max\fR takes precedence over this
one. See the section "ZFS TRANSACTION DELAY".
.sp
Default value: \fB25\fR.
.RE

.sp
.ne 2
.na
\fBzfs_dirty_data_max_percent\fR (int)
.ad
.RS 12n
Determines the dirty space limit, expressed as a percentage of all
memory. Once this limit is exceeded, new writes are halted until space frees
up. The parameter \fBzfs_dirty_data_max\fR takes precedence over this
one. See the section "ZFS TRANSACTION DELAY".
.sp
Default value: \fB10\fR%, subject to \fBzfs_dirty_data_max_max\fR.
.RE

.sp
.ne 2
.na
\fBzfs_dirty_data_sync\fR (int)
.ad
.RS 12n
Start syncing out a transaction group if there is at least this much dirty data.
.sp
Default value: \fB67,108,864\fR.
.RE

.sp
.ne 2
.na
\fBzfs_free_max_blocks\fR (ulong)
.ad
.RS 12n
Maximum number of blocks freed in a single txg.
.sp
Default value: \fB100,000\fR.
.RE

.sp
.ne 2
.na
\fBzfs_vdev_async_read_max_active\fR (int)
.ad
.RS 12n
Maximum asynchronous read I/Os active to each device.
See the section "ZFS I/O SCHEDULER".
.sp
Default value: \fB3\fR.
.RE

.sp
.ne 2
.na
\fBzfs_vdev_async_read_min_active\fR (int)
.ad
.RS 12n
Minimum asynchronous read I/Os active to each device.
See the section "ZFS I/O SCHEDULER".
.sp
Default value: \fB1\fR.
.RE

.sp
.ne 2
.na
\fBzfs_vdev_async_write_active_max_dirty_percent\fR (int)
.ad
.RS 12n
When the pool has more than
\fBzfs_vdev_async_write_active_max_dirty_percent\fR dirty data, use
\fBzfs_vdev_async_write_max_active\fR to limit active async writes. If
the dirty data is between min and max, the active I/O limit is linearly
interpolated. See the section "ZFS I/O SCHEDULER".
.sp
Default value: \fB60\fR.
.RE

.sp
.ne 2
.na
\fBzfs_vdev_async_write_active_min_dirty_percent\fR (int)
.ad
.RS 12n
When the pool has less than
\fBzfs_vdev_async_write_active_min_dirty_percent\fR dirty data, use
\fBzfs_vdev_async_write_min_active\fR to limit active async writes. If
the dirty data is between min and max, the active I/O limit is linearly
interpolated. See the section "ZFS I/O SCHEDULER".
.sp
Default value: \fB30\fR.
.RE

.sp
.ne 2
.na
\fBzfs_vdev_async_write_max_active\fR (int)
.ad
.RS 12n
Maximum asynchronous write I/Os active to each device.
See the section "ZFS I/O SCHEDULER".
.sp
Default value: \fB10\fR.
.RE

.sp
.ne 2
.na
\fBzfs_vdev_async_write_min_active\fR (int)
.ad
.RS 12n
Minimum asynchronous write I/Os active to each device.
See the section "ZFS I/O SCHEDULER".
.sp
Default value: \fB1\fR.
.RE

.sp
.ne 2
.na
\fBzfs_vdev_max_active\fR (int)
.ad
.RS 12n
The maximum number of I/Os active to each device. Ideally, this will be >=
the sum of each queue's max_active. It must be at least the sum of each
queue's min_active. See the section "ZFS I/O SCHEDULER".
.sp
Default value: \fB1,000\fR.
.RE

.sp
.ne 2
.na
\fBzfs_vdev_scrub_max_active\fR (int)
.ad
.RS 12n
Maximum scrub I/Os active to each device.
See the section "ZFS I/O SCHEDULER".
.sp
Default value: \fB2\fR.
.RE

.sp
.ne 2
.na
\fBzfs_vdev_scrub_min_active\fR (int)
.ad
.RS 12n
Minimum scrub I/Os active to each device.
See the section "ZFS I/O SCHEDULER".
.sp
Default value: \fB1\fR.
.RE

.sp
.ne 2
.na
\fBzfs_vdev_sync_read_max_active\fR (int)
.ad
.RS 12n
Maximum synchronous read I/Os active to each device.
See the section "ZFS I/O SCHEDULER".
.sp
Default value: \fB10\fR.
.RE

.sp
.ne 2
.na
\fBzfs_vdev_sync_read_min_active\fR (int)
.ad
.RS 12n
Minimum synchronous read I/Os active to each device.
See the section "ZFS I/O SCHEDULER".
.sp
Default value: \fB10\fR.
.RE

.sp
.ne 2
.na
\fBzfs_vdev_sync_write_max_active\fR (int)
.ad
.RS 12n
Maximum synchronous write I/Os active to each device.
See the section "ZFS I/O SCHEDULER".
.sp
Default value: \fB10\fR.
.RE

.sp
.ne 2
.na
\fBzfs_vdev_sync_write_min_active\fR (int)
.ad
.RS 12n
Minimum synchronous write I/Os active to each device.
See the section "ZFS I/O SCHEDULER".
.sp
Default value: \fB10\fR.
.RE

.sp
.ne 2
.na
\fBzfs_disable_dup_eviction\fR (int)
.ad
.RS 12n
Disable duplicate buffer eviction
.sp
Use \fB1\fR for yes and \fB0\fR for no (default).
.RE

.sp
.ne 2
.na
\fBzfs_expire_snapshot\fR (int)
.ad
.RS 12n
Seconds to expire .zfs/snapshot
.sp
Default value: \fB300\fR.
.RE

.sp
.ne 2
.na
\fBzfs_admin_snapshot\fR (int)
.ad
.RS 12n
Allow the creation, removal, or renaming of entries in the .zfs/snapshot
directory to cause the creation, destruction, or renaming of snapshots.
When enabled this functionality works both locally and over NFS exports
which have the 'no_root_squash' option set. This functionality is disabled
by default.
.sp
Use \fB1\fR for yes and \fB0\fR for no (default).
.RE

.sp
.ne 2
.na
\fBzfs_flags\fR (int)
.ad
.RS 12n
Set additional debugging flags. The following flags may be bitwise-or'd
together.
.sp
.TS
box;
rB lB
lB lB
r l.
Value	Symbolic Name
	Description
_
1	ZFS_DEBUG_DPRINTF
	Enable dprintf entries in the debug log.
_
2	ZFS_DEBUG_DBUF_VERIFY *
	Enable extra dbuf verifications.
_
4	ZFS_DEBUG_DNODE_VERIFY *
	Enable extra dnode verifications.
_
8	ZFS_DEBUG_SNAPNAMES
	Enable snapshot name verification.
_
16	ZFS_DEBUG_MODIFY
	Check for illegally modified ARC buffers.
_
32	ZFS_DEBUG_SPA
	Enable spa_dbgmsg entries in the debug log.
_
64	ZFS_DEBUG_ZIO_FREE
	Enable verification of block frees.
_
128	ZFS_DEBUG_HISTOGRAM_VERIFY
	Enable extra spacemap histogram verifications.
.TE
.sp
* Requires debug build.
.sp
Default value: \fB0\fR.
.RE

.sp
.ne 2
.na
\fBzfs_free_leak_on_eio\fR (int)
.ad
.RS 12n
If destroy encounters an EIO while reading metadata (e.g. indirect
blocks), space referenced by the missing metadata can not be freed.
Normally this causes the background destroy to become "stalled", as
it is unable to make forward progress. While in this stalled state,
all remaining space to free from the error-encountering filesystem is
"temporarily leaked". Set this flag to cause it to ignore the EIO,
permanently leak the space from indirect blocks that can not be read,
and continue to free everything else that it can.

The default, "stalling" behavior is useful if the storage partially
fails (i.e. some but not all I/Os fail), and then later recovers. In
this case, we will be able to continue pool operations while it is
partially failed, and when it recovers, we can continue to free the
space, with no leaks. However, note that this case is actually
fairly rare.

Typically pools either (a) fail completely (but perhaps temporarily,
e.g. a top-level vdev going offline), or (b) have localized,
permanent errors (e.g. disk returns the wrong data due to bit flip or
firmware bug). In case (a), this setting does not matter because the
pool will be suspended and the sync thread will not be able to make
forward progress regardless. In case (b), because the error is
permanent, the best we can do is leak the minimum amount of space,
which is what setting this flag will do. Therefore, it is reasonable
for this flag to normally be set, but we chose the more conservative
approach of not setting it, so that there is no possibility of
leaking space in the "partial temporary" failure case.
.sp
Default value: \fB0\fR.
.RE

.sp
.ne 2
.na
\fBzfs_free_min_time_ms\fR (int)
.ad
.RS 12n
Min milliseconds to free per txg
.sp
Default value: \fB1,000\fR.
.RE

.sp
.ne 2
.na
\fBzfs_immediate_write_sz\fR (long)
.ad
.RS 12n
Largest data block to write to the ZIL
.sp
Default value: \fB32,768\fR.
.RE

.sp
.ne 2
.na
\fBzfs_max_recordsize\fR (int)
.ad
.RS 12n
We currently support block sizes from 512 bytes to 16MB. The benefits of
larger blocks, and thus larger I/O, need to be weighed against the cost of
COWing a giant block to modify one byte. Additionally, very large blocks
can have an impact on I/O latency, and also potentially on the memory
allocator. Therefore, we do not allow the recordsize to be set larger than
\fBzfs_max_recordsize\fR (default 1MB). Larger blocks can be created by
changing this tunable, and pools with larger blocks can always be imported
and used, regardless of this setting.
.sp
Default value: \fB1,048,576\fR.
.RE

.sp
.ne 2
.na
\fBzfs_mdcomp_disable\fR (int)
.ad
.RS 12n
Disable meta data compression
.sp
Use \fB1\fR for yes and \fB0\fR for no (default).
.RE

.sp
.ne 2
.na
\fBzfs_metaslab_fragmentation_threshold\fR (int)
.ad
.RS 12n
Allow metaslabs to keep their active state as long as their fragmentation
percentage is less than or equal to this value. An active metaslab that
exceeds this threshold will no longer keep its active status allowing
better metaslabs to be selected.
.sp
Default value: \fB70\fR.
.RE

.sp
.ne 2
.na
\fBzfs_mg_fragmentation_threshold\fR (int)
.ad
.RS 12n
Metaslab groups are considered eligible for allocations if their
fragmentation metric (measured as a percentage) is less than or equal to
this value. If a metaslab group exceeds this threshold then it will be
skipped unless all metaslab groups within the metaslab class have also
crossed this threshold.
.sp
Default value: \fB85\fR.
.RE

.sp
.ne 2
.na
\fBzfs_mg_noalloc_threshold\fR (int)
.ad
.RS 12n
Defines a threshold at which metaslab groups should be eligible for
allocations. The value is expressed as a percentage of free space
beyond which a metaslab group is always eligible for allocations.
If a metaslab group's free space is less than or equal to the
threshold, the allocator will avoid allocating to that group
unless all groups in the pool have reached the threshold. Once all
groups have reached the threshold, all groups are allowed to accept
allocations. The default value of 0 disables the feature and causes
all metaslab groups to be eligible for allocations.

This parameter makes it possible to deal with pools having heavily
imbalanced vdevs such as would be the case when a new vdev has been added.
Setting the threshold to a non-zero percentage will stop allocations
from being made to vdevs that aren't filled to the specified percentage
and allow lesser filled vdevs to acquire more allocations than they
otherwise would under the old \fBzfs_mg_alloc_failures\fR facility.
.sp
Default value: \fB0\fR.
.RE

.sp
.ne 2
.na
\fBzfs_no_scrub_io\fR (int)
.ad
.RS 12n
Set for no scrub I/O
.sp
Use \fB1\fR for yes and \fB0\fR for no (default).
.RE

.sp
.ne 2
.na
\fBzfs_no_scrub_prefetch\fR (int)
.ad
.RS 12n
Set for no scrub prefetching
.sp
Use \fB1\fR for yes and \fB0\fR for no (default).
.RE

.sp
.ne 2
.na
\fBzfs_nocacheflush\fR (int)
.ad
.RS 12n
Disable cache flushes
.sp
Use \fB1\fR for yes and \fB0\fR for no (default).
.RE

.sp
.ne 2
.na
\fBzfs_nopwrite_enabled\fR (int)
.ad
.RS 12n
Enable NOP writes
.sp
Use \fB1\fR for yes (default) and \fB0\fR to disable.
.RE

.sp
.ne 2
.na
\fBzfs_pd_bytes_max\fR (int)
.ad
.RS 12n
The number of bytes which should be prefetched.
.sp
Default value: \fB52,428,800\fR.
.RE

.sp
.ne 2
.na
\fBzfs_prefetch_disable\fR (int)
.ad
.RS 12n
Disable all ZFS prefetching
.sp
Use \fB1\fR for yes and \fB0\fR for no (default).
.RE

.sp
.ne 2
.na
\fBzfs_read_chunk_size\fR (long)
.ad
.RS 12n
Bytes to read per chunk
.sp
Default value: \fB1,048,576\fR.
.RE

.sp
.ne 2
.na
\fBzfs_read_history\fR (int)
.ad
.RS 12n
Historic statistics for the last N reads
.sp
Default value: \fB0\fR.
.RE

.sp
.ne 2
.na
\fBzfs_read_history_hits\fR (int)
.ad
.RS 12n
Include cache hits in read history
.sp
Use \fB1\fR for yes and \fB0\fR for no (default).
.RE

.sp
.ne 2
.na
\fBzfs_recover\fR (int)
.ad
.RS 12n
Set to attempt to recover from fatal errors. This should only be used as a
last resort, as it typically results in leaked space, or worse.
.sp
Use \fB1\fR for yes and \fB0\fR for no (default).
.RE

.sp
.ne 2
.na
\fBzfs_resilver_delay\fR (int)
.ad
.RS 12n
Number of ticks to delay prior to issuing a resilver I/O operation when
a non-resilver or non-scrub I/O operation has occurred within the past
\fBzfs_scan_idle\fR ticks.
.sp
Default value: \fB2\fR.
.RE

.sp
.ne 2
.na
\fBzfs_resilver_min_time_ms\fR (int)
.ad
.RS 12n
Min milliseconds to resilver per txg
.sp
Default value: \fB3,000\fR.
.RE

.sp
.ne 2
.na
\fBzfs_scan_idle\fR (int)
.ad
.RS 12n
Idle window in clock ticks. During a scrub or a resilver, if
a non-scrub or non-resilver I/O operation has occurred during this
window, the next scrub or resilver operation is delayed by
\fBzfs_scrub_delay\fR or \fBzfs_resilver_delay\fR ticks, respectively.
.sp
Default value: \fB50\fR.
.RE

.sp
.ne 2
.na
\fBzfs_scan_min_time_ms\fR (int)
.ad
.RS 12n
Min milliseconds to scrub per txg
.sp
Default value: \fB1,000\fR.
.RE

.sp
.ne 2
.na
\fBzfs_scrub_delay\fR (int)
.ad
.RS 12n
Number of ticks to delay prior to issuing a scrub I/O operation when
a non-scrub or non-resilver I/O operation has occurred within the past
\fBzfs_scan_idle\fR ticks.
.sp
Default value: \fB4\fR.
.RE

.sp
.ne 2
.na
\fBzfs_send_corrupt_data\fR (int)
.ad
.RS 12n
Allow sending of corrupt data (ignore read/checksum errors when sending data)
.sp
Use \fB1\fR for yes and \fB0\fR for no (default).
.RE

.sp
.ne 2
.na
\fBzfs_sync_pass_deferred_free\fR (int)
.ad
.RS 12n
Defer frees starting in this pass
.sp
Default value: \fB2\fR.
.RE

.sp
.ne 2
.na
\fBzfs_sync_pass_dont_compress\fR (int)
.ad
.RS 12n
Don't compress starting in this pass
.sp
Default value: \fB5\fR.
.RE

.sp
.ne 2
.na
\fBzfs_sync_pass_rewrite\fR (int)
.ad
.RS 12n
Rewrite new block pointers starting in this pass
.sp
Default value: \fB2\fR.
.RE

.sp
.ne 2
.na
\fBzfs_top_maxinflight\fR (int)
.ad
.RS 12n
Max I/Os per top-level vdev during scrub or resilver operations.
.sp
Default value: \fB32\fR.
.RE

.sp
.ne 2
.na
\fBzfs_txg_history\fR (int)
.ad
.RS 12n
Historic statistics for the last N txgs
.sp
Default value: \fB0\fR.
.RE

.sp
.ne 2
.na
\fBzfs_txg_timeout\fR (int)
.ad
.RS 12n
Max seconds worth of delta per txg
.sp
Default value: \fB5\fR.
.RE

.sp
.ne 2
.na
\fBzfs_vdev_aggregation_limit\fR (int)
.ad
.RS 12n
Max vdev I/O aggregation size
.sp
Default value: \fB131,072\fR.
.RE

.sp
.ne 2
.na
\fBzfs_vdev_cache_bshift\fR (int)
.ad
.RS 12n
Shift size to inflate reads to
.sp
Default value: \fB16\fR.
.RE

.sp
.ne 2
.na
\fBzfs_vdev_cache_max\fR (int)
.ad
.RS 12n
Inflate reads smaller than this value
.RE

.sp
.ne 2
.na
\fBzfs_vdev_cache_size\fR (int)
.ad
.RS 12n
Total size of the per-disk cache
.sp
Default value: \fB0\fR.
.RE

.sp
.ne 2
.na
\fBzfs_vdev_mirror_switch_us\fR (int)
.ad
.RS 12n
Switch mirrors every N usecs
.sp
Default value: \fB10,000\fR.
.RE

.sp
.ne 2
.na
\fBzfs_vdev_read_gap_limit\fR (int)
.ad
.RS 12n
Aggregate read I/O over gap
.sp
Default value: \fB32,768\fR.
.RE

.sp
.ne 2
.na
\fBzfs_vdev_scheduler\fR (charp)
.ad
.RS 12n
I/O scheduler
.sp
Default value: \fBnoop\fR.
.RE

.sp
.ne 2
.na
\fBzfs_vdev_write_gap_limit\fR (int)
.ad
.RS 12n
Aggregate write I/O over gap
.sp
Default value: \fB4,096\fR.
.RE

.sp
.ne 2
.na
\fBzfs_zevent_cols\fR (int)
.ad
.RS 12n
Max event column width
.sp
Default value: \fB80\fR.
.RE

.sp
.ne 2
.na
\fBzfs_zevent_console\fR (int)
.ad
.RS 12n
Log events to the console
.sp
Use \fB1\fR for yes and \fB0\fR for no (default).
.RE

.sp
.ne 2
.na
\fBzfs_zevent_len_max\fR (int)
.ad
.RS 12n
Max event queue length
.sp
Default value: \fB0\fR.
.RE

.sp
.ne 2
.na
\fBzil_replay_disable\fR (int)
.ad
.RS 12n
Disable intent logging replay
.sp
Use \fB1\fR for yes and \fB0\fR for no (default).
.RE

.sp
.ne 2
.na
\fBzil_slog_limit\fR (ulong)
.ad
.RS 12n
Max commit bytes to separate log device
.sp
Default value: \fB1,048,576\fR.
.RE

.sp
.ne 2
.na
\fBzio_delay_max\fR (int)
.ad
.RS 12n
Max zio millisecond delay before posting event
.sp
Default value: \fB30,000\fR.
.RE

.sp
.ne 2
.na
\fBzio_requeue_io_start_cut_in_line\fR (int)
.ad
.RS 12n
Prioritize requeued I/O
.sp
Default value: \fB0\fR.
.RE

.sp
.ne 2
.na
\fBzio_taskq_batch_pct\fR (uint)
.ad
.RS 12n
Percentage of online CPUs (or CPU cores, etc) which will run a worker thread
for I/O. These workers are responsible for I/O work such as compression and
checksum calculations. Fractional numbers of CPUs will be rounded down.
.sp
The default value of 75 was chosen to avoid using all CPUs which can result in
latency issues and inconsistent application performance, especially when high
compression is enabled.
.sp
Default value: \fB75\fR.
.RE

.sp
.ne 2
.na
\fBzvol_inhibit_dev\fR (uint)
.ad
.RS 12n
Do not create zvol device nodes
.sp
Use \fB1\fR for yes and \fB0\fR for no (default).
.RE

.sp
.ne 2
.na
\fBzvol_major\fR (uint)
.ad
.RS 12n
Major number for zvol device
.sp
Default value: \fB230\fR.
.RE

.sp
.ne 2
.na
\fBzvol_max_discard_blocks\fR (ulong)
.ad
.RS 12n
Max number of blocks to discard at once
.sp
Default value: \fB16,384\fR.
.RE

.sp
.ne 2
.na
\fBzvol_prefetch_bytes\fR (uint)
.ad
.RS 12n
When adding a zvol to the system prefetch \fBzvol_prefetch_bytes\fR
from the start and end of the volume. Prefetching these regions
of the volume is desirable because they are likely to be accessed
immediately by \fBblkid(8)\fR or by the kernel scanning for a partition
table.
.sp
Default value: \fB131,072\fR.
.RE

.SH ZFS I/O SCHEDULER
ZFS issues I/O operations to leaf vdevs to satisfy and complete I/Os.
The I/O scheduler determines when and in what order those operations are
issued. The I/O scheduler divides operations into five I/O classes
prioritized in the following order: sync read, sync write, async read,
async write, and scrub/resilver. Each queue defines the minimum and
maximum number of concurrent operations that may be issued to the
device. In addition, the device has an aggregate maximum,
\fBzfs_vdev_max_active\fR. Note that the sum of the per-queue minimums
must not exceed the aggregate maximum. If the sum of the per-queue
maximums exceeds the aggregate maximum, then the number of active I/Os
may reach \fBzfs_vdev_max_active\fR, in which case no further I/Os will
be issued regardless of whether all per-queue minimums have been met.
.sp
For many physical devices, throughput increases with the number of
concurrent operations, but latency typically suffers. Further, physical
devices typically have a limit at which more concurrent operations have no
effect on throughput or can actually cause it to decrease.
.sp
The scheduler selects the next operation to issue by first looking for an
I/O class whose minimum has not been satisfied. Once all are satisfied and
the aggregate maximum has not been hit, the scheduler looks for classes
whose maximum has not been satisfied. Iteration through the I/O classes is
done in the order specified above. No further operations are issued if the
aggregate maximum number of concurrent operations has been hit or if there
are no operations queued for an I/O class that has not hit its maximum.
Every time an I/O is queued or an operation completes, the I/O scheduler
looks for new operations to issue.
.sp
In general, smaller max_active's will lead to lower latency of synchronous
operations. Larger max_active's may lead to higher overall throughput,
depending on underlying storage.
.sp
The ratio of the queues' max_actives determines the balance of performance
between reads, writes, and scrubs. E.g., increasing
\fBzfs_vdev_scrub_max_active\fR will cause the scrub or resilver to complete
more quickly, but reads and writes will have higher latency and lower
throughput.
.sp
All I/O classes have a fixed maximum number of outstanding operations
except for the async write class. Asynchronous writes represent the data
that is committed to stable storage during the syncing stage for
transaction groups. Transaction groups enter the syncing state
periodically so the number of queued async writes will quickly burst up
and then bleed down to zero. Rather than servicing them as quickly as
possible, the I/O scheduler changes the maximum number of active async
write I/Os according to the amount of dirty data in the pool. Since
both throughput and latency typically increase with the number of
concurrent operations issued to physical devices, reducing the
burstiness in the number of concurrent operations also stabilizes the
response time of operations from other -- and in particular synchronous
-- queues. In broad strokes, the I/O scheduler will issue more
concurrent operations from the async write queue as there's more dirty
data in the pool.
.sp
Async Writes
.sp
The number of concurrent operations issued for the async write I/O class
follows a piece-wise linear function defined by a few adjustable points.
.nf

       |              o---------| <-- zfs_vdev_async_write_max_active
  ^    |             /^         |
  |    |            / |         |
active |           /  |         |
 I/O   |          /   |         |
count  |         /    |         |
       |        /     |         |
       |-------o      |         | <-- zfs_vdev_async_write_min_active
      0|_______^______|_________|
       0%      |      |       100% of zfs_dirty_data_max
               |      |
               |      `-- zfs_vdev_async_write_active_max_dirty_percent
               `--------- zfs_vdev_async_write_active_min_dirty_percent

.fi
Until the amount of dirty data exceeds a minimum percentage of the dirty
data allowed in the pool, the I/O scheduler will limit the number of
concurrent operations to the minimum. As that threshold is crossed, the
number of concurrent operations issued increases linearly to the maximum at
the specified maximum percentage of the dirty data allowed in the pool.
.sp
Ideally, the amount of dirty data on a busy pool will stay in the sloped
part of the function between \fBzfs_vdev_async_write_active_min_dirty_percent\fR
and \fBzfs_vdev_async_write_active_max_dirty_percent\fR. If it exceeds the
maximum percentage, this indicates that the rate of incoming data is
greater than the rate that the backend storage can handle. In this case, we
must further throttle incoming writes, as described in the next section.

.SH ZFS TRANSACTION DELAY
We delay transactions when we've determined that the backend storage
isn't able to accommodate the rate of incoming writes.
.sp
If there is already a transaction waiting, we delay relative to when
that transaction will finish waiting. This way the calculated delay time
is independent of the number of threads concurrently executing
transactions.
.sp
If we are the only waiter, wait relative to when the transaction
started, rather than the current time. This credits the transaction for
"time already served", e.g. reading indirect blocks.
.sp
The minimum time for a transaction to take is calculated as:
.nf
    min_time = zfs_delay_scale * (dirty - min) / (max - dirty)
    min_time is then capped at 100 milliseconds.
.fi
.sp
The delay has two degrees of freedom that can be adjusted via tunables. The
percentage of dirty data at which we start to delay is defined by
\fBzfs_delay_min_dirty_percent\fR. This should typically be at or above
\fBzfs_vdev_async_write_active_max_dirty_percent\fR so that we only start to
delay after writing at full speed has failed to keep up with the incoming write
rate. The scale of the curve is defined by \fBzfs_delay_scale\fR. Roughly speaking,
this variable determines the amount of delay at the midpoint of the curve.
.sp
.nf
delay
 10ms +-------------------------------------------------------------*+
      |                                                             *|
  9ms +                                                             *+
      |                                                             *|
  8ms +                                                             *+
      |                                                            * |
  7ms +                                                            * +
      |                                                            * |
  6ms +                                                            * +
      |                                                            * |
  5ms +                                                            * +
      |                                                            * |
  4ms +                                                            * +
      |                                                            * |
  3ms +                                                            * +
      |                                                            * |
  2ms +                                        (midpoint)          *  +
      |                                            |             **   |
  1ms +                                            v          ***     +
      |           zfs_delay_scale ---------->        ********         |
    0 +-------------------------------------*********----------------+
      0%                    <- zfs_dirty_data_max ->               100%
.fi
.sp
Note that since the delay is added to the outstanding time remaining on the
most recent transaction, the delay is effectively the inverse of IOPS.
Here the midpoint of 500us translates to 2000 IOPS. The shape of the curve
was chosen such that small changes in the amount of accumulated dirty data
in the first 3/4 of the curve yield relatively small differences in the
amount of delay.
.sp
The effects can be easier to understand when the amount of delay is
represented on a log scale:
.sp
.nf
delay
100ms +-------------------------------------------------------------++
      +                                                              +
      |                                                              |
      +                                                             *+
 10ms +                                                             *+
      +                                                           **  +
      |                                            (midpoint)    **   |
      +                                                |       **     +
  1ms +                                                v   ****       +
      +             zfs_delay_scale ---------->    *****              +
      |                                         ****                  |
      +                                      ****                     +
100us +                                    **                         +
      +                                   *                           +
      |                                  *                            |
      +                                 *                             +
 10us +                                 *                             +
      +                                                               +
      |                                                               |
      +                                                               +
      +--------------------------------------------------------------+
      0%                    <- zfs_dirty_data_max ->               100%
.fi
.sp
Note here that only as the amount of dirty data approaches its limit does
the delay start to increase rapidly. The goal of a properly tuned system
should be to keep the amount of dirty data out of that range by first
ensuring that the appropriate limits are set for the I/O scheduler to reach
optimal throughput on the backend storage, and then by changing the value
of \fBzfs_delay_scale\fR to increase the steepness of the curve.
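.sp
The formula above can be sketched directly in Python; values are in
nanoseconds, with \fBzfs_delay_scale\fR at its default of 500,000 and the
delay starting at \fBzfs_delay_min_dirty_percent\fR (60% here). The helper
is illustrative only:
.sp
.nf
# Per-transaction delay (ns), capped at 100 milliseconds.
def tx_delay_ns(dirty_pct, min_pct=60, max_pct=100, zfs_delay_scale=500000):
    if dirty_pct <= min_pct:
        return 0
    if dirty_pct >= max_pct:
        return 100 * 1000 * 1000          # fully dirty: the 100ms cap
    delay = zfs_delay_scale * (dirty_pct - min_pct) / (max_pct - dirty_pct)
    return min(delay, 100 * 1000 * 1000)  # capped at 100 milliseconds

print(tx_delay_ns(80))  # 500000.0 ns = 500us at the midpoint, ~2000 IOPS
.fi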