1 | '\" te | |
2 | .\" Copyright (c) 2013 by Turbo Fredriksson <turbo@bayour.com>. All rights reserved. | |
3 | .\" The contents of this file are subject to the terms of the Common Development | |
4 | .\" and Distribution License (the "License"). You may not use this file except | |
5 | .\" in compliance with the License. You can obtain a copy of the license at | |
6 | .\" usr/src/OPENSOLARIS.LICENSE or http://www.opensolaris.org/os/licensing. | |
7 | .\" | |
8 | .\" See the License for the specific language governing permissions and | |
9 | .\" limitations under the License. When distributing Covered Code, include this | |
10 | .\" CDDL HEADER in each file and include the License file at | |
11 | .\" usr/src/OPENSOLARIS.LICENSE. If applicable, add the following below this | |
12 | .\" CDDL HEADER, with the fields enclosed by brackets "[]" replaced with your | |
13 | .\" own identifying information: | |
14 | .\" Portions Copyright [yyyy] [name of copyright owner] | |
15 | .TH ZFS-MODULE-PARAMETERS 5 "Nov 16, 2013" | |
16 | .SH NAME | |
17 | zfs\-module\-parameters \- ZFS module parameters | |
18 | .SH DESCRIPTION | |
19 | .sp | |
20 | .LP | |
21 | Description of the different parameters to the ZFS module. | |
22 | ||
23 | .SS "Module parameters" | |
24 | .sp | |
25 | .LP | |
26 | ||
27 | .sp | |
28 | .ne 2 | |
29 | .na | |
30 | \fBl2arc_feed_again\fR (int) | |
31 | .ad | |
32 | .RS 12n | |
33 | Turbo L2ARC warmup | |
34 | .sp | |
35 | Use \fB1\fR for yes (default) and \fB0\fR to disable. | |
36 | .RE | |
37 | ||
38 | .sp | |
39 | .ne 2 | |
40 | .na | |
41 | \fBl2arc_feed_min_ms\fR (ulong) | |
42 | .ad | |
43 | .RS 12n | |
44 | Min feed interval in milliseconds | |
45 | .sp | |
46 | Default value: \fB200\fR. | |
47 | .RE | |
48 | ||
49 | .sp | |
50 | .ne 2 | |
51 | .na | |
52 | \fBl2arc_feed_secs\fR (ulong) | |
53 | .ad | |
54 | .RS 12n | |
55 | Seconds between L2ARC writing | |
56 | .sp | |
57 | Default value: \fB1\fR. | |
58 | .RE | |
59 | ||
60 | .sp | |
61 | .ne 2 | |
62 | .na | |
63 | \fBl2arc_headroom\fR (ulong) | |
64 | .ad | |
65 | .RS 12n | |
66 | Number of max device writes to precache | |
67 | .sp | |
68 | Default value: \fB2\fR. | |
69 | .RE | |
70 | ||
71 | .sp | |
72 | .ne 2 | |
73 | .na | |
74 | \fBl2arc_headroom_boost\fR (ulong) | |
75 | .ad | |
76 | .RS 12n | |
77 | Compressed l2arc_headroom multiplier | |
78 | .sp | |
79 | Default value: \fB200\fR. | |
80 | .RE | |
81 | ||
82 | .sp | |
83 | .ne 2 | |
84 | .na | |
85 | \fBl2arc_nocompress\fR (int) | |
86 | .ad | |
87 | .RS 12n | |
88 | Skip compressing L2ARC buffers | |
89 | .sp | |
90 | Use \fB1\fR for yes and \fB0\fR for no (default). | |
91 | .RE | |
92 | ||
93 | .sp | |
94 | .ne 2 | |
95 | .na | |
96 | \fBl2arc_noprefetch\fR (int) | |
97 | .ad | |
98 | .RS 12n | |
99 | Skip caching prefetched buffers | |
100 | .sp | |
101 | Use \fB1\fR for yes (default) and \fB0\fR to disable. | |
102 | .RE | |
103 | ||
104 | .sp | |
105 | .ne 2 | |
106 | .na | |
107 | \fBl2arc_norw\fR (int) | |
108 | .ad | |
109 | .RS 12n | |
110 | No reads during writes | |
111 | .sp | |
112 | Use \fB1\fR for yes and \fB0\fR for no (default). | |
113 | .RE | |
114 | ||
115 | .sp | |
116 | .ne 2 | |
117 | .na | |
118 | \fBl2arc_write_boost\fR (ulong) | |
119 | .ad | |
120 | .RS 12n | |
121 | Extra write bytes during device warmup | |
122 | .sp | |
123 | Default value: \fB8,388,608\fR. | |
124 | .RE | |
125 | ||
126 | .sp | |
127 | .ne 2 | |
128 | .na | |
129 | \fBl2arc_write_max\fR (ulong) | |
130 | .ad | |
131 | .RS 12n | |
132 | Max write bytes per interval | |
133 | .sp | |
134 | Default value: \fB8,388,608\fR. | |
135 | .RE | |
136 | ||
137 | .sp | |
138 | .ne 2 | |
139 | .na | |
140 | \fBmetaslab_bias_enabled\fR (int) | |
141 | .ad | |
142 | .RS 12n | |
143 | Enable metaslab group biasing based on its vdev's over- or under-utilization | |
144 | relative to the pool. | |
145 | .sp | |
146 | Use \fB1\fR for yes (default) and \fB0\fR for no. | |
147 | .RE | |
148 | ||
149 | .sp | |
150 | .ne 2 | |
151 | .na | |
152 | \fBmetaslab_debug_load\fR (int) | |
153 | .ad | |
154 | .RS 12n | |
155 | Load all metaslabs during pool import. | |
156 | .sp | |
157 | Use \fB1\fR for yes and \fB0\fR for no (default). | |
158 | .RE | |
159 | ||
160 | .sp | |
161 | .ne 2 | |
162 | .na | |
163 | \fBmetaslab_debug_unload\fR (int) | |
164 | .ad | |
165 | .RS 12n | |
166 | Prevent metaslabs from being unloaded. | |
167 | .sp | |
168 | Use \fB1\fR for yes and \fB0\fR for no (default). | |
169 | .RE | |
170 | ||
171 | .sp | |
172 | .ne 2 | |
173 | .na | |
174 | \fBmetaslab_fragmentation_factor_enabled\fR (int) | |
175 | .ad | |
176 | .RS 12n | |
177 | Enable use of the fragmentation metric in computing metaslab weights. | |
178 | .sp | |
179 | Use \fB1\fR for yes (default) and \fB0\fR for no. | |
180 | .RE | |
181 | ||
182 | .sp | |
183 | .ne 2 | |
184 | .na | |
185 | \fBmetaslabs_per_vdev\fR (int) | |
186 | .ad | |
187 | .RS 12n | |
188 | When a vdev is added, it will be divided into approximately (but no more than) this number of metaslabs. | |
189 | .sp | |
190 | Default value: \fB200\fR. | |
191 | .RE | |
192 | ||
193 | .sp | |
194 | .ne 2 | |
195 | .na | |
196 | \fBmetaslab_preload_enabled\fR (int) | |
197 | .ad | |
198 | .RS 12n | |
199 | Enable metaslab group preloading. | |
200 | .sp | |
201 | Use \fB1\fR for yes (default) and \fB0\fR for no. | |
202 | .RE | |
203 | ||
204 | .sp | |
205 | .ne 2 | |
206 | .na | |
207 | \fBmetaslab_lba_weighting_enabled\fR (int) | |
208 | .ad | |
209 | .RS 12n | |
210 | Give more weight to metaslabs with lower LBAs, assuming they have | |
211 | greater bandwidth as is typically the case on a modern constant | |
212 | angular velocity disk drive. | |
213 | .sp | |
214 | Use \fB1\fR for yes (default) and \fB0\fR for no. | |
215 | .RE | |
216 | ||
217 | .sp | |
218 | .ne 2 | |
219 | .na | |
220 | \fBspa_config_path\fR (charp) | |
221 | .ad | |
222 | .RS 12n | |
223 | SPA config file | |
224 | .sp | |
225 | Default value: \fB/etc/zfs/zpool.cache\fR. | |
226 | .RE | |
227 | ||
228 | .sp | |
229 | .ne 2 | |
230 | .na | |
231 | \fBspa_asize_inflation\fR (int) | |
232 | .ad | |
233 | .RS 12n | |
234 | Multiplication factor used to estimate actual disk consumption from the | |
235 | size of data being written. The default value is a worst case estimate, | |
236 | but lower values may be valid for a given pool depending on its | |
237 | configuration. Pool administrators who understand the factors involved | |
238 | may wish to specify a more realistic inflation factor, particularly if | |
239 | they operate close to quota or capacity limits. | |
240 | .sp | |
Default value: \fB24\fR.
242 | .RE | |
243 | ||
244 | .sp | |
245 | .ne 2 | |
246 | .na | |
247 | \fBspa_load_verify_data\fR (int) | |
248 | .ad | |
249 | .RS 12n | |
250 | Whether to traverse data blocks during an "extreme rewind" (\fB-X\fR) | |
251 | import. Use 0 to disable and 1 to enable. | |
252 | ||
253 | An extreme rewind import normally performs a full traversal of all | |
254 | blocks in the pool for verification. If this parameter is set to 0, | |
255 | the traversal skips non-metadata blocks. It can be toggled once the | |
256 | import has started to stop or start the traversal of non-metadata blocks. | |
257 | .sp | |
Default value: \fB1\fR.
259 | .RE | |
260 | ||
261 | .sp | |
262 | .ne 2 | |
263 | .na | |
264 | \fBspa_load_verify_metadata\fR (int) | |
265 | .ad | |
266 | .RS 12n | |
267 | Whether to traverse blocks during an "extreme rewind" (\fB-X\fR) | |
268 | pool import. Use 0 to disable and 1 to enable. | |
269 | ||
270 | An extreme rewind import normally performs a full traversal of all | |
blocks in the pool for verification. If this parameter is set to 0,
the traversal is not performed. It can be toggled once the import has
273 | started to stop or start the traversal. | |
274 | .sp | |
Default value: \fB1\fR.
276 | .RE | |
277 | ||
278 | .sp | |
279 | .ne 2 | |
280 | .na | |
281 | \fBspa_load_verify_maxinflight\fR (int) | |
282 | .ad | |
283 | .RS 12n | |
284 | Maximum concurrent I/Os during the traversal performed during an "extreme | |
285 | rewind" (\fB-X\fR) pool import. | |
286 | .sp | |
Default value: \fB10,000\fR.
288 | .RE | |
289 | ||
290 | .sp | |
291 | .ne 2 | |
292 | .na | |
293 | \fBzfetch_array_rd_sz\fR (ulong) | |
294 | .ad | |
295 | .RS 12n | |
296 | If prefetching is enabled, disable prefetching for reads larger than this size. | |
297 | .sp | |
298 | Default value: \fB1,048,576\fR. | |
299 | .RE | |
300 | ||
301 | .sp | |
302 | .ne 2 | |
303 | .na | |
304 | \fBzfetch_block_cap\fR (uint) | |
305 | .ad | |
306 | .RS 12n | |
307 | Max number of blocks to prefetch at a time | |
308 | .sp | |
309 | Default value: \fB256\fR. | |
310 | .RE | |
311 | ||
312 | .sp | |
313 | .ne 2 | |
314 | .na | |
315 | \fBzfetch_max_streams\fR (uint) | |
316 | .ad | |
317 | .RS 12n | |
318 | Max number of streams per zfetch (prefetch streams per file). | |
319 | .sp | |
320 | Default value: \fB8\fR. | |
321 | .RE | |
322 | ||
323 | .sp | |
324 | .ne 2 | |
325 | .na | |
326 | \fBzfetch_min_sec_reap\fR (uint) | |
327 | .ad | |
328 | .RS 12n | |
329 | Min time before an active prefetch stream can be reclaimed | |
330 | .sp | |
331 | Default value: \fB2\fR. | |
332 | .RE | |
333 | ||
334 | .sp | |
335 | .ne 2 | |
336 | .na | |
337 | \fBzfs_arc_average_blocksize\fR (int) | |
338 | .ad | |
339 | .RS 12n | |
340 | The ARC's buffer hash table is sized based on the assumption of an average | |
341 | block size of \fBzfs_arc_average_blocksize\fR (default 8K). This works out | |
342 | to roughly 1MB of hash table per 1GB of physical memory with 8-byte pointers. | |
343 | For configurations with a known larger average block size this value can be | |
344 | increased to reduce the memory footprint. | |
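.sp
As an illustration of that rule, the arithmetic can be sketched as follows
(1 GB / 8192 bytes = 131,072 entries, times an 8-byte pointer each, is 1 MB):
.sp
.nf
/*
 * Illustrative only: ARC hash table size for a given amount of physical
 * memory, assuming one 8-byte pointer per average-sized block.
 */
unsigned long long
arc_hash_table_bytes(unsigned long long physmem,
    unsigned long long zfs_arc_average_blocksize)
{
        /* e.g. (1 GB / 8192) * 8 bytes = 1 MB of hash table */
        return (physmem / zfs_arc_average_blocksize * sizeof (void *));
}
.fi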
345 | ||
346 | .sp | |
347 | Default value: \fB8192\fR. | |
348 | .RE | |
349 | ||
350 | .sp | |
351 | .ne 2 | |
352 | .na | |
353 | \fBzfs_arc_grow_retry\fR (int) | |
354 | .ad | |
355 | .RS 12n | |
356 | Seconds before growing arc size | |
357 | .sp | |
358 | Default value: \fB5\fR. | |
359 | .RE | |
360 | ||
361 | .sp | |
362 | .ne 2 | |
363 | .na | |
364 | \fBzfs_arc_max\fR (ulong) | |
365 | .ad | |
366 | .RS 12n | |
367 | Max arc size | |
368 | .sp | |
369 | Default value: \fB0\fR. | |
370 | .RE | |
371 | ||
372 | .sp | |
373 | .ne 2 | |
374 | .na | |
375 | \fBzfs_arc_memory_throttle_disable\fR (int) | |
376 | .ad | |
377 | .RS 12n | |
378 | Disable memory throttle | |
379 | .sp | |
380 | Use \fB1\fR for yes (default) and \fB0\fR to disable. | |
381 | .RE | |
382 | ||
383 | .sp | |
384 | .ne 2 | |
385 | .na | |
386 | \fBzfs_arc_meta_limit\fR (ulong) | |
387 | .ad | |
388 | .RS 12n | |
389 | The maximum allowed size in bytes that meta data buffers are allowed to | |
390 | consume in the ARC. When this limit is reached meta data buffers will | |
391 | be reclaimed even if the overall arc_c_max has not been reached. This | |
392 | value defaults to 0 which indicates that 3/4 of the ARC may be used | |
393 | for meta data. | |
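.sp
The sketch below illustrates that default behaviour, assuming \fBarc_c_max\fR
holds the maximum ARC size in bytes (illustrative only, not the in-kernel
code):
.sp
.nf
/*
 * Illustrative only: when zfs_arc_meta_limit is 0, the effective
 * metadata limit is taken to be 3/4 of the maximum ARC size.
 */
unsigned long
effective_meta_limit(unsigned long zfs_arc_meta_limit,
    unsigned long arc_c_max)
{
        if (zfs_arc_meta_limit == 0)
                return (arc_c_max / 4 * 3);
        return (zfs_arc_meta_limit);
}
.fi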
394 | .sp | |
395 | Default value: \fB0\fR. | |
396 | .RE | |
397 | ||
398 | .sp | |
399 | .ne 2 | |
400 | .na | |
401 | \fBzfs_arc_meta_prune\fR (int) | |
402 | .ad | |
403 | .RS 12n | |
404 | The number of dentries and inodes to be scanned looking for entries | |
405 | which can be dropped. This may be required when the ARC reaches the | |
406 | \fBzfs_arc_meta_limit\fR because dentries and inodes can pin buffers | |
in the ARC. Increasing this value will cause the dentry and inode caches
408 | to be pruned more aggressively. Setting this value to 0 will disable | |
409 | pruning the inode and dentry caches. | |
410 | .sp | |
411 | Default value: \fB10,000\fR. | |
412 | .RE | |
413 | ||
414 | .sp | |
415 | .ne 2 | |
416 | .na | |
417 | \fBzfs_arc_meta_adjust_restarts\fR (ulong) | |
418 | .ad | |
419 | .RS 12n | |
The number of restart passes to make while scanning the ARC attempting
to free buffers in order to stay below the \fBzfs_arc_meta_limit\fR.
422 | This value should not need to be tuned but is available to facilitate | |
423 | performance analysis. | |
424 | .sp | |
425 | Default value: \fB4096\fR. | |
426 | .RE | |
427 | ||
428 | .sp | |
429 | .ne 2 | |
430 | .na | |
431 | \fBzfs_arc_min\fR (ulong) | |
432 | .ad | |
433 | .RS 12n | |
434 | Min arc size | |
435 | .sp | |
436 | Default value: \fB100\fR. | |
437 | .RE | |
438 | ||
439 | .sp | |
440 | .ne 2 | |
441 | .na | |
442 | \fBzfs_arc_min_prefetch_lifespan\fR (int) | |
443 | .ad | |
444 | .RS 12n | |
445 | Min life of prefetch block | |
446 | .sp | |
447 | Default value: \fB100\fR. | |
448 | .RE | |
449 | ||
450 | .sp | |
451 | .ne 2 | |
452 | .na | |
453 | \fBzfs_arc_p_aggressive_disable\fR (int) | |
454 | .ad | |
455 | .RS 12n | |
456 | Disable aggressive arc_p growth | |
457 | .sp | |
458 | Use \fB1\fR for yes (default) and \fB0\fR to disable. | |
459 | .RE | |
460 | ||
461 | .sp | |
462 | .ne 2 | |
463 | .na | |
464 | \fBzfs_arc_p_dampener_disable\fR (int) | |
465 | .ad | |
466 | .RS 12n | |
467 | Disable arc_p adapt dampener | |
468 | .sp | |
469 | Use \fB1\fR for yes (default) and \fB0\fR to disable. | |
470 | .RE | |
471 | ||
472 | .sp | |
473 | .ne 2 | |
474 | .na | |
475 | \fBzfs_arc_shrink_shift\fR (int) | |
476 | .ad | |
477 | .RS 12n | |
478 | log2(fraction of arc to reclaim) | |
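.sp
With the default of \fB5\fR this works out to reclaiming 2^-5, i.e. 1/32,
of the ARC per pass, as in this illustrative sketch (not the exact in-kernel
computation):
.sp
.nf
/*
 * Illustrative only: bytes to reclaim for a given ARC size and shrink
 * shift; a shift of 5 means 1/32 of the ARC per reclaim pass.
 */
unsigned long
arc_reclaim_bytes(unsigned long arc_size, int zfs_arc_shrink_shift)
{
        return (arc_size >> zfs_arc_shrink_shift);
}
.fi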
479 | .sp | |
480 | Default value: \fB5\fR. | |
481 | .RE | |
482 | ||
483 | .sp | |
484 | .ne 2 | |
485 | .na | |
486 | \fBzfs_autoimport_disable\fR (int) | |
487 | .ad | |
488 | .RS 12n | |
489 | Disable pool import at module load by ignoring the cache file (typically \fB/etc/zfs/zpool.cache\fR). | |
490 | .sp | |
491 | Use \fB1\fR for yes (default) and \fB0\fR for no. | |
492 | .RE | |
493 | ||
494 | .sp | |
495 | .ne 2 | |
496 | .na | |
497 | \fBzfs_dbuf_state_index\fR (int) | |
498 | .ad | |
499 | .RS 12n | |
500 | Calculate arc header index | |
501 | .sp | |
502 | Default value: \fB0\fR. | |
503 | .RE | |
504 | ||
505 | .sp | |
506 | .ne 2 | |
507 | .na | |
508 | \fBzfs_deadman_enabled\fR (int) | |
509 | .ad | |
510 | .RS 12n | |
511 | Enable deadman timer | |
512 | .sp | |
513 | Use \fB1\fR for yes (default) and \fB0\fR to disable. | |
514 | .RE | |
515 | ||
516 | .sp | |
517 | .ne 2 | |
518 | .na | |
519 | \fBzfs_deadman_synctime_ms\fR (ulong) | |
520 | .ad | |
521 | .RS 12n | |
522 | Expiration time in milliseconds. This value has two meanings. First it is | |
523 | used to determine when the spa_deadman() logic should fire. By default the | |
524 | spa_deadman() will fire if spa_sync() has not completed in 1000 seconds. | |
525 | Secondly, the value determines if an I/O is considered "hung". Any I/O that | |
526 | has not completed in zfs_deadman_synctime_ms is considered "hung" resulting | |
527 | in a zevent being logged. | |
528 | .sp | |
529 | Default value: \fB1,000,000\fR. | |
530 | .RE | |
531 | ||
532 | .sp | |
533 | .ne 2 | |
534 | .na | |
535 | \fBzfs_dedup_prefetch\fR (int) | |
536 | .ad | |
537 | .RS 12n | |
538 | Enable prefetching dedup-ed blks | |
539 | .sp | |
540 | Use \fB1\fR for yes and \fB0\fR to disable (default). | |
541 | .RE | |
542 | ||
543 | .sp | |
544 | .ne 2 | |
545 | .na | |
546 | \fBzfs_delay_min_dirty_percent\fR (int) | |
547 | .ad | |
548 | .RS 12n | |
549 | Start to delay each transaction once there is this amount of dirty data, | |
550 | expressed as a percentage of \fBzfs_dirty_data_max\fR. | |
This value should be >= \fBzfs_vdev_async_write_active_max_dirty_percent\fR.
552 | See the section "ZFS TRANSACTION DELAY". | |
553 | .sp | |
554 | Default value: \fB60\fR. | |
555 | .RE | |
556 | ||
557 | .sp | |
558 | .ne 2 | |
559 | .na | |
560 | \fBzfs_delay_scale\fR (int) | |
561 | .ad | |
562 | .RS 12n | |
563 | This controls how quickly the transaction delay approaches infinity. | |
564 | Larger values cause longer delays for a given amount of dirty data. | |
565 | .sp | |
566 | For the smoothest delay, this value should be about 1 billion divided | |
567 | by the maximum number of operations per second. This will smoothly | |
568 | handle between 10x and 1/10th this number. | |
569 | .sp | |
570 | See the section "ZFS TRANSACTION DELAY". | |
571 | .sp | |
572 | Note: \fBzfs_delay_scale\fR * \fBzfs_dirty_data_max\fR must be < 2^64. | |
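.sp
For example, a backend expected to sustain about 2,000 synchronous operations
per second gives 1,000,000,000 / 2,000 = 500,000, the default; an illustrative
sketch of that guideline:
.sp
.nf
/*
 * Illustrative only: pick zfs_delay_scale from an expected peak
 * operation rate, per the "1 billion / max ops per second" guideline.
 */
unsigned long
suggested_delay_scale(unsigned long max_ops_per_sec)
{
        return (1000000000UL / max_ops_per_sec); /* 2000 ops/s -> 500000 */
}
.fi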
573 | .sp | |
574 | Default value: \fB500,000\fR. | |
575 | .RE | |
576 | ||
577 | .sp | |
578 | .ne 2 | |
579 | .na | |
580 | \fBzfs_dirty_data_max\fR (int) | |
581 | .ad | |
582 | .RS 12n | |
583 | Determines the dirty space limit in bytes. Once this limit is exceeded, new | |
584 | writes are halted until space frees up. This parameter takes precedence | |
585 | over \fBzfs_dirty_data_max_percent\fR. | |
586 | See the section "ZFS TRANSACTION DELAY". | |
587 | .sp | |
588 | Default value: 10 percent of all memory, capped at \fBzfs_dirty_data_max_max\fR. | |
589 | .RE | |
590 | ||
591 | .sp | |
592 | .ne 2 | |
593 | .na | |
594 | \fBzfs_dirty_data_max_max\fR (int) | |
595 | .ad | |
596 | .RS 12n | |
597 | Maximum allowable value of \fBzfs_dirty_data_max\fR, expressed in bytes. | |
598 | This limit is only enforced at module load time, and will be ignored if | |
599 | \fBzfs_dirty_data_max\fR is later changed. This parameter takes | |
600 | precedence over \fBzfs_dirty_data_max_max_percent\fR. See the section | |
601 | "ZFS TRANSACTION DELAY". | |
602 | .sp | |
603 | Default value: 25% of physical RAM. | |
604 | .RE | |
605 | ||
606 | .sp | |
607 | .ne 2 | |
608 | .na | |
609 | \fBzfs_dirty_data_max_max_percent\fR (int) | |
610 | .ad | |
611 | .RS 12n | |
612 | Maximum allowable value of \fBzfs_dirty_data_max\fR, expressed as a | |
613 | percentage of physical RAM. This limit is only enforced at module load | |
614 | time, and will be ignored if \fBzfs_dirty_data_max\fR is later changed. | |
615 | The parameter \fBzfs_dirty_data_max_max\fR takes precedence over this | |
616 | one. See the section "ZFS TRANSACTION DELAY". | |
617 | .sp | |
Default value: \fB25\fR.
619 | .RE | |
620 | ||
621 | .sp | |
622 | .ne 2 | |
623 | .na | |
624 | \fBzfs_dirty_data_max_percent\fR (int) | |
625 | .ad | |
626 | .RS 12n | |
627 | Determines the dirty space limit, expressed as a percentage of all | |
628 | memory. Once this limit is exceeded, new writes are halted until space frees | |
629 | up. The parameter \fBzfs_dirty_data_max\fR takes precedence over this | |
630 | one. See the section "ZFS TRANSACTION DELAY". | |
631 | .sp | |
632 | Default value: 10%, subject to \fBzfs_dirty_data_max_max\fR. | |
633 | .RE | |
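.sp
The interaction between \fBzfs_dirty_data_max\fR,
\fBzfs_dirty_data_max_percent\fR, \fBzfs_dirty_data_max_max\fR and
\fBzfs_dirty_data_max_max_percent\fR described above can be summarized with
the following illustrative sketch (not the in-kernel code):
.sp
.nf
/*
 * Illustrative only: how the effective dirty data limit is derived
 * from these tunables.  physmem is physical RAM in bytes.
 */
unsigned long long
effective_dirty_data_max(unsigned long long physmem,
    unsigned long long zfs_dirty_data_max,      /* 0 means "not set" */
    int zfs_dirty_data_max_percent,             /* default 10 */
    unsigned long long zfs_dirty_data_max_max,  /* 0 means "not set" */
    int zfs_dirty_data_max_max_percent)         /* default 25 */
{
        unsigned long long cap, limit;

        if (zfs_dirty_data_max != 0)
                return (zfs_dirty_data_max);    /* explicit value wins */

        cap = zfs_dirty_data_max_max != 0 ? zfs_dirty_data_max_max :
            physmem * zfs_dirty_data_max_max_percent / 100;
        limit = physmem * zfs_dirty_data_max_percent / 100;
        return (limit < cap ? limit : cap);
}
.fi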
634 | ||
635 | .sp | |
636 | .ne 2 | |
637 | .na | |
638 | \fBzfs_dirty_data_sync\fR (int) | |
639 | .ad | |
640 | .RS 12n | |
641 | Start syncing out a transaction group if there is at least this much dirty data. | |
642 | .sp | |
643 | Default value: \fB67,108,864\fR. | |
644 | .RE | |
645 | ||
646 | .sp | |
647 | .ne 2 | |
648 | .na | |
649 | \fBzfs_free_max_blocks\fR (ulong) | |
650 | .ad | |
651 | .RS 12n | |
652 | Maximum number of blocks freed in a single txg. | |
653 | .sp | |
654 | Default value: \fB100,000\fR. | |
655 | .RE | |
656 | ||
657 | .sp | |
658 | .ne 2 | |
659 | .na | |
660 | \fBzfs_vdev_async_read_max_active\fR (int) | |
661 | .ad | |
662 | .RS 12n | |
Maximum asynchronous read I/Os active to each device.
664 | See the section "ZFS I/O SCHEDULER". | |
665 | .sp | |
666 | Default value: \fB3\fR. | |
667 | .RE | |
668 | ||
669 | .sp | |
670 | .ne 2 | |
671 | .na | |
672 | \fBzfs_vdev_async_read_min_active\fR (int) | |
673 | .ad | |
674 | .RS 12n | |
675 | Minimum asynchronous read I/Os active to each device. | |
676 | See the section "ZFS I/O SCHEDULER". | |
677 | .sp | |
678 | Default value: \fB1\fR. | |
679 | .RE | |
680 | ||
681 | .sp | |
682 | .ne 2 | |
683 | .na | |
684 | \fBzfs_vdev_async_write_active_max_dirty_percent\fR (int) | |
685 | .ad | |
686 | .RS 12n | |
687 | When the pool has more than | |
688 | \fBzfs_vdev_async_write_active_max_dirty_percent\fR dirty data, use | |
689 | \fBzfs_vdev_async_write_max_active\fR to limit active async writes. If | |
690 | the dirty data is between min and max, the active I/O limit is linearly | |
691 | interpolated. See the section "ZFS I/O SCHEDULER". | |
692 | .sp | |
693 | Default value: \fB60\fR. | |
694 | .RE | |
695 | ||
696 | .sp | |
697 | .ne 2 | |
698 | .na | |
699 | \fBzfs_vdev_async_write_active_min_dirty_percent\fR (int) | |
700 | .ad | |
701 | .RS 12n | |
702 | When the pool has less than | |
703 | \fBzfs_vdev_async_write_active_min_dirty_percent\fR dirty data, use | |
704 | \fBzfs_vdev_async_write_min_active\fR to limit active async writes. If | |
705 | the dirty data is between min and max, the active I/O limit is linearly | |
706 | interpolated. See the section "ZFS I/O SCHEDULER". | |
707 | .sp | |
708 | Default value: \fB30\fR. | |
709 | .RE | |
710 | ||
711 | .sp | |
712 | .ne 2 | |
713 | .na | |
714 | \fBzfs_vdev_async_write_max_active\fR (int) | |
715 | .ad | |
716 | .RS 12n | |
Maximum asynchronous write I/Os active to each device.
718 | See the section "ZFS I/O SCHEDULER". | |
719 | .sp | |
720 | Default value: \fB10\fR. | |
721 | .RE | |
722 | ||
723 | .sp | |
724 | .ne 2 | |
725 | .na | |
726 | \fBzfs_vdev_async_write_min_active\fR (int) | |
727 | .ad | |
728 | .RS 12n | |
729 | Minimum asynchronous write I/Os active to each device. | |
730 | See the section "ZFS I/O SCHEDULER". | |
731 | .sp | |
732 | Default value: \fB1\fR. | |
733 | .RE | |
734 | ||
735 | .sp | |
736 | .ne 2 | |
737 | .na | |
738 | \fBzfs_vdev_max_active\fR (int) | |
739 | .ad | |
740 | .RS 12n | |
741 | The maximum number of I/Os active to each device. Ideally, this will be >= | |
742 | the sum of each queue's max_active. It must be at least the sum of each | |
743 | queue's min_active. See the section "ZFS I/O SCHEDULER". | |
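.sp
As a worked check, the default per-queue values documented in this page sum
to 23 (minimums) and 35 (maximums), both well within the default aggregate
limit of 1,000; the sketch below is illustrative only:
.sp
.nf
/*
 * Illustrative only: the constraints described above, using the
 * default per-queue min/max values documented in this page.
 */
#include <assert.h>

int
main(void)
{
        int zfs_vdev_max_active = 1000;
        int min_sum = 10 + 10 + 1 + 1 + 1;      /* per-queue min_active */
        int max_sum = 10 + 10 + 3 + 10 + 2;     /* per-queue max_active */

        assert(min_sum <= zfs_vdev_max_active); /* required */
        assert(max_sum <= zfs_vdev_max_active); /* ideal */
        return (0);
}
.fi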
744 | .sp | |
745 | Default value: \fB1,000\fR. | |
746 | .RE | |
747 | ||
748 | .sp | |
749 | .ne 2 | |
750 | .na | |
751 | \fBzfs_vdev_scrub_max_active\fR (int) | |
752 | .ad | |
753 | .RS 12n | |
Maximum scrub I/Os active to each device.
755 | See the section "ZFS I/O SCHEDULER". | |
756 | .sp | |
757 | Default value: \fB2\fR. | |
758 | .RE | |
759 | ||
760 | .sp | |
761 | .ne 2 | |
762 | .na | |
763 | \fBzfs_vdev_scrub_min_active\fR (int) | |
764 | .ad | |
765 | .RS 12n | |
766 | Minimum scrub I/Os active to each device. | |
767 | See the section "ZFS I/O SCHEDULER". | |
768 | .sp | |
769 | Default value: \fB1\fR. | |
770 | .RE | |
771 | ||
772 | .sp | |
773 | .ne 2 | |
774 | .na | |
775 | \fBzfs_vdev_sync_read_max_active\fR (int) | |
776 | .ad | |
777 | .RS 12n | |
Maximum synchronous read I/Os active to each device.
779 | See the section "ZFS I/O SCHEDULER". | |
780 | .sp | |
781 | Default value: \fB10\fR. | |
782 | .RE | |
783 | ||
784 | .sp | |
785 | .ne 2 | |
786 | .na | |
787 | \fBzfs_vdev_sync_read_min_active\fR (int) | |
788 | .ad | |
789 | .RS 12n | |
790 | Minimum synchronous read I/Os active to each device. | |
791 | See the section "ZFS I/O SCHEDULER". | |
792 | .sp | |
793 | Default value: \fB10\fR. | |
794 | .RE | |
795 | ||
796 | .sp | |
797 | .ne 2 | |
798 | .na | |
799 | \fBzfs_vdev_sync_write_max_active\fR (int) | |
800 | .ad | |
801 | .RS 12n | |
Maximum synchronous write I/Os active to each device.
803 | See the section "ZFS I/O SCHEDULER". | |
804 | .sp | |
805 | Default value: \fB10\fR. | |
806 | .RE | |
807 | ||
808 | .sp | |
809 | .ne 2 | |
810 | .na | |
811 | \fBzfs_vdev_sync_write_min_active\fR (int) | |
812 | .ad | |
813 | .RS 12n | |
814 | Minimum synchronous write I/Os active to each device. | |
815 | See the section "ZFS I/O SCHEDULER". | |
816 | .sp | |
817 | Default value: \fB10\fR. | |
818 | .RE | |
819 | ||
820 | .sp | |
821 | .ne 2 | |
822 | .na | |
823 | \fBzfs_disable_dup_eviction\fR (int) | |
824 | .ad | |
825 | .RS 12n | |
826 | Disable duplicate buffer eviction | |
827 | .sp | |
828 | Use \fB1\fR for yes and \fB0\fR for no (default). | |
829 | .RE | |
830 | ||
831 | .sp | |
832 | .ne 2 | |
833 | .na | |
834 | \fBzfs_expire_snapshot\fR (int) | |
835 | .ad | |
836 | .RS 12n | |
837 | Seconds to expire .zfs/snapshot | |
838 | .sp | |
839 | Default value: \fB300\fR. | |
840 | .RE | |
841 | ||
842 | .sp | |
843 | .ne 2 | |
844 | .na | |
845 | \fBzfs_flags\fR (int) | |
846 | .ad | |
847 | .RS 12n | |
848 | Set additional debugging flags. The following flags may be bitwise-or'd | |
849 | together. | |
850 | .sp | |
851 | .TS | |
852 | box; | |
853 | rB lB | |
854 | lB lB | |
855 | r l. | |
856 | Value Symbolic Name | |
857 | Description | |
858 | _ | |
859 | 1 ZFS_DEBUG_DPRINTF | |
860 | Enable dprintf entries in the debug log. | |
861 | _ | |
862 | 2 ZFS_DEBUG_DBUF_VERIFY * | |
863 | Enable extra dbuf verifications. | |
864 | _ | |
865 | 4 ZFS_DEBUG_DNODE_VERIFY * | |
866 | Enable extra dnode verifications. | |
867 | _ | |
868 | 8 ZFS_DEBUG_SNAPNAMES | |
869 | Enable snapshot name verification. | |
870 | _ | |
871 | 16 ZFS_DEBUG_MODIFY | |
872 | Check for illegally modified ARC buffers. | |
873 | _ | |
874 | 32 ZFS_DEBUG_SPA | |
875 | Enable spa_dbgmsg entries in the debug log. | |
876 | _ | |
877 | 64 ZFS_DEBUG_ZIO_FREE | |
878 | Enable verification of block frees. | |
879 | _ | |
880 | 128 ZFS_DEBUG_HISTOGRAM_VERIFY | |
881 | Enable extra spacemap histogram verifications. | |
882 | .TE | |
883 | .sp | |
884 | * Requires debug build. | |
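.sp
For example, to enable dprintf entries together with ARC modification checks,
the two flag values from the table are combined with a bitwise OR, as in this
illustrative sketch:
.sp
.nf
/* Illustrative only: combining zfs_flags values from the table above. */
#define ZFS_DEBUG_DPRINTF   1
#define ZFS_DEBUG_MODIFY    16

int
main(void)
{
        int zfs_flags = ZFS_DEBUG_DPRINTF | ZFS_DEBUG_MODIFY;  /* == 17 */

        return (zfs_flags == 17 ? 0 : 1);
}
.fi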
885 | .sp | |
886 | Default value: \fB0\fR. | |
887 | .RE | |
888 | ||
889 | .sp | |
890 | .ne 2 | |
891 | .na | |
892 | \fBzfs_free_leak_on_eio\fR (int) | |
893 | .ad | |
894 | .RS 12n | |
895 | If destroy encounters an EIO while reading metadata (e.g. indirect | |
blocks), space referenced by the missing metadata cannot be freed.
897 | Normally this causes the background destroy to become "stalled", as | |
898 | it is unable to make forward progress. While in this stalled state, | |
899 | all remaining space to free from the error-encountering filesystem is | |
900 | "temporarily leaked". Set this flag to cause it to ignore the EIO, | |
permanently leak the space from indirect blocks that cannot be read,
902 | and continue to free everything else that it can. | |
903 | ||
904 | The default, "stalling" behavior is useful if the storage partially | |
fails (i.e. some but not all I/Os fail), and then later recovers. In
906 | this case, we will be able to continue pool operations while it is | |
907 | partially failed, and when it recovers, we can continue to free the | |
908 | space, with no leaks. However, note that this case is actually | |
909 | fairly rare. | |
910 | ||
911 | Typically pools either (a) fail completely (but perhaps temporarily, | |
912 | e.g. a top-level vdev going offline), or (b) have localized, | |
913 | permanent errors (e.g. disk returns the wrong data due to bit flip or | |
914 | firmware bug). In case (a), this setting does not matter because the | |
915 | pool will be suspended and the sync thread will not be able to make | |
916 | forward progress regardless. In case (b), because the error is | |
917 | permanent, the best we can do is leak the minimum amount of space, | |
918 | which is what setting this flag will do. Therefore, it is reasonable | |
919 | for this flag to normally be set, but we chose the more conservative | |
920 | approach of not setting it, so that there is no possibility of | |
921 | leaking space in the "partial temporary" failure case. | |
922 | .sp | |
923 | Default value: \fB0\fR. | |
924 | .RE | |
925 | ||
926 | .sp | |
927 | .ne 2 | |
928 | .na | |
929 | \fBzfs_free_min_time_ms\fR (int) | |
930 | .ad | |
931 | .RS 12n | |
932 | Min millisecs to free per txg | |
933 | .sp | |
934 | Default value: \fB1,000\fR. | |
935 | .RE | |
936 | ||
937 | .sp | |
938 | .ne 2 | |
939 | .na | |
940 | \fBzfs_immediate_write_sz\fR (long) | |
941 | .ad | |
942 | .RS 12n | |
943 | Largest data block to write to zil | |
944 | .sp | |
945 | Default value: \fB32,768\fR. | |
946 | .RE | |
947 | ||
948 | .sp | |
949 | .ne 2 | |
950 | .na | |
951 | \fBzfs_max_recordsize\fR (int) | |
952 | .ad | |
953 | .RS 12n | |
954 | We currently support block sizes from 512 bytes to 16MB. The benefits of | |
955 | larger blocks, and thus larger IO, need to be weighed against the cost of | |
956 | COWing a giant block to modify one byte. Additionally, very large blocks | |
957 | can have an impact on i/o latency, and also potentially on the memory | |
958 | allocator. Therefore, we do not allow the recordsize to be set larger than | |
959 | zfs_max_recordsize (default 1MB). Larger blocks can be created by changing | |
960 | this tunable, and pools with larger blocks can always be imported and used, | |
961 | regardless of this setting. | |
962 | .sp | |
963 | Default value: \fB1,048,576\fR. | |
964 | .RE | |
965 | ||
966 | .sp | |
967 | .ne 2 | |
968 | .na | |
969 | \fBzfs_mdcomp_disable\fR (int) | |
970 | .ad | |
971 | .RS 12n | |
972 | Disable meta data compression | |
973 | .sp | |
974 | Use \fB1\fR for yes and \fB0\fR for no (default). | |
975 | .RE | |
976 | ||
977 | .sp | |
978 | .ne 2 | |
979 | .na | |
980 | \fBzfs_metaslab_fragmentation_threshold\fR (int) | |
981 | .ad | |
982 | .RS 12n | |
983 | Allow metaslabs to keep their active state as long as their fragmentation | |
984 | percentage is less than or equal to this value. An active metaslab that | |
985 | exceeds this threshold will no longer keep its active status allowing | |
986 | better metaslabs to be selected. | |
987 | .sp | |
988 | Default value: \fB70\fR. | |
989 | .RE | |
990 | ||
991 | .sp | |
992 | .ne 2 | |
993 | .na | |
994 | \fBzfs_mg_fragmentation_threshold\fR (int) | |
995 | .ad | |
996 | .RS 12n | |
997 | Metaslab groups are considered eligible for allocations if their | |
fragmentation metric (measured as a percentage) is less than or equal to
999 | this value. If a metaslab group exceeds this threshold then it will be | |
1000 | skipped unless all metaslab groups within the metaslab class have also | |
1001 | crossed this threshold. | |
1002 | .sp | |
1003 | Default value: \fB85\fR. | |
1004 | .RE | |
1005 | ||
1006 | .sp | |
1007 | .ne 2 | |
1008 | .na | |
1009 | \fBzfs_mg_noalloc_threshold\fR (int) | |
1010 | .ad | |
1011 | .RS 12n | |
1012 | Defines a threshold at which metaslab groups should be eligible for | |
1013 | allocations. The value is expressed as a percentage of free space | |
1014 | beyond which a metaslab group is always eligible for allocations. | |
If a metaslab group's free space is less than or equal to the
threshold, the allocator will avoid allocating to that group
1017 | unless all groups in the pool have reached the threshold. Once all | |
1018 | groups have reached the threshold, all groups are allowed to accept | |
1019 | allocations. The default value of 0 disables the feature and causes | |
1020 | all metaslab groups to be eligible for allocations. | |
1021 | ||
This parameter makes it possible to deal with pools having heavily imbalanced
1023 | vdevs such as would be the case when a new vdev has been added. | |
1024 | Setting the threshold to a non-zero percentage will stop allocations | |
1025 | from being made to vdevs that aren't filled to the specified percentage | |
1026 | and allow lesser filled vdevs to acquire more allocations than they | |
1027 | otherwise would under the old \fBzfs_mg_alloc_failures\fR facility. | |
1028 | .sp | |
1029 | Default value: \fB0\fR. | |
1030 | .RE | |
1031 | ||
1032 | .sp | |
1033 | .ne 2 | |
1034 | .na | |
1035 | \fBzfs_no_scrub_io\fR (int) | |
1036 | .ad | |
1037 | .RS 12n | |
1038 | Set for no scrub I/O | |
1039 | .sp | |
1040 | Use \fB1\fR for yes and \fB0\fR for no (default). | |
1041 | .RE | |
1042 | ||
1043 | .sp | |
1044 | .ne 2 | |
1045 | .na | |
1046 | \fBzfs_no_scrub_prefetch\fR (int) | |
1047 | .ad | |
1048 | .RS 12n | |
1049 | Set for no scrub prefetching | |
1050 | .sp | |
1051 | Use \fB1\fR for yes and \fB0\fR for no (default). | |
1052 | .RE | |
1053 | ||
1054 | .sp | |
1055 | .ne 2 | |
1056 | .na | |
1057 | \fBzfs_nocacheflush\fR (int) | |
1058 | .ad | |
1059 | .RS 12n | |
1060 | Disable cache flushes | |
1061 | .sp | |
1062 | Use \fB1\fR for yes and \fB0\fR for no (default). | |
1063 | .RE | |
1064 | ||
1065 | .sp | |
1066 | .ne 2 | |
1067 | .na | |
1068 | \fBzfs_nopwrite_enabled\fR (int) | |
1069 | .ad | |
1070 | .RS 12n | |
1071 | Enable NOP writes | |
1072 | .sp | |
1073 | Use \fB1\fR for yes (default) and \fB0\fR to disable. | |
1074 | .RE | |
1075 | ||
1076 | .sp | |
1077 | .ne 2 | |
1078 | .na | |
1079 | \fBzfs_pd_bytes_max\fR (int) | |
1080 | .ad | |
1081 | .RS 12n | |
1082 | The number of bytes which should be prefetched. | |
1083 | .sp | |
1084 | Default value: \fB52,428,800\fR. | |
1085 | .RE | |
1086 | ||
1087 | .sp | |
1088 | .ne 2 | |
1089 | .na | |
1090 | \fBzfs_prefetch_disable\fR (int) | |
1091 | .ad | |
1092 | .RS 12n | |
1093 | Disable all ZFS prefetching | |
1094 | .sp | |
1095 | Use \fB1\fR for yes and \fB0\fR for no (default). | |
1096 | .RE | |
1097 | ||
1098 | .sp | |
1099 | .ne 2 | |
1100 | .na | |
1101 | \fBzfs_read_chunk_size\fR (long) | |
1102 | .ad | |
1103 | .RS 12n | |
1104 | Bytes to read per chunk | |
1105 | .sp | |
1106 | Default value: \fB1,048,576\fR. | |
1107 | .RE | |
1108 | ||
1109 | .sp | |
1110 | .ne 2 | |
1111 | .na | |
1112 | \fBzfs_read_history\fR (int) | |
1113 | .ad | |
1114 | .RS 12n | |
1115 | Historic statistics for the last N reads | |
1116 | .sp | |
1117 | Default value: \fB0\fR. | |
1118 | .RE | |
1119 | ||
1120 | .sp | |
1121 | .ne 2 | |
1122 | .na | |
1123 | \fBzfs_read_history_hits\fR (int) | |
1124 | .ad | |
1125 | .RS 12n | |
1126 | Include cache hits in read history | |
1127 | .sp | |
1128 | Use \fB1\fR for yes and \fB0\fR for no (default). | |
1129 | .RE | |
1130 | ||
1131 | .sp | |
1132 | .ne 2 | |
1133 | .na | |
1134 | \fBzfs_recover\fR (int) | |
1135 | .ad | |
1136 | .RS 12n | |
1137 | Set to attempt to recover from fatal errors. This should only be used as a | |
1138 | last resort, as it typically results in leaked space, or worse. | |
1139 | .sp | |
1140 | Use \fB1\fR for yes and \fB0\fR for no (default). | |
1141 | .RE | |
1142 | ||
1143 | .sp | |
1144 | .ne 2 | |
1145 | .na | |
1146 | \fBzfs_resilver_delay\fR (int) | |
1147 | .ad | |
1148 | .RS 12n | |
1149 | Number of ticks to delay prior to issuing a resilver I/O operation when | |
1150 | a non-resilver or non-scrub I/O operation has occurred within the past | |
1151 | \fBzfs_scan_idle\fR ticks. | |
1152 | .sp | |
1153 | Default value: \fB2\fR. | |
1154 | .RE | |
1155 | ||
1156 | .sp | |
1157 | .ne 2 | |
1158 | .na | |
1159 | \fBzfs_resilver_min_time_ms\fR (int) | |
1160 | .ad | |
1161 | .RS 12n | |
1162 | Min millisecs to resilver per txg | |
1163 | .sp | |
1164 | Default value: \fB3,000\fR. | |
1165 | .RE | |
1166 | ||
1167 | .sp | |
1168 | .ne 2 | |
1169 | .na | |
1170 | \fBzfs_scan_idle\fR (int) | |
1171 | .ad | |
1172 | .RS 12n | |
1173 | Idle window in clock ticks. During a scrub or a resilver, if | |
1174 | a non-scrub or non-resilver I/O operation has occurred during this | |
window, the next scrub or resilver operation is delayed by
\fBzfs_scrub_delay\fR or \fBzfs_resilver_delay\fR ticks, respectively.
1177 | .sp | |
1178 | Default value: \fB50\fR. | |
1179 | .RE | |
1180 | ||
1181 | .sp | |
1182 | .ne 2 | |
1183 | .na | |
1184 | \fBzfs_scan_min_time_ms\fR (int) | |
1185 | .ad | |
1186 | .RS 12n | |
1187 | Min millisecs to scrub per txg | |
1188 | .sp | |
1189 | Default value: \fB1,000\fR. | |
1190 | .RE | |
1191 | ||
1192 | .sp | |
1193 | .ne 2 | |
1194 | .na | |
1195 | \fBzfs_scrub_delay\fR (int) | |
1196 | .ad | |
1197 | .RS 12n | |
1198 | Number of ticks to delay prior to issuing a scrub I/O operation when | |
1199 | a non-scrub or non-resilver I/O operation has occurred within the past | |
1200 | \fBzfs_scan_idle\fR ticks. | |
1201 | .sp | |
1202 | Default value: \fB4\fR. | |
1203 | .RE | |
1204 | ||
1205 | .sp | |
1206 | .ne 2 | |
1207 | .na | |
1208 | \fBzfs_send_corrupt_data\fR (int) | |
1209 | .ad | |
1210 | .RS 12n | |
Allow sending of corrupt data (ignore read/checksum errors when sending data)
1212 | .sp | |
1213 | Use \fB1\fR for yes and \fB0\fR for no (default). | |
1214 | .RE | |
1215 | ||
1216 | .sp | |
1217 | .ne 2 | |
1218 | .na | |
1219 | \fBzfs_sync_pass_deferred_free\fR (int) | |
1220 | .ad | |
1221 | .RS 12n | |
1222 | Defer frees starting in this pass | |
1223 | .sp | |
1224 | Default value: \fB2\fR. | |
1225 | .RE | |
1226 | ||
1227 | .sp | |
1228 | .ne 2 | |
1229 | .na | |
1230 | \fBzfs_sync_pass_dont_compress\fR (int) | |
1231 | .ad | |
1232 | .RS 12n | |
1233 | Don't compress starting in this pass | |
1234 | .sp | |
1235 | Default value: \fB5\fR. | |
1236 | .RE | |
1237 | ||
1238 | .sp | |
1239 | .ne 2 | |
1240 | .na | |
1241 | \fBzfs_sync_pass_rewrite\fR (int) | |
1242 | .ad | |
1243 | .RS 12n | |
1244 | Rewrite new bps starting in this pass | |
1245 | .sp | |
1246 | Default value: \fB2\fR. | |
1247 | .RE | |
1248 | ||
1249 | .sp | |
1250 | .ne 2 | |
1251 | .na | |
1252 | \fBzfs_top_maxinflight\fR (int) | |
1253 | .ad | |
1254 | .RS 12n | |
1255 | Max I/Os per top-level vdev during scrub or resilver operations. | |
1256 | .sp | |
1257 | Default value: \fB32\fR. | |
1258 | .RE | |
1259 | ||
1260 | .sp | |
1261 | .ne 2 | |
1262 | .na | |
1263 | \fBzfs_txg_history\fR (int) | |
1264 | .ad | |
1265 | .RS 12n | |
1266 | Historic statistics for the last N txgs | |
1267 | .sp | |
1268 | Default value: \fB0\fR. | |
1269 | .RE | |
1270 | ||
1271 | .sp | |
1272 | .ne 2 | |
1273 | .na | |
1274 | \fBzfs_txg_timeout\fR (int) | |
1275 | .ad | |
1276 | .RS 12n | |
1277 | Max seconds worth of delta per txg | |
1278 | .sp | |
1279 | Default value: \fB5\fR. | |
1280 | .RE | |
1281 | ||
1282 | .sp | |
1283 | .ne 2 | |
1284 | .na | |
1285 | \fBzfs_vdev_aggregation_limit\fR (int) | |
1286 | .ad | |
1287 | .RS 12n | |
1288 | Max vdev I/O aggregation size | |
1289 | .sp | |
1290 | Default value: \fB131,072\fR. | |
1291 | .RE | |
1292 | ||
1293 | .sp | |
1294 | .ne 2 | |
1295 | .na | |
1296 | \fBzfs_vdev_cache_bshift\fR (int) | |
1297 | .ad | |
1298 | .RS 12n | |
Shift size to inflate reads to
1300 | .sp | |
1301 | Default value: \fB16\fR. | |
1302 | .RE | |
1303 | ||
1304 | .sp | |
1305 | .ne 2 | |
1306 | .na | |
1307 | \fBzfs_vdev_cache_max\fR (int) | |
1308 | .ad | |
1309 | .RS 12n | |
Inflate reads smaller than this value
1311 | .RE | |
1312 | ||
1313 | .sp | |
1314 | .ne 2 | |
1315 | .na | |
1316 | \fBzfs_vdev_cache_size\fR (int) | |
1317 | .ad | |
1318 | .RS 12n | |
1319 | Total size of the per-disk cache | |
1320 | .sp | |
1321 | Default value: \fB0\fR. | |
1322 | .RE | |
1323 | ||
1324 | .sp | |
1325 | .ne 2 | |
1326 | .na | |
1327 | \fBzfs_vdev_mirror_switch_us\fR (int) | |
1328 | .ad | |
1329 | .RS 12n | |
1330 | Switch mirrors every N usecs | |
1331 | .sp | |
1332 | Default value: \fB10,000\fR. | |
1333 | .RE | |
1334 | ||
1335 | .sp | |
1336 | .ne 2 | |
1337 | .na | |
1338 | \fBzfs_vdev_read_gap_limit\fR (int) | |
1339 | .ad | |
1340 | .RS 12n | |
1341 | Aggregate read I/O over gap | |
1342 | .sp | |
1343 | Default value: \fB32,768\fR. | |
1344 | .RE | |
1345 | ||
1346 | .sp | |
1347 | .ne 2 | |
1348 | .na | |
1349 | \fBzfs_vdev_scheduler\fR (charp) | |
1350 | .ad | |
1351 | .RS 12n | |
1352 | I/O scheduler | |
1353 | .sp | |
1354 | Default value: \fBnoop\fR. | |
1355 | .RE | |
1356 | ||
1357 | .sp | |
1358 | .ne 2 | |
1359 | .na | |
1360 | \fBzfs_vdev_write_gap_limit\fR (int) | |
1361 | .ad | |
1362 | .RS 12n | |
1363 | Aggregate write I/O over gap | |
1364 | .sp | |
1365 | Default value: \fB4,096\fR. | |
1366 | .RE | |
1367 | ||
1368 | .sp | |
1369 | .ne 2 | |
1370 | .na | |
1371 | \fBzfs_zevent_cols\fR (int) | |
1372 | .ad | |
1373 | .RS 12n | |
1374 | Max event column width | |
1375 | .sp | |
1376 | Default value: \fB80\fR. | |
1377 | .RE | |
1378 | ||
1379 | .sp | |
1380 | .ne 2 | |
1381 | .na | |
1382 | \fBzfs_zevent_console\fR (int) | |
1383 | .ad | |
1384 | .RS 12n | |
1385 | Log events to the console | |
1386 | .sp | |
1387 | Use \fB1\fR for yes and \fB0\fR for no (default). | |
1388 | .RE | |
1389 | ||
1390 | .sp | |
1391 | .ne 2 | |
1392 | .na | |
1393 | \fBzfs_zevent_len_max\fR (int) | |
1394 | .ad | |
1395 | .RS 12n | |
1396 | Max event queue length | |
1397 | .sp | |
1398 | Default value: \fB0\fR. | |
1399 | .RE | |
1400 | ||
1401 | .sp | |
1402 | .ne 2 | |
1403 | .na | |
1404 | \fBzil_replay_disable\fR (int) | |
1405 | .ad | |
1406 | .RS 12n | |
1407 | Disable intent logging replay | |
1408 | .sp | |
1409 | Use \fB1\fR for yes and \fB0\fR for no (default). | |
1410 | .RE | |
1411 | ||
1412 | .sp | |
1413 | .ne 2 | |
1414 | .na | |
1415 | \fBzil_slog_limit\fR (ulong) | |
1416 | .ad | |
1417 | .RS 12n | |
1418 | Max commit bytes to separate log device | |
1419 | .sp | |
1420 | Default value: \fB1,048,576\fR. | |
1421 | .RE | |
1422 | ||
1423 | .sp | |
1424 | .ne 2 | |
1425 | .na | |
1426 | \fBzio_delay_max\fR (int) | |
1427 | .ad | |
1428 | .RS 12n | |
1429 | Max zio millisec delay before posting event | |
1430 | .sp | |
1431 | Default value: \fB30,000\fR. | |
1432 | .RE | |
1433 | ||
1434 | .sp | |
1435 | .ne 2 | |
1436 | .na | |
1437 | \fBzio_requeue_io_start_cut_in_line\fR (int) | |
1438 | .ad | |
1439 | .RS 12n | |
1440 | Prioritize requeued I/O | |
1441 | .sp | |
1442 | Default value: \fB0\fR. | |
1443 | .RE | |
1444 | ||
1445 | .sp | |
1446 | .ne 2 | |
1447 | .na | |
1448 | \fBzvol_inhibit_dev\fR (uint) | |
1449 | .ad | |
1450 | .RS 12n | |
1451 | Do not create zvol device nodes | |
1452 | .sp | |
1453 | Use \fB1\fR for yes and \fB0\fR for no (default). | |
1454 | .RE | |
1455 | ||
1456 | .sp | |
1457 | .ne 2 | |
1458 | .na | |
1459 | \fBzvol_major\fR (uint) | |
1460 | .ad | |
1461 | .RS 12n | |
1462 | Major number for zvol device | |
1463 | .sp | |
1464 | Default value: \fB230\fR. | |
1465 | .RE | |
1466 | ||
1467 | .sp | |
1468 | .ne 2 | |
1469 | .na | |
1470 | \fBzvol_max_discard_blocks\fR (ulong) | |
1471 | .ad | |
1472 | .RS 12n | |
1473 | Max number of blocks to discard at once | |
1474 | .sp | |
1475 | Default value: \fB16,384\fR. | |
1476 | .RE | |
1477 | ||
1478 | .sp | |
1479 | .ne 2 | |
1480 | .na | |
1481 | \fBzvol_threads\fR (uint) | |
1482 | .ad | |
1483 | .RS 12n | |
1484 | Number of threads for zvol device | |
1485 | .sp | |
1486 | Default value: \fB32\fR. | |
1487 | .RE | |
1488 | ||
1489 | .SH ZFS I/O SCHEDULER | |
1490 | ZFS issues I/O operations to leaf vdevs to satisfy and complete I/Os. | |
1491 | The I/O scheduler determines when and in what order those operations are | |
1492 | issued. The I/O scheduler divides operations into five I/O classes | |
1493 | prioritized in the following order: sync read, sync write, async read, | |
1494 | async write, and scrub/resilver. Each queue defines the minimum and | |
1495 | maximum number of concurrent operations that may be issued to the | |
1496 | device. In addition, the device has an aggregate maximum, | |
1497 | \fBzfs_vdev_max_active\fR. Note that the sum of the per-queue minimums | |
1498 | must not exceed the aggregate maximum. If the sum of the per-queue | |
1499 | maximums exceeds the aggregate maximum, then the number of active I/Os | |
1500 | may reach \fBzfs_vdev_max_active\fR, in which case no further I/Os will | |
1501 | be issued regardless of whether all per-queue minimums have been met. | |
1502 | .sp | |
1503 | For many physical devices, throughput increases with the number of | |
1504 | concurrent operations, but latency typically suffers. Further, physical | |
1505 | devices typically have a limit at which more concurrent operations have no | |
1506 | effect on throughput or can actually cause it to decrease. | |
1507 | .sp | |
1508 | The scheduler selects the next operation to issue by first looking for an | |
1509 | I/O class whose minimum has not been satisfied. Once all are satisfied and | |
1510 | the aggregate maximum has not been hit, the scheduler looks for classes | |
1511 | whose maximum has not been satisfied. Iteration through the I/O classes is | |
1512 | done in the order specified above. No further operations are issued if the | |
1513 | aggregate maximum number of concurrent operations has been hit or if there | |
1514 | are no operations queued for an I/O class that has not hit its maximum. | |
1515 | Every time an I/O is queued or an operation completes, the I/O scheduler | |
1516 | looks for new operations to issue. | |
1517 | .sp | |
In general, smaller values of max_active will lead to lower latency of
synchronous operations. Larger values of max_active may lead to higher overall
throughput,
1520 | depending on underlying storage. | |
1521 | .sp | |
1522 | The ratio of the queues' max_actives determines the balance of performance | |
1523 | between reads, writes, and scrubs. E.g., increasing | |
1524 | \fBzfs_vdev_scrub_max_active\fR will cause the scrub or resilver to complete | |
1525 | more quickly, but reads and writes to have higher latency and lower throughput. | |
1526 | .sp | |
1527 | All I/O classes have a fixed maximum number of outstanding operations | |
1528 | except for the async write class. Asynchronous writes represent the data | |
1529 | that is committed to stable storage during the syncing stage for | |
1530 | transaction groups. Transaction groups enter the syncing state | |
1531 | periodically so the number of queued async writes will quickly burst up | |
1532 | and then bleed down to zero. Rather than servicing them as quickly as | |
1533 | possible, the I/O scheduler changes the maximum number of active async | |
1534 | write I/Os according to the amount of dirty data in the pool. Since | |
1535 | both throughput and latency typically increase with the number of | |
1536 | concurrent operations issued to physical devices, reducing the | |
1537 | burstiness in the number of concurrent operations also stabilizes the | |
1538 | response time of operations from other -- and in particular synchronous | |
1539 | -- queues. In broad strokes, the I/O scheduler will issue more | |
1540 | concurrent operations from the async write queue as there's more dirty | |
1541 | data in the pool. | |
1542 | .sp | |
1543 | Async Writes | |
1544 | .sp | |
1545 | The number of concurrent operations issued for the async write I/O class | |
1546 | follows a piece-wise linear function defined by a few adjustable points. | |
1547 | .nf | |
1548 | ||
1549 | | o---------| <-- zfs_vdev_async_write_max_active | |
1550 | ^ | /^ | | |
1551 | | | / | | | |
1552 | active | / | | | |
1553 | I/O | / | | | |
1554 | count | / | | | |
1555 | | / | | | |
1556 | |-------o | | <-- zfs_vdev_async_write_min_active | |
1557 | 0|_______^______|_________| | |
1558 | 0% | | 100% of zfs_dirty_data_max | |
1559 | | | | |
1560 | | `-- zfs_vdev_async_write_active_max_dirty_percent | |
1561 | `--------- zfs_vdev_async_write_active_min_dirty_percent | |
1562 | ||
1563 | .fi | |
1564 | Until the amount of dirty data exceeds a minimum percentage of the dirty | |
1565 | data allowed in the pool, the I/O scheduler will limit the number of | |
1566 | concurrent operations to the minimum. As that threshold is crossed, the | |
1567 | number of concurrent operations issued increases linearly to the maximum at | |
1568 | the specified maximum percentage of the dirty data allowed in the pool. | |
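.sp
The following illustrative C sketch (not the in-kernel implementation) shows
the same piece-wise linear scaling, using the default tunable values
documented above:
.sp
.nf
/*
 * Illustrative only: scale the async write max_active limit with the
 * amount of dirty data, as described above.
 */
int
async_write_active_limit(unsigned long long dirty,
    unsigned long long zfs_dirty_data_max)
{
        int min_active = 1;     /* zfs_vdev_async_write_min_active */
        int max_active = 10;    /* zfs_vdev_async_write_max_active */
        int min_pct = 30;       /* ..._active_min_dirty_percent */
        int max_pct = 60;       /* ..._active_max_dirty_percent */
        unsigned long long lo = zfs_dirty_data_max * min_pct / 100;
        unsigned long long hi = zfs_dirty_data_max * max_pct / 100;

        if (dirty <= lo)
                return (min_active);
        if (dirty >= hi)
                return (max_active);
        /* linear interpolation between the two points */
        return (min_active +
            (int)((dirty - lo) * (max_active - min_active) / (hi - lo)));
}
.fi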
1569 | .sp | |
1570 | Ideally, the amount of dirty data on a busy pool will stay in the sloped | |
1571 | part of the function between \fBzfs_vdev_async_write_active_min_dirty_percent\fR | |
1572 | and \fBzfs_vdev_async_write_active_max_dirty_percent\fR. If it exceeds the | |
1573 | maximum percentage, this indicates that the rate of incoming data is | |
1574 | greater than the rate that the backend storage can handle. In this case, we | |
1575 | must further throttle incoming writes, as described in the next section. | |
1576 | ||
1577 | .SH ZFS TRANSACTION DELAY | |
1578 | We delay transactions when we've determined that the backend storage | |
1579 | isn't able to accommodate the rate of incoming writes. | |
1580 | .sp | |
1581 | If there is already a transaction waiting, we delay relative to when | |
1582 | that transaction will finish waiting. This way the calculated delay time | |
1583 | is independent of the number of threads concurrently executing | |
1584 | transactions. | |
1585 | .sp | |
1586 | If we are the only waiter, wait relative to when the transaction | |
1587 | started, rather than the current time. This credits the transaction for | |
1588 | "time already served", e.g. reading indirect blocks. | |
1589 | .sp | |
1590 | The minimum time for a transaction to take is calculated as: | |
1591 | .nf | |
1592 | min_time = zfs_delay_scale * (dirty - min) / (max - dirty) | |
1593 | min_time is then capped at 100 milliseconds. | |
1594 | .fi | |
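.sp
The following illustrative C sketch evaluates that formula with the default
tunables; at the midpoint of the curve, where (dirty - min) equals
(max - dirty), it yields \fBzfs_delay_scale\fR nanoseconds, i.e. 500us, the
2000 IOPS midpoint discussed later in this section:
.sp
.nf
/*
 * Illustrative only: the per-transaction delay described above,
 * in nanoseconds, capped at 100 ms.
 */
unsigned long long
tx_delay_ns(unsigned long long dirty, unsigned long long zfs_dirty_data_max)
{
        unsigned long long zfs_delay_scale = 500000;
        int zfs_delay_min_dirty_percent = 60;
        unsigned long long min = zfs_dirty_data_max *
            zfs_delay_min_dirty_percent / 100;
        unsigned long long delay;

        if (dirty <= min)
                return (0);
        if (dirty >= zfs_dirty_data_max)
                dirty = zfs_dirty_data_max - 1;         /* avoid div by 0 */
        delay = zfs_delay_scale * (dirty - min) /
            (zfs_dirty_data_max - dirty);
        return (delay < 100000000ULL ? delay : 100000000ULL); /* 100ms cap */
}
.fi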
1595 | .sp | |
1596 | The delay has two degrees of freedom that can be adjusted via tunables. The | |
1597 | percentage of dirty data at which we start to delay is defined by | |
1598 | \fBzfs_delay_min_dirty_percent\fR. This should typically be at or above | |
1599 | \fBzfs_vdev_async_write_active_max_dirty_percent\fR so that we only start to | |
1600 | delay after writing at full speed has failed to keep up with the incoming write | |
1601 | rate. The scale of the curve is defined by \fBzfs_delay_scale\fR. Roughly speaking, | |
1602 | this variable determines the amount of delay at the midpoint of the curve. | |
1603 | .sp | |
1604 | .nf | |
1605 | delay | |
1606 | 10ms +-------------------------------------------------------------*+ | |
1607 | | *| | |
1608 | 9ms + *+ | |
1609 | | *| | |
1610 | 8ms + *+ | |
1611 | | * | | |
1612 | 7ms + * + | |
1613 | | * | | |
1614 | 6ms + * + | |
1615 | | * | | |
1616 | 5ms + * + | |
1617 | | * | | |
1618 | 4ms + * + | |
1619 | | * | | |
1620 | 3ms + * + | |
1621 | | * | | |
1622 | 2ms + (midpoint) * + | |
1623 | | | ** | | |
1624 | 1ms + v *** + | |
1625 | | zfs_delay_scale ----------> ******** | | |
1626 | 0 +-------------------------------------*********----------------+ | |
1627 | 0% <- zfs_dirty_data_max -> 100% | |
1628 | .fi | |
1629 | .sp | |
1630 | Note that since the delay is added to the outstanding time remaining on the | |
1631 | most recent transaction, the delay is effectively the inverse of IOPS. | |
1632 | Here the midpoint of 500us translates to 2000 IOPS. The shape of the curve | |
1633 | was chosen such that small changes in the amount of accumulated dirty data | |
1634 | in the first 3/4 of the curve yield relatively small differences in the | |
1635 | amount of delay. | |
1636 | .sp | |
1637 | The effects can be easier to understand when the amount of delay is | |
1638 | represented on a log scale: | |
1639 | .sp | |
1640 | .nf | |
1641 | delay | |
1642 | 100ms +-------------------------------------------------------------++ | |
1643 | + + | |
1644 | | | | |
1645 | + *+ | |
1646 | 10ms + *+ | |
1647 | + ** + | |
1648 | | (midpoint) ** | | |
1649 | + | ** + | |
1650 | 1ms + v **** + | |
1651 | + zfs_delay_scale ----------> ***** + | |
1652 | | **** | | |
1653 | + **** + | |
1654 | 100us + ** + | |
1655 | + * + | |
1656 | | * | | |
1657 | + * + | |
1658 | 10us + * + | |
1659 | + + | |
1660 | | | | |
1661 | + + | |
1662 | +--------------------------------------------------------------+ | |
1663 | 0% <- zfs_dirty_data_max -> 100% | |
1664 | .fi | |
1665 | .sp | |
1666 | Note here that only as the amount of dirty data approaches its limit does | |
1667 | the delay start to increase rapidly. The goal of a properly tuned system | |
1668 | should be to keep the amount of dirty data out of that range by first | |
1669 | ensuring that the appropriate limits are set for the I/O scheduler to reach | |
1670 | optimal throughput on the backend storage, and then by changing the value | |
1671 | of \fBzfs_delay_scale\fR to increase the steepness of the curve. |