/*
 * Copyright (c) 2020 iXsystems, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/uio.h>
#include <sys/buf.h>
#include <sys/file.h>
#include <sys/kmem.h>
#include <sys/cmn_err.h>
#include <sys/stat.h>
#include <sys/zfs_ioctl.h>
#include <sys/zfs_vfsops.h>
#include <sys/zfs_znode.h>
#include <sys/zap.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/vdev.h>
#include <sys/vdev_impl.h>
#include <sys/arc_os.h>
#include <sys/dmu.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_deleg.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_tx.h>
#include <sys/sunddi.h>
#include <sys/policy.h>
#include <sys/zone.h>
#include <sys/nvpair.h>
#include <sys/mount.h>
#include <sys/taskqueue.h>
#include <sys/sdt.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_ctldir.h>
#include <sys/zfs_dir.h>
#include <sys/zfs_onexit.h>
#include <sys/zvol.h>
#include <sys/dsl_scan.h>
#include <sys/dmu_send.h>
#include <sys/dsl_destroy.h>
#include <sys/dsl_bookmark.h>
#include <sys/dsl_userhold.h>
#include <sys/zfeature.h>
#include <sys/zcp.h>
#include <sys/zio_checksum.h>
#include <sys/vdev_removal.h>
#include <sys/dsl_crypt.h>

#include <sys/zfs_ioctl_compat.h>
#include <sys/zfs_context.h>

#include <sys/arc_impl.h>
#include <sys/dsl_pool.h>

#include <sys/vmmeter.h>

SYSCTL_DECL(_vfs_zfs);
SYSCTL_NODE(_vfs_zfs, OID_AUTO, arc, CTLFLAG_RW, 0,
    "ZFS adaptive replacement cache");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, condense, CTLFLAG_RW, 0, "ZFS condense");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, dbuf, CTLFLAG_RW, 0, "ZFS disk buf cache");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, dbuf_cache, CTLFLAG_RW, 0,
    "ZFS disk buf cache");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, deadman, CTLFLAG_RW, 0, "ZFS deadman");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, dedup, CTLFLAG_RW, 0, "ZFS dedup");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, l2arc, CTLFLAG_RW, 0, "ZFS l2arc");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, livelist, CTLFLAG_RW, 0, "ZFS livelist");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, lua, CTLFLAG_RW, 0, "ZFS lua");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, metaslab, CTLFLAG_RW, 0, "ZFS metaslab");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, mg, CTLFLAG_RW, 0, "ZFS metaslab group");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, multihost, CTLFLAG_RW, 0,
    "ZFS multihost protection");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, prefetch, CTLFLAG_RW, 0, "ZFS prefetch");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, reconstruct, CTLFLAG_RW, 0, "ZFS reconstruct");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, recv, CTLFLAG_RW, 0, "ZFS receive");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, send, CTLFLAG_RW, 0, "ZFS send");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, spa, CTLFLAG_RW, 0, "ZFS space allocation");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, trim, CTLFLAG_RW, 0, "ZFS TRIM");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, txg, CTLFLAG_RW, 0, "ZFS transaction group");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, vdev, CTLFLAG_RW, 0, "ZFS VDEV");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, vnops, CTLFLAG_RW, 0, "ZFS VNOPS");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, zevent, CTLFLAG_RW, 0, "ZFS event");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, zil, CTLFLAG_RW, 0, "ZFS ZIL");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, zio, CTLFLAG_RW, 0, "ZFS ZIO");

SYSCTL_NODE(_vfs_zfs_livelist, OID_AUTO, condense, CTLFLAG_RW, 0,
    "ZFS livelist condense");
SYSCTL_NODE(_vfs_zfs_vdev, OID_AUTO, cache, CTLFLAG_RW, 0, "ZFS VDEV Cache");
SYSCTL_NODE(_vfs_zfs_vdev, OID_AUTO, file, CTLFLAG_RW, 0, "ZFS VDEV file");
SYSCTL_NODE(_vfs_zfs_vdev, OID_AUTO, mirror, CTLFLAG_RD, 0,
    "ZFS VDEV mirror");

SYSCTL_DECL(_vfs_zfs_version);
SYSCTL_CONST_STRING(_vfs_zfs_version, OID_AUTO, module, CTLFLAG_RD,
    (ZFS_META_VERSION "-" ZFS_META_RELEASE), "OpenZFS module version");

/* arc.c */

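/*
 * The handlers below follow the standard FreeBSD sysctl pattern: copy
 * the current value into a local, let sysctl_handle_*() perform the
 * userland copy-in/copy-out, bail out on error or on a read-only
 * request (req->newptr == NULL), validate the new value, and only
 * then commit it to the global. A minimal sketch of the idiom
 * (illustrative only, not compiled in; "example_tunable" is a
 * hypothetical variable, not part of OpenZFS):
 *
 *      static unsigned long example_tunable;
 *
 *      static int
 *      param_set_example(SYSCTL_HANDLER_ARGS)
 *      {
 *              unsigned long val;
 *              int err;
 *
 *              val = example_tunable;
 *              err = sysctl_handle_long(oidp, &val, 0, req);
 *              if (err != 0 || req->newptr == NULL)
 *                      return (err);
 *              if (val == 0)
 *                      return (EINVAL);
 *              example_tunable = val;
 *              return (0);
 *      }
 */
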
int
param_set_arc_long(SYSCTL_HANDLER_ARGS)
{
    int err;

    err = sysctl_handle_long(oidp, arg1, 0, req);
    if (err != 0 || req->newptr == NULL)
        return (err);

    arc_tuning_update(B_TRUE);

    return (0);
}

int
param_set_arc_int(SYSCTL_HANDLER_ARGS)
{
    int err;

    err = sysctl_handle_int(oidp, arg1, 0, req);
    if (err != 0 || req->newptr == NULL)
        return (err);

    arc_tuning_update(B_TRUE);

    return (0);
}

int
param_set_arc_max(SYSCTL_HANDLER_ARGS)
{
    unsigned long val;
    int err;

    val = zfs_arc_max;
    err = sysctl_handle_long(oidp, &val, 0, req);
    if (err != 0 || req->newptr == NULL)
        return (SET_ERROR(err));

    if (val != 0 && (val < MIN_ARC_MAX || val <= arc_c_min ||
        val >= arc_all_memory()))
        return (SET_ERROR(EINVAL));

    zfs_arc_max = val;
    arc_tuning_update(B_TRUE);

    /* Update the sysctl to the tuned value */
    if (val != 0)
        zfs_arc_max = arc_c_max;

    return (0);
}

/* BEGIN CSTYLED */
SYSCTL_PROC(_vfs_zfs, OID_AUTO, arc_max,
    CTLTYPE_ULONG | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
    NULL, 0, param_set_arc_max, "LU",
    "Maximum ARC size in bytes (LEGACY)");
/* END CSTYLED */
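
/*
 * Because arc_max is registered with CTLFLAG_RWTUN, it can be changed
 * at runtime ("sysctl vfs.zfs.arc_max=<bytes>") or preset as a
 * boot-time tunable from /boot/loader.conf.
 */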

int
param_set_arc_min(SYSCTL_HANDLER_ARGS)
{
    unsigned long val;
    int err;

    val = zfs_arc_min;
    err = sysctl_handle_long(oidp, &val, 0, req);
    if (err != 0 || req->newptr == NULL)
        return (SET_ERROR(err));

    if (val != 0 && (val < 2ULL << SPA_MAXBLOCKSHIFT || val > arc_c_max))
        return (SET_ERROR(EINVAL));

    zfs_arc_min = val;
    arc_tuning_update(B_TRUE);

    /* Update the sysctl to the tuned value */
    if (val != 0)
        zfs_arc_min = arc_c_min;

    return (0);
}

/* BEGIN CSTYLED */
SYSCTL_PROC(_vfs_zfs, OID_AUTO, arc_min,
    CTLTYPE_ULONG | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
    NULL, 0, param_set_arc_min, "LU",
    "Minimum ARC size in bytes (LEGACY)");
/* END CSTYLED */

extern uint_t zfs_arc_free_target;

int
param_set_arc_free_target(SYSCTL_HANDLER_ARGS)
{
    uint_t val;
    int err;

    val = zfs_arc_free_target;
    err = sysctl_handle_int(oidp, &val, 0, req);
    if (err != 0 || req->newptr == NULL)
        return (err);

    if (val < minfree)
        return (EINVAL);
    if (val > vm_cnt.v_page_count)
        return (EINVAL);

    zfs_arc_free_target = val;

    return (0);
}

/*
 * NOTE: This sysctl is CTLFLAG_RW not CTLFLAG_RWTUN due to its dependency on
 * pagedaemon initialization.
 */
/* BEGIN CSTYLED */
SYSCTL_PROC(_vfs_zfs, OID_AUTO, arc_free_target,
    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE,
    NULL, 0, param_set_arc_free_target, "IU",
    "Desired number of free pages below which ARC triggers reclaim"
    " (LEGACY)");
/* END CSTYLED */
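
/*
 * Note that arc_free_target is expressed in pages, not bytes: the
 * handler above rejects values below the VM's minfree threshold or
 * above the machine's total page count.
 */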

int
param_set_arc_no_grow_shift(SYSCTL_HANDLER_ARGS)
{
    int err, val;

    val = arc_no_grow_shift;
    err = sysctl_handle_int(oidp, &val, 0, req);
    if (err != 0 || req->newptr == NULL)
        return (err);

    if (val < 0 || val >= arc_shrink_shift)
        return (EINVAL);

    arc_no_grow_shift = val;

    return (0);
}

/* BEGIN CSTYLED */
SYSCTL_PROC(_vfs_zfs, OID_AUTO, arc_no_grow_shift,
    CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
    NULL, 0, param_set_arc_no_grow_shift, "I",
    "log2(fraction of ARC which must be free to allow growing) (LEGACY)");
/* END CSTYLED */

extern uint64_t l2arc_write_max;

/* BEGIN CSTYLED */
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_write_max,
    CTLFLAG_RWTUN, &l2arc_write_max, 0,
    "Max write bytes per interval (LEGACY)");
/* END CSTYLED */

extern uint64_t l2arc_write_boost;

/* BEGIN CSTYLED */
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_write_boost,
    CTLFLAG_RWTUN, &l2arc_write_boost, 0,
    "Extra write bytes during device warmup (LEGACY)");
/* END CSTYLED */

extern uint64_t l2arc_headroom;

/* BEGIN CSTYLED */
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_headroom,
    CTLFLAG_RWTUN, &l2arc_headroom, 0,
    "Number of max device writes to precache (LEGACY)");
/* END CSTYLED */

extern uint64_t l2arc_headroom_boost;

/* BEGIN CSTYLED */
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_headroom_boost,
    CTLFLAG_RWTUN, &l2arc_headroom_boost, 0,
    "Compressed l2arc_headroom multiplier (LEGACY)");
/* END CSTYLED */

extern uint64_t l2arc_feed_secs;

/* BEGIN CSTYLED */
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_feed_secs,
    CTLFLAG_RWTUN, &l2arc_feed_secs, 0,
    "Seconds between L2ARC writing (LEGACY)");
/* END CSTYLED */

extern uint64_t l2arc_feed_min_ms;

/* BEGIN CSTYLED */
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_feed_min_ms,
    CTLFLAG_RWTUN, &l2arc_feed_min_ms, 0,
    "Min feed interval in milliseconds (LEGACY)");
/* END CSTYLED */

extern int l2arc_noprefetch;

/* BEGIN CSTYLED */
SYSCTL_INT(_vfs_zfs, OID_AUTO, l2arc_noprefetch,
    CTLFLAG_RWTUN, &l2arc_noprefetch, 0,
    "Skip caching prefetched buffers (LEGACY)");
/* END CSTYLED */

extern int l2arc_feed_again;

/* BEGIN CSTYLED */
SYSCTL_INT(_vfs_zfs, OID_AUTO, l2arc_feed_again,
    CTLFLAG_RWTUN, &l2arc_feed_again, 0,
    "Turbo L2ARC warmup (LEGACY)");
/* END CSTYLED */

extern int l2arc_norw;

/* BEGIN CSTYLED */
SYSCTL_INT(_vfs_zfs, OID_AUTO, l2arc_norw,
    CTLFLAG_RWTUN, &l2arc_norw, 0,
    "No reads during writes (LEGACY)");
/* END CSTYLED */
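
/*
 * The l2arc_* sysctls above are direct bindings to globals defined in
 * the common ARC code. They carry the (LEGACY) tag because the same
 * tunables are also exposed through the cross-platform module
 * parameter mechanism.
 */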

extern arc_state_t ARC_anon;

/* BEGIN CSTYLED */
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, anon_size, CTLFLAG_RD,
    &ARC_anon.arcs_size.rc_count, 0, "size of anonymous state");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, anon_metadata_esize, CTLFLAG_RD,
    &ARC_anon.arcs_esize[ARC_BUFC_METADATA].rc_count, 0,
    "size of metadata in anonymous state");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, anon_data_esize, CTLFLAG_RD,
    &ARC_anon.arcs_esize[ARC_BUFC_DATA].rc_count, 0,
    "size of data in anonymous state");
/* END CSTYLED */

extern arc_state_t ARC_mru;

/* BEGIN CSTYLED */
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_size, CTLFLAG_RD,
    &ARC_mru.arcs_size.rc_count, 0, "size of mru state");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_metadata_esize, CTLFLAG_RD,
    &ARC_mru.arcs_esize[ARC_BUFC_METADATA].rc_count, 0,
    "size of metadata in mru state");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_data_esize, CTLFLAG_RD,
    &ARC_mru.arcs_esize[ARC_BUFC_DATA].rc_count, 0,
    "size of data in mru state");
/* END CSTYLED */

extern arc_state_t ARC_mru_ghost;

/* BEGIN CSTYLED */
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_ghost_size, CTLFLAG_RD,
    &ARC_mru_ghost.arcs_size.rc_count, 0, "size of mru ghost state");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_ghost_metadata_esize, CTLFLAG_RD,
    &ARC_mru_ghost.arcs_esize[ARC_BUFC_METADATA].rc_count, 0,
    "size of metadata in mru ghost state");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_ghost_data_esize, CTLFLAG_RD,
    &ARC_mru_ghost.arcs_esize[ARC_BUFC_DATA].rc_count, 0,
    "size of data in mru ghost state");
/* END CSTYLED */

extern arc_state_t ARC_mfu;

/* BEGIN CSTYLED */
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_size, CTLFLAG_RD,
    &ARC_mfu.arcs_size.rc_count, 0, "size of mfu state");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_metadata_esize, CTLFLAG_RD,
    &ARC_mfu.arcs_esize[ARC_BUFC_METADATA].rc_count, 0,
    "size of metadata in mfu state");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_data_esize, CTLFLAG_RD,
    &ARC_mfu.arcs_esize[ARC_BUFC_DATA].rc_count, 0,
    "size of data in mfu state");
/* END CSTYLED */

extern arc_state_t ARC_mfu_ghost;

/* BEGIN CSTYLED */
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_ghost_size, CTLFLAG_RD,
    &ARC_mfu_ghost.arcs_size.rc_count, 0, "size of mfu ghost state");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_ghost_metadata_esize, CTLFLAG_RD,
    &ARC_mfu_ghost.arcs_esize[ARC_BUFC_METADATA].rc_count, 0,
    "size of metadata in mfu ghost state");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_ghost_data_esize, CTLFLAG_RD,
    &ARC_mfu_ghost.arcs_esize[ARC_BUFC_DATA].rc_count, 0,
    "size of data in mfu ghost state");
/* END CSTYLED */

extern arc_state_t ARC_l2c_only;

/* BEGIN CSTYLED */
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2c_only_size, CTLFLAG_RD,
    &ARC_l2c_only.arcs_size.rc_count, 0, "size of l2c_only state");
/* END CSTYLED */

/* dbuf.c */

/* dmu.c */

/* dmu_zfetch.c */

SYSCTL_NODE(_vfs_zfs, OID_AUTO, zfetch, CTLFLAG_RW, 0, "ZFS ZFETCH (LEGACY)");

extern uint32_t zfetch_max_distance;

/* BEGIN CSTYLED */
SYSCTL_UINT(_vfs_zfs_zfetch, OID_AUTO, max_distance,
    CTLFLAG_RWTUN, &zfetch_max_distance, 0,
    "Max bytes to prefetch per stream (LEGACY)");
/* END CSTYLED */

extern uint32_t zfetch_max_idistance;

/* BEGIN CSTYLED */
SYSCTL_UINT(_vfs_zfs_zfetch, OID_AUTO, max_idistance,
    CTLFLAG_RWTUN, &zfetch_max_idistance, 0,
    "Max bytes to prefetch indirects for per stream (LEGACY)");
/* END CSTYLED */

/* dsl_pool.c */

/* dnode.c */

extern int zfs_default_bs;

/* BEGIN CSTYLED */
SYSCTL_INT(_vfs_zfs, OID_AUTO, default_bs, CTLFLAG_RWTUN,
    &zfs_default_bs, 0, "Default dnode block shift");
/* END CSTYLED */

extern int zfs_default_ibs;

/* BEGIN CSTYLED */
SYSCTL_INT(_vfs_zfs, OID_AUTO, default_ibs, CTLFLAG_RWTUN,
    &zfs_default_ibs, 0, "Default dnode indirect block shift");
/* END CSTYLED */

/* dsl_scan.c */

/* metaslab.c */

/*
 * In pools where the log space map feature is not enabled we touch
 * multiple metaslabs (and their respective space maps) with each
 * transaction group. Thus, we benefit from having a small space map
 * block size since it allows us to issue more I/O operations scattered
 * around the disk. So a sane default for the space map block size
 * is 8-16K.
 */
extern int zfs_metaslab_sm_blksz_no_log;

/* BEGIN CSTYLED */
SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, sm_blksz_no_log,
    CTLFLAG_RDTUN, &zfs_metaslab_sm_blksz_no_log, 0,
    "Block size for space map in pools with log space map disabled. "
    "Power of 2 greater than 4096.");
/* END CSTYLED */

/*
 * When the log space map feature is enabled, we accumulate a lot of
 * changes per metaslab that are flushed once in a while so we benefit
 * from a bigger block size like 128K for the metaslab space maps.
 */
extern int zfs_metaslab_sm_blksz_with_log;

/* BEGIN CSTYLED */
SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, sm_blksz_with_log,
    CTLFLAG_RDTUN, &zfs_metaslab_sm_blksz_with_log, 0,
    "Block size for space map in pools with log space map enabled. "
    "Power of 2 greater than 4096.");
/* END CSTYLED */

/*
 * The in-core space map representation is more compact than its on-disk form.
 * The zfs_condense_pct determines how much more compact the in-core
 * space map representation must be before we compact it on-disk.
 * Values should be greater than or equal to 100.
 */
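/*
 * For example, with zfs_condense_pct set to 100 the on-disk space map
 * is condensed as soon as it outgrows its in-core representation;
 * at 200 it must first grow to at least twice that size.
 */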
extern uint_t zfs_condense_pct;

/* BEGIN CSTYLED */
SYSCTL_UINT(_vfs_zfs, OID_AUTO, condense_pct,
    CTLFLAG_RWTUN, &zfs_condense_pct, 0,
    "Condense on-disk spacemap when it is more than this percentage of"
    " the in-memory counterpart");
/* END CSTYLED */

extern uint_t zfs_remove_max_segment;

/* BEGIN CSTYLED */
SYSCTL_UINT(_vfs_zfs, OID_AUTO, remove_max_segment,
    CTLFLAG_RWTUN, &zfs_remove_max_segment, 0,
    "Largest contiguous segment ZFS will attempt to allocate when removing"
    " a device");
/* END CSTYLED */

extern int zfs_removal_suspend_progress;

/* BEGIN CSTYLED */
SYSCTL_INT(_vfs_zfs, OID_AUTO, removal_suspend_progress,
    CTLFLAG_RWTUN, &zfs_removal_suspend_progress, 0,
    "Ensures certain actions can happen while in the middle of a removal");
/* END CSTYLED */

/*
 * Minimum size which forces the dynamic allocator to change
 * its allocation strategy. Once the space map cannot satisfy
 * an allocation of this size, it switches to a more aggressive
 * strategy (i.e., search by size rather than by offset).
 */
extern uint64_t metaslab_df_alloc_threshold;

/* BEGIN CSTYLED */
SYSCTL_QUAD(_vfs_zfs_metaslab, OID_AUTO, df_alloc_threshold,
    CTLFLAG_RWTUN, &metaslab_df_alloc_threshold, 0,
    "Minimum size which forces the dynamic allocator to change its"
    " allocation strategy");
/* END CSTYLED */

/*
 * The minimum free space, in percent, which must be available
 * in a space map to continue allocations in a first-fit fashion.
 * Once the space map's free space drops below this level we dynamically
 * switch to using best-fit allocations.
 */
extern uint_t metaslab_df_free_pct;

/* BEGIN CSTYLED */
SYSCTL_UINT(_vfs_zfs_metaslab, OID_AUTO, df_free_pct,
    CTLFLAG_RWTUN, &metaslab_df_free_pct, 0,
    "The minimum free space, in percent, which must be available in a"
    " space map to continue allocations in a first-fit fashion");
/* END CSTYLED */

/*
 * Percentage of all CPUs that can be used by the metaslab taskq.
 */
extern int metaslab_load_pct;

/* BEGIN CSTYLED */
SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, load_pct,
    CTLFLAG_RWTUN, &metaslab_load_pct, 0,
    "Percentage of CPUs that can be used by the metaslab taskq");
/* END CSTYLED */

/*
 * Max number of metaslabs per group to preload.
 */
extern uint_t metaslab_preload_limit;

/* BEGIN CSTYLED */
SYSCTL_UINT(_vfs_zfs_metaslab, OID_AUTO, preload_limit,
    CTLFLAG_RWTUN, &metaslab_preload_limit, 0,
    "Max number of metaslabs per group to preload");
/* END CSTYLED */

/* mmp.c */

int
param_set_multihost_interval(SYSCTL_HANDLER_ARGS)
{
    int err;

    err = sysctl_handle_long(oidp, &zfs_multihost_interval, 0, req);
    if (err != 0 || req->newptr == NULL)
        return (err);

    if (spa_mode_global != SPA_MODE_UNINIT)
        mmp_signal_all_threads();

    return (0);
}
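
/*
 * Unlike most handlers in this file, this one hands the global
 * directly to sysctl_handle_long() and applies no range check of its
 * own; it merely signals the MMP threads afterwards so that an active
 * pool picks up the new interval immediately.
 */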

/* spa.c */

extern int zfs_ccw_retry_interval;

/* BEGIN CSTYLED */
SYSCTL_INT(_vfs_zfs, OID_AUTO, ccw_retry_interval,
    CTLFLAG_RWTUN, &zfs_ccw_retry_interval, 0,
    "Configuration cache file write, retry after failure, interval"
    " (seconds)");
/* END CSTYLED */

extern uint64_t zfs_max_missing_tvds_cachefile;

/* BEGIN CSTYLED */
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, max_missing_tvds_cachefile,
    CTLFLAG_RWTUN, &zfs_max_missing_tvds_cachefile, 0,
    "Allow importing pools with missing top-level vdevs in cache file");
/* END CSTYLED */

extern uint64_t zfs_max_missing_tvds_scan;

/* BEGIN CSTYLED */
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, max_missing_tvds_scan,
    CTLFLAG_RWTUN, &zfs_max_missing_tvds_scan, 0,
    "Allow importing pools with missing top-level vdevs during scan");
/* END CSTYLED */

/* spa_misc.c */

extern int zfs_flags;

static int
sysctl_vfs_zfs_debug_flags(SYSCTL_HANDLER_ARGS)
{
    int err, val;

    val = zfs_flags;
    err = sysctl_handle_int(oidp, &val, 0, req);
    if (err != 0 || req->newptr == NULL)
        return (err);

    /*
     * ZFS_DEBUG_MODIFY must be enabled prior to boot so all
     * arc buffers in the system have the necessary additional
     * checksum data. However, it is safe to disable at any
     * time.
     */
    if (!(zfs_flags & ZFS_DEBUG_MODIFY))
        val &= ~ZFS_DEBUG_MODIFY;
    zfs_flags = val;

    return (0);
}

/* BEGIN CSTYLED */
SYSCTL_PROC(_vfs_zfs, OID_AUTO, debugflags,
    CTLTYPE_UINT | CTLFLAG_MPSAFE | CTLFLAG_RWTUN, NULL, 0,
    sysctl_vfs_zfs_debug_flags, "IU", "Debug flags for ZFS testing.");
/* END CSTYLED */
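
/*
 * The individual ZFS_DEBUG_* flag values accepted by this sysctl are
 * defined in sys/zfs_debug.h; several flags may be OR-ed together into
 * the single integer written here.
 */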

int
param_set_deadman_synctime(SYSCTL_HANDLER_ARGS)
{
    unsigned long val;
    int err;

    val = zfs_deadman_synctime_ms;
    err = sysctl_handle_long(oidp, &val, 0, req);
    if (err != 0 || req->newptr == NULL)
        return (err);
    zfs_deadman_synctime_ms = val;

    spa_set_deadman_synctime(MSEC2NSEC(zfs_deadman_synctime_ms));

    return (0);
}

int
param_set_deadman_ziotime(SYSCTL_HANDLER_ARGS)
{
    unsigned long val;
    int err;

    val = zfs_deadman_ziotime_ms;
    err = sysctl_handle_long(oidp, &val, 0, req);
    if (err != 0 || req->newptr == NULL)
        return (err);
    zfs_deadman_ziotime_ms = val;

    spa_set_deadman_ziotime(MSEC2NSEC(zfs_deadman_ziotime_ms));

    return (0);
}

int
param_set_deadman_failmode(SYSCTL_HANDLER_ARGS)
{
    char buf[16];
    int rc;

    if (req->newptr == NULL)
        strlcpy(buf, zfs_deadman_failmode, sizeof (buf));

    rc = sysctl_handle_string(oidp, buf, sizeof (buf), req);
    if (rc || req->newptr == NULL)
        return (rc);
    if (strcmp(buf, zfs_deadman_failmode) == 0)
        return (0);
    if (strcmp(buf, "wait") == 0)
        zfs_deadman_failmode = "wait";
    if (strcmp(buf, "continue") == 0)
        zfs_deadman_failmode = "continue";
    if (strcmp(buf, "panic") == 0)
        zfs_deadman_failmode = "panic";

    return (-param_set_deadman_failmode_common(buf));
}
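
/*
 * Only the literal strings "wait", "continue" and "panic" are
 * meaningful above; anything else is rejected by
 * param_set_deadman_failmode_common(), whose negated return value
 * becomes the sysctl error.
 */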

int
param_set_slop_shift(SYSCTL_HANDLER_ARGS)
{
    int val;
    int err;

    val = spa_slop_shift;
    err = sysctl_handle_int(oidp, &val, 0, req);
    if (err != 0 || req->newptr == NULL)
        return (err);

    if (val < 1 || val > 31)
        return (EINVAL);

    spa_slop_shift = val;

    return (0);
}
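
/*
 * spa_slop_shift controls the fraction of pool capacity reserved as
 * slop space, roughly 1/2^shift of the pool: the accepted range is
 * 1 through 31, so e.g. a value of 5 reserves about 1/32 (3.1%) of
 * capacity.
 */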

/* spacemap.c */

extern int space_map_ibs;

/* BEGIN CSTYLED */
SYSCTL_INT(_vfs_zfs, OID_AUTO, space_map_ibs, CTLFLAG_RWTUN,
    &space_map_ibs, 0, "Space map indirect block shift");
/* END CSTYLED */


/* vdev.c */

int
param_set_min_auto_ashift(SYSCTL_HANDLER_ARGS)
{
    uint64_t val;
    int err;

    val = zfs_vdev_min_auto_ashift;
    err = sysctl_handle_64(oidp, &val, 0, req);
    if (err != 0 || req->newptr == NULL)
        return (SET_ERROR(err));

    if (val < ASHIFT_MIN || val > zfs_vdev_max_auto_ashift)
        return (SET_ERROR(EINVAL));

    zfs_vdev_min_auto_ashift = val;

    return (0);
}

/* BEGIN CSTYLED */
SYSCTL_PROC(_vfs_zfs, OID_AUTO, min_auto_ashift,
    CTLTYPE_U64 | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
    &zfs_vdev_min_auto_ashift, sizeof (zfs_vdev_min_auto_ashift),
    param_set_min_auto_ashift, "QU",
    "Min ashift used when creating new top-level vdev. (LEGACY)");
/* END CSTYLED */
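
/*
 * ashift is the base-two logarithm of the vdev sector size: 9 means
 * 512-byte sectors and 12 means 4 KiB. Together, the handler above and
 * the one below keep zfs_vdev_min_auto_ashift <= zfs_vdev_max_auto_ashift
 * within [ASHIFT_MIN, ASHIFT_MAX].
 */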

int
param_set_max_auto_ashift(SYSCTL_HANDLER_ARGS)
{
    uint64_t val;
    int err;

    val = zfs_vdev_max_auto_ashift;
    err = sysctl_handle_64(oidp, &val, 0, req);
    if (err != 0 || req->newptr == NULL)
        return (SET_ERROR(err));

    if (val > ASHIFT_MAX || val < zfs_vdev_min_auto_ashift)
        return (SET_ERROR(EINVAL));

    zfs_vdev_max_auto_ashift = val;

    return (0);
}

/* BEGIN CSTYLED */
SYSCTL_PROC(_vfs_zfs, OID_AUTO, max_auto_ashift,
    CTLTYPE_U64 | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
    &zfs_vdev_max_auto_ashift, sizeof (zfs_vdev_max_auto_ashift),
    param_set_max_auto_ashift, "QU",
    "Max ashift used when optimizing for logical -> physical sector size on"
    " new top-level vdevs. (LEGACY)");
/* END CSTYLED */

/*
 * Since the DTL space map of a vdev is not expected to have a lot of
 * entries, we default its block size to 4K.
 */
extern int zfs_vdev_dtl_sm_blksz;

/* BEGIN CSTYLED */
SYSCTL_INT(_vfs_zfs, OID_AUTO, dtl_sm_blksz,
    CTLFLAG_RDTUN, &zfs_vdev_dtl_sm_blksz, 0,
    "Block size for DTL space map. Power of 2 greater than 4096.");
/* END CSTYLED */

/*
 * vdev-wide space maps that have lots of entries written to them at
 * the end of each transaction can benefit from a higher I/O bandwidth
 * (e.g. vdev_obsolete_sm), thus we default their block size to 128K.
 */
extern int zfs_vdev_standard_sm_blksz;

/* BEGIN CSTYLED */
SYSCTL_INT(_vfs_zfs, OID_AUTO, standard_sm_blksz,
    CTLFLAG_RDTUN, &zfs_vdev_standard_sm_blksz, 0,
    "Block size for standard space map. Power of 2 greater than 4096.");
/* END CSTYLED */

extern int vdev_validate_skip;

/* BEGIN CSTYLED */
SYSCTL_INT(_vfs_zfs, OID_AUTO, validate_skip,
    CTLFLAG_RDTUN, &vdev_validate_skip, 0,
    "Enable to bypass vdev_validate().");
/* END CSTYLED */

/* vdev_cache.c */

/* vdev_mirror.c */

/* vdev_queue.c */

extern uint_t zfs_vdev_max_active;

/* BEGIN CSTYLED */
SYSCTL_UINT(_vfs_zfs, OID_AUTO, top_maxinflight,
    CTLFLAG_RWTUN, &zfs_vdev_max_active, 0,
    "The maximum number of I/Os of all types active for each device."
    " (LEGACY)");
/* END CSTYLED */

extern uint_t zfs_vdev_def_queue_depth;

/* BEGIN CSTYLED */
SYSCTL_UINT(_vfs_zfs_vdev, OID_AUTO, def_queue_depth,
    CTLFLAG_RWTUN, &zfs_vdev_def_queue_depth, 0,
    "Default queue depth for each allocator");
/* END CSTYLED */

/* zio.c */

/* BEGIN CSTYLED */
SYSCTL_INT(_vfs_zfs_zio, OID_AUTO, exclude_metadata,
    CTLFLAG_RDTUN, &zio_exclude_metadata, 0,
    "Exclude metadata buffers from dumps as well");
/* END CSTYLED */