1 '\" te
2 .\" Copyright (c) 2007, Sun Microsystems, Inc. All Rights Reserved.
3 .\" Copyright 2011 Nexenta Systems, Inc. All rights reserved.
4 .\" Copyright (c) 2013 by Delphix. All rights reserved.
5 .\" Copyright (c) 2012 Cyril Plisko. All Rights Reserved.
6 .\" The contents of this file are subject to the terms of the Common Development
7 .\" and Distribution License (the "License"). You may not use this file except
8 .\" in compliance with the License. You can obtain a copy of the license at
9 .\" usr/src/OPENSOLARIS.LICENSE or http://www.opensolaris.org/os/licensing.
10 .\"
11 .\" See the License for the specific language governing permissions and
12 .\" limitations under the License. When distributing Covered Code, include this
13 .\" CDDL HEADER in each file and include the License file at
14 .\" usr/src/OPENSOLARIS.LICENSE. If applicable, add the following below this
15 .\" CDDL HEADER, with the fields enclosed by brackets "[]" replaced with your
16 .\" own identifying information:
17 .\" Portions Copyright [yyyy] [name of copyright owner]
18 .TH zpool 8 "May 11, 2016" "ZFS pool 28, filesystem 5" "System Administration Commands"
19 .SH NAME
20 zpool \- configures ZFS storage pools
21 .SH SYNOPSIS
22 .LP
23 .nf
24 \fBzpool\fR [\fB-?\fR]
25 .fi
26
27 .LP
28 .nf
29 \fBzpool add\fR [\fB-fgLnP\fR] [\fB-o\fR \fIproperty=value\fR] \fIpool\fR \fIvdev\fR ...
30 .fi
31
32 .LP
33 .nf
34 \fBzpool attach\fR [\fB-f\fR] [\fB-o\fR \fIproperty=value\fR] \fIpool\fR \fIdevice\fR \fInew_device\fR
35 .fi
36
37 .LP
38 .nf
39 \fBzpool clear\fR \fIpool\fR [\fIdevice\fR]
40 .fi
41
42 .LP
43 .nf
44 \fBzpool create\fR [\fB-fnd\fR] [\fB-o\fR \fIproperty=value\fR] ... [\fB-o\fR feature@\fIfeature=value\fR]
45 ... [\fB-O\fR \fIfile-system-property=value\fR] ... [\fB-m\fR \fImountpoint\fR] [\fB-R\fR \fIroot\fR]
46 ... [\fB-t\fR \fItname\fR] \fIpool\fR \fIvdev\fR ...
47 .fi
48
49 .LP
50 .nf
51 \fBzpool destroy\fR [\fB-f\fR] \fIpool\fR
52 .fi
53
54 .LP
55 .nf
56 \fBzpool detach\fR \fIpool\fR \fIdevice\fR
57 .fi
58
59 .LP
60 .nf
61 \fBzpool events\fR [\fB-vHfc\fR] [\fIpool\fR] ...
62 .fi
63
64 .LP
65 .nf
66 \fBzpool export\fR [\fB-a\fR] [\fB-f\fR] \fIpool\fR ...
67 .fi
68
69 .LP
70 .nf
71 \fBzpool get\fR [\fB-Hp\fR] [\fB-o \fR\fIfield\fR[,...]] "\fIall\fR" | \fIproperty\fR[,...] \fIpool\fR ...
72 .fi
73
74 .LP
75 .nf
76 \fBzpool history\fR [\fB-il\fR] [\fIpool\fR] ...
77 .fi
78
79 .LP
80 .nf
81 \fBzpool import\fR [\fB-d\fR \fIdir\fR] [\fB-D\fR]
82 .fi
83
84 .LP
85 .nf
86 \fBzpool import\fR [\fB-o \fImntopts\fR\fR] [\fB-o\fR \fIproperty=value\fR] ... [\fB-d\fR \fIdir\fR | \fB-c\fR \fIcachefile\fR]
87 [\fB-D\fR] [\fB-f\fR] [\fB-m\fR] [\fB-N\fR] [\fB-R\fR \fIroot\fR] [\fB-F\fR [\fB-n\fR] [\fB-X\fR] [\fB-T\fR]] [\fB-s\fR] \fB-a\fR
88 .fi
89
90 .LP
91 .nf
92 \fBzpool import\fR [\fB-o \fImntopts\fR\fR] [\fB-o\fR \fIproperty=value\fR] ... [\fB-d\fR \fIdir\fR | \fB-c\fR \fIcachefile\fR]
93 [\fB-D\fR] [\fB-f\fR] [\fB-m\fR] [\fB-R\fR \fIroot\fR] [\fB-F\fR [\fB-n\fR] [\fB-X\fR] [\fB-T\fR]] [\fB-t\fR] [\fB-s\fR]
94 \fIpool\fR | \fIid\fR [\fInewpool\fR]
95 .fi
96
97 .LP
98 .nf
99 \fB\fBzpool iostat\fR [\fB-c\fR \fBCMD\fR] [\fB-T\fR \fBd\fR | \fBu\fR] [\fB-ghHLpPvy\fR] [[\fB-lq\fR]|[\fB-r\fR|\fB-w\fR]]
100 [[\fIpool\fR ...]|[\fIpool vdev\fR ...]|[\fIvdev\fR ...]] [\fIinterval\fR[\fIcount\fR]]\fR
101
102 .fi
103
104 .LP
105 .nf
106 \fBzpool labelclear\fR [\fB-f\fR] \fIdevice\fR
107 .fi
108
109 .LP
110 .nf
111 \fBzpool list\fR [\fB-T\fR d | u] [\fB-HgLpPv\fR] [\fB-o\fR \fIproperty\fR[,...]] [\fIpool\fR] ...
112 [\fIinterval\fR[\fIcount\fR]]
113 .fi
114
115 .LP
116 .nf
117 \fBzpool offline\fR [\fB-t\fR] \fIpool\fR \fIdevice\fR ...
118 .fi
119
120 .LP
121 .nf
122 \fBzpool online\fR \fIpool\fR \fIdevice\fR ...
123 .fi
124
125 .LP
126 .nf
127 \fBzpool reguid\fR \fIpool\fR
128 .fi
129
130 .LP
131 .nf
132 \fBzpool reopen\fR \fIpool\fR
133 .fi
134
135 .LP
136 .nf
137 \fBzpool remove\fR \fIpool\fR \fIdevice\fR ...
138 .fi
139
140 .LP
141 .nf
142 \fBzpool replace\fR [\fB-f\fR] [\fB-o\fR \fIproperty=value\fR] \fIpool\fR \fIdevice\fR [\fInew_device\fR]
143 .fi
144
145 .LP
146 .nf
147 \fBzpool scrub\fR [\fB-s\fR] \fIpool\fR ...
148 .fi
149
150 .LP
151 .nf
152 \fBzpool set\fR \fIproperty\fR=\fIvalue\fR \fIpool\fR
153 .fi
154
155 .LP
156 .nf
157 \fBzpool split\fR [\fB-gLnP\fR] [\fB-R\fR \fIaltroot\fR] [\fB-o\fR \fIproperty=value\fR] \fIpool\fR \fInewpool\fR [\fIdevice\fR ...]
158 .fi
159
160 .LP
161 .nf
162 \fBzpool status\fR [\fB-c\fR \fBCMD\fR] [\fB-gLPvxD\fR] [\fB-T\fR d | u] [\fIpool\fR] ... [\fIinterval\fR [\fIcount\fR]]
163 .fi
164
165 .LP
166 .nf
167 \fBzpool upgrade\fR
168 .fi
169
170 .LP
171 .nf
172 \fBzpool upgrade\fR \fB-v\fR
173 .fi
174
175 .LP
176 .nf
177 \fBzpool upgrade\fR [\fB-V\fR \fIversion\fR] \fB-a\fR | \fIpool\fR ...
178 .fi
179
180 .SH DESCRIPTION
181 .sp
182 .LP
183 The \fBzpool\fR command configures \fBZFS\fR storage pools. A storage pool is a collection of devices that provides physical storage and data replication for \fBZFS\fR datasets.
184 .sp
185 .LP
186 All datasets within a storage pool share the same space. See \fBzfs\fR(8) for information on managing datasets.
187 .SS "Virtual Devices (vdevs)"
188 .sp
189 .LP
190 A "virtual device" describes a single device or a collection of devices organized according to certain performance and fault characteristics. The following virtual devices are supported:
191 .sp
192 .ne 2
193 .na
194 \fB\fBdisk\fR\fR
195 .ad
196 .RS 10n
197 A block device, typically located under \fB/dev\fR. \fBZFS\fR can use individual partitions, though the recommended mode of operation is to use whole disks. A disk can be specified by a full path, or it can be a shorthand name (the relative portion of the path under "/dev"). For example, "sda" is equivalent to "/dev/sda". A whole disk can be specified by omitting the partition designation. When given a whole disk, \fBZFS\fR automatically labels the disk, if necessary.
198 .RE
199
200 .sp
201 .ne 2
202 .na
203 \fB\fBfile\fR\fR
204 .ad
205 .RS 10n
206 A regular file. The use of files as a backing store is strongly discouraged. It is designed primarily for experimental purposes, as the fault tolerance of a file is only as good as the file system of which it is a part. A file must be specified by a full path.
207 .RE
208
209 .sp
210 .ne 2
211 .na
212 \fB\fBmirror\fR\fR
213 .ad
214 .RS 10n
215 A mirror of two or more devices. Data is replicated in an identical fashion across all components of a mirror. A mirror with \fIN\fR disks of size \fIX\fR can hold \fIX\fR bytes and can withstand (\fIN-1\fR) devices failing before data integrity is compromised.
216 .RE
217
218 .sp
219 .ne 2
220 .na
221 \fB\fBraidz\fR\fR
222 .ad
223 .br
224 .na
225 \fB\fBraidz1\fR\fR
226 .ad
227 .br
228 .na
229 \fB\fBraidz2\fR\fR
230 .ad
231 .br
232 .na
233 \fB\fBraidz3\fR\fR
234 .ad
235 .RS 10n
236 A variation on \fBRAID-5\fR that allows for better distribution of parity and eliminates the "\fBRAID-5\fR write hole" (in which data and parity become inconsistent after a power loss). Data and parity is striped across all disks within a \fBraidz\fR group.
237 .sp
238 A \fBraidz\fR group can have single-, double-, or triple-parity, meaning that the \fBraidz\fR group can sustain one, two, or three failures, respectively, without losing any data. The \fBraidz1\fR \fBvdev\fR type specifies a single-parity \fBraidz\fR group; the \fBraidz2\fR \fBvdev\fR type specifies a double-parity \fBraidz\fR group; and the \fBraidz3\fR \fBvdev\fR type specifies a triple-parity \fBraidz\fR group. The \fBraidz\fR \fBvdev\fR type is an alias for \fBraidz1\fR.
239 .sp
240 A \fBraidz\fR group with \fIN\fR disks of size \fIX\fR with \fIP\fR parity disks can hold approximately (\fIN-P\fR)*\fIX\fR bytes and can withstand \fIP\fR device(s) failing before data integrity is compromised. The minimum number of devices in a \fBraidz\fR group is one more than the number of parity disks. The recommended number is between 3 and 9 to help increase performance.
241 .RE
242
243 .sp
244 .ne 2
245 .na
246 \fB\fBspare\fR\fR
247 .ad
248 .RS 10n
249 A special pseudo-\fBvdev\fR which keeps track of available hot spares for a pool. For more information, see the "Hot Spares" section.
250 .RE
251
252 .sp
253 .ne 2
254 .na
255 \fB\fBlog\fR\fR
256 .ad
257 .RS 10n
258 A separate-intent log device. If more than one log device is specified, then writes are load-balanced between devices. Log devices can be mirrored. However, \fBraidz\fR \fBvdev\fR types are not supported for the intent log. For more information, see the "Intent Log" section.
259 .RE
260
261 .sp
262 .ne 2
263 .na
264 \fB\fBcache\fR\fR
265 .ad
266 .RS 10n
267 A device used to cache storage pool data. A cache device cannot be configured as a mirror or \fBraidz\fR group. For more information, see the "Cache Devices" section.
268 .RE
269
270 .sp
271 .LP
272 Virtual devices cannot be nested, so a mirror or \fBraidz\fR virtual device can only contain files or disks. Mirrors of mirrors (or other combinations) are not allowed.
273 .sp
274 .LP
275 A pool can have any number of virtual devices at the top of the configuration (known as "root vdevs"). Data is dynamically distributed across all top-level devices to balance data among devices. As new virtual devices are added, \fBZFS\fR automatically places data on the newly available devices.
276 .sp
277 .LP
278 Virtual devices are specified one at a time on the command line, separated by whitespace. The keywords "mirror" and "raidz" are used to distinguish where a group ends and another begins. For example, the following creates two root vdevs, each a mirror of two disks:
279 .sp
280 .in +2
281 .nf
282 # \fBzpool create mypool mirror sda sdb mirror sdc sdd\fR
283 .fi
284 .in -2
285 .sp
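.LP
Similarly, the following sketch (device names are illustrative) creates a pool with a single double-parity \fBraidz2\fR root vdev:
.sp
.in +2
.nf
# \fBzpool create mypool raidz2 sda sdb sdc sdd\fR
.fi
.in -2
.sp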
286
287 .SS "Device Failure and Recovery"
288 .sp
289 .LP
290 \fBZFS\fR supports a rich set of mechanisms for handling device failure and data corruption. All metadata and data is checksummed, and \fBZFS\fR automatically repairs bad data from a good copy when corruption is detected.
291 .sp
292 .LP
293 In order to take advantage of these features, a pool must make use of some form of redundancy, using either mirrored or \fBraidz\fR groups. While \fBZFS\fR supports running in a non-redundant configuration, where each root vdev is simply a disk or file, this is strongly discouraged. A single case of bit corruption can render some or all of your data unavailable.
294 .sp
295 .LP
296 A pool's health status is described by one of three states: online, degraded, or faulted. An online pool has all devices operating normally. A degraded pool is one in which one or more devices have failed, but the data is still available due to a redundant configuration. A faulted pool has corrupted metadata, or one or more faulted devices, and insufficient replicas to continue functioning.
297 .sp
298 .LP
299 The health of the top-level vdev, such as mirror or \fBraidz\fR device, is potentially impacted by the state of its associated vdevs, or component devices. A top-level vdev or component device is in one of the following states:
300 .sp
301 .ne 2
302 .na
303 \fB\fBDEGRADED\fR\fR
304 .ad
305 .RS 12n
306 One or more top-level vdevs are in the degraded state because one or more component devices are offline. Sufficient replicas exist to continue functioning.
307 .sp
308 One or more component devices are in the degraded or faulted state, but sufficient replicas exist to continue functioning. The underlying conditions are as follows:
309 .RS +4
310 .TP
311 .ie t \(bu
312 .el o
313 The number of checksum errors exceeds acceptable levels and the device is degraded as an indication that something may be wrong. \fBZFS\fR continues to use the device as necessary.
314 .RE
315 .RS +4
316 .TP
317 .ie t \(bu
318 .el o
319 The number of I/O errors exceeds acceptable levels. The device could not be marked as faulted because there are insufficient replicas to continue functioning.
320 .RE
321 .RE
322
323 .sp
324 .ne 2
325 .na
326 \fB\fBFAULTED\fR\fR
327 .ad
328 .RS 12n
329 One or more top-level vdevs are in the faulted state because one or more component devices are offline. Insufficient replicas exist to continue functioning.
330 .sp
331 One or more component devices are in the faulted state, and insufficient replicas exist to continue functioning. The underlying conditions are as follows:
332 .RS +4
333 .TP
334 .ie t \(bu
335 .el o
336 The device could be opened, but the contents did not match expected values.
337 .RE
338 .RS +4
339 .TP
340 .ie t \(bu
341 .el o
342 The number of I/O errors exceeds acceptable levels and the device is faulted to prevent further use of the device.
343 .RE
344 .RE
345
346 .sp
347 .ne 2
348 .na
349 \fB\fBOFFLINE\fR\fR
350 .ad
351 .RS 12n
352 The device was explicitly taken offline by the "\fBzpool offline\fR" command.
353 .RE
354
355 .sp
356 .ne 2
357 .na
358 \fB\fBONLINE\fR\fR
359 .ad
360 .RS 12n
361 The device is online and functioning.
362 .RE
363
364 .sp
365 .ne 2
366 .na
367 \fB\fBREMOVED\fR\fR
368 .ad
369 .RS 12n
370 The device was physically removed while the system was running. Device removal detection is hardware-dependent and may not be supported on all platforms.
371 .RE
372
373 .sp
374 .ne 2
375 .na
376 \fB\fBUNAVAIL\fR\fR
377 .ad
378 .RS 12n
379 The device could not be opened. If a pool is imported when a device was unavailable, then the device will be identified by a unique identifier instead of its path since the path was never correct in the first place.
380 .RE
381
382 .sp
383 .LP
384 If a device is removed and later re-attached to the system, \fBZFS\fR attempts to put the device online automatically. Device attach detection is hardware-dependent and might not be supported on all platforms.
385 .SS "Hot Spares"
386 .sp
387 .LP
388 \fBZFS\fR allows devices to be associated with pools as "hot spares". These devices are not actively used in the pool, but when an active device fails, it is automatically replaced by a hot spare. To create a pool with hot spares, specify a "spare" \fBvdev\fR with any number of devices. For example,
389 .sp
390 .in +2
391 .nf
392 # zpool create pool mirror sda sdb spare sdc sdd
393 .fi
394 .in -2
395 .sp
396
397 .sp
398 .LP
399 Spares can be shared across multiple pools, and can be added with the "\fBzpool add\fR" command and removed with the "\fBzpool remove\fR" command. Once a spare replacement is initiated, a new "spare" \fBvdev\fR is created within the configuration that will remain there until the original device is replaced. At this point, the hot spare becomes available again.
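.sp
.LP
For example, a spare can be added to and later removed from an existing pool (the device name is illustrative):
.sp
.in +2
.nf
# \fBzpool add pool spare sde\fR
# \fBzpool remove pool sde\fR
.fi
.in -2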
400 .sp
401 .LP
402 If a pool has a shared spare that is currently being used, the pool cannot be exported, since other pools may be relying on that shared spare; exporting it could lead to data corruption.
403 .sp
404 .LP
405 An in-progress spare replacement can be cancelled by detaching the hot spare. If the original faulted device is detached, then the hot spare assumes its place in the configuration, and is removed from the spare list of all active pools.
406 .sp
407 .LP
408 Spares cannot replace log devices.
409 .SS "Intent Log"
410 .sp
411 .LP
412 The \fBZFS\fR Intent Log (\fBZIL\fR) satisfies \fBPOSIX\fR requirements for synchronous transactions. For instance, databases often require their transactions to be on stable storage devices when returning from a system call. \fBNFS\fR and other applications can also use \fBfsync\fR() to ensure data stability. By default, the intent log is allocated from blocks within the main pool. However, it might be possible to get better performance using separate intent log devices such as \fBNVRAM\fR or a dedicated disk. For example:
413 .sp
414 .in +2
415 .nf
416 \fB# zpool create pool sda sdb log sdc\fR
417 .fi
418 .in -2
419 .sp
420
421 .sp
422 .LP
423 Multiple log devices can also be specified, and they can be mirrored. See the EXAMPLES section for an example of mirroring multiple log devices.
424 .sp
425 .LP
426 Log devices can be added, replaced, attached, detached, and imported and exported as part of the larger pool. Mirrored log devices can be removed by specifying the top-level mirror for the log.
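.sp
.LP
For example, a mirrored log can be added to an existing pool (device names are illustrative):
.sp
.in +2
.nf
# \fBzpool add pool log mirror sde sdf\fR
.fi
.in -2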
427 .SS "Cache Devices"
428 .sp
429 .LP
430 Devices can be added to a storage pool as "cache devices." These devices provide an additional layer of caching between main memory and disk. For read-heavy workloads, where the working set size is much larger than what can be cached in main memory, using cache devices allows much more of this working set to be served from low latency media. Using cache devices provides the greatest performance improvement for random read workloads of mostly static content.
431 .sp
432 .LP
433 To create a pool with cache devices, specify a "cache" \fBvdev\fR with any number of devices. For example:
434 .sp
435 .in +2
436 .nf
437 \fB# zpool create pool sda sdb cache sdc sdd\fR
438 .fi
439 .in -2
440 .sp
441
442 .sp
443 .LP
444 Cache devices cannot be mirrored or part of a \fBraidz\fR configuration. If a read error is encountered on a cache device, that read \fBI/O\fR is reissued to the original storage pool device, which might be part of a mirrored or \fBraidz\fR configuration.
445 .sp
446 .LP
447 The content of the cache devices is considered volatile, as is the case with other system caches.
448 .SS "Properties"
449 .sp
450 .LP
451 Each pool has several properties associated with it. Some properties are read-only statistics while others are configurable and change the behavior of the pool. The following are read-only properties:
452 .sp
453 .ne 2
454 .na
455 \fB\fBavailable\fR\fR
456 .ad
457 .RS 20n
458 Amount of storage available within the pool. This property can also be referred to by its shortened column name, "avail".
459 .RE
460
461 .sp
462 .ne 2
463 .na
464 \fB\fBcapacity\fR\fR
465 .ad
466 .RS 20n
467 Percentage of pool space used. This property can also be referred to by its shortened column name, "cap".
468 .RE
469
470 .sp
471 .ne 2
472 .na
473 \fB\fBexpandsize\fR\fR
474 .ad
475 .RS 20n
476 Amount of uninitialized space within the pool or device that can be used to
477 increase the total capacity of the pool. Uninitialized space consists of
478 any space on an EFI labeled vdev which has not been brought online
479 (i.e. zpool online -e). This space occurs when a LUN is dynamically expanded.
480 .RE
481
482 .sp
483 .ne 2
484 .na
485 \fB\fBfragmentation\fR\fR
486 .ad
487 .RS 20n
488 The amount of fragmentation in the pool.
489 .RE
490
491 .sp
492 .ne 2
493 .na
494 \fB\fBfree\fR\fR
495 .ad
496 .RS 20n
497 The amount of free space available in the pool.
498 .RE
499
500 .sp
501 .ne 2
502 .na
503 \fB\fBfreeing\fR\fR
504 .ad
505 .RS 20n
506 After a file system or snapshot is destroyed, the space it was using is
507 returned to the pool asynchronously. \fB\fBfreeing\fR\fR is the amount of
508 space remaining to be reclaimed. Over time \fB\fBfreeing\fR\fR will decrease
509 while \fB\fBfree\fR\fR increases.
510 .RE
511
512 .sp
513 .ne 2
514 .na
515 \fB\fBhealth\fR\fR
516 .ad
517 .RS 20n
518 The current health of the pool. Health can be "\fBONLINE\fR", "\fBDEGRADED\fR", "\fBFAULTED\fR", "\fBOFFLINE\fR", "\fBREMOVED\fR", or "\fBUNAVAIL\fR".
519 .RE
520
521 .sp
522 .ne 2
523 .na
524 \fB\fBguid\fR\fR
525 .ad
526 .RS 20n
527 A unique identifier for the pool.
528 .RE
529
530 .sp
531 .ne 2
532 .na
533 \fB\fBsize\fR\fR
534 .ad
535 .RS 20n
536 Total size of the storage pool.
537 .RE
538
539 .sp
540 .ne 2
541 .na
542 \fB\fBunsupported@\fR\fIfeature_guid\fR\fR
543 .ad
544 .RS 20n
545 .sp
546 Information about unsupported features that are enabled on the pool. See
547 \fBzpool-features\fR(5) for details.
548 .RE
549
550 .sp
551 .ne 2
552 .na
553 \fB\fBused\fR\fR
554 .ad
555 .RS 20n
556 Amount of storage space used within the pool.
557 .RE
558
559 .sp
560 .LP
561 The space usage properties report actual physical space available to the storage pool. The physical space can be different from the total amount of space that any contained datasets can actually use. The amount of space used in a \fBraidz\fR configuration depends on the characteristics of the data being written. In addition, \fBZFS\fR reserves some space for internal accounting that the \fBzfs\fR(8) command takes into account, but the \fBzpool\fR command does not. For non-full pools of a reasonable size, these effects should be invisible. For small pools, or pools that are close to being completely full, these discrepancies may become more noticeable.
562
563 .sp
564 .LP
565 The following property can be set at creation time:
566 .sp
567 .ne 2
568 .na
569 \fB\fBashift\fR=\fIashift\fR\fR
570 .ad
571 .sp .6
572 .RS 4n
573 Pool sector size exponent, to the power of 2 (internally referred to as "ashift"). Values from 9 to 13, inclusive, are valid; also, the special value 0 (the default) means to auto-detect using the kernel's block layer and a ZFS internal exception list. I/O operations will be aligned to the specified size boundaries. Additionally, the minimum (disk) write size will be set to the specified size, so this represents a space vs. performance trade-off. The typical case for setting this property is when performance is important and the underlying disks use 4KiB sectors but report 512B sectors to the OS (for compatibility reasons); in that case, set \fBashift=12\fR (which is 1<<12 = 4096).
574 .LP
575 For optimal performance, the pool sector size should be greater than or equal to the sector size of the underlying disks. Since the property cannot be changed after pool creation, if in a given pool, you \fIever\fR want to use drives that \fIreport\fR 4KiB sectors, you must set \fBashift=12\fR at pool creation time.
576 .LP
577 Keep in mind that the \fBashift\fR is \fIvdev\fR specific and is not a \fIpool\fR global. This means that when adding new vdevs to an existing pool you may need to specify the \fBashift\fR.
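.LP
For example, a minimal sketch (pool and device names are placeholders) creating a pool aligned to 4KiB sectors and later adding a vdev with the same alignment:
.sp
.in +2
.nf
# \fBzpool create -o ashift=12 tank mirror sda sdb\fR
# \fBzpool add -o ashift=12 tank mirror sdc sdd\fR
.fi
.in -2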
578 .RE
579
580 .sp
581 .LP
582 The following property can be set at creation time and import time:
583 .sp
584 .ne 2
585 .na
586 \fB\fBaltroot\fR=(unset) | \fIpath\fR\fR
587 .ad
588 .sp .6
589 .RS 4n
590 Alternate root directory. If set, this directory is prepended to any mount points within the pool. This can be used when examining an unknown pool where the mount points cannot be trusted, or in an alternate boot environment, where the typical paths are not valid. \fBaltroot\fR is not a persistent property. It is valid only while the system is up. Setting \fBaltroot\fR defaults to using \fBcachefile\fR=none, though this may be overridden using an explicit setting.
591 .RE
592
593 .sp
594 .LP
595 The following property can only be set at import time:
596 .sp
597 .ne 2
598 .na
599 \fB\fBreadonly\fR=\fBoff\fR | \fBon\fR\fR
600 .ad
601 .sp .6
602 .RS 4n
603 If set to \fBon\fR, the pool will be imported in read-only mode: synchronous data in the intent log will not be accessible, properties of the pool cannot be changed, and datasets of the pool can only be mounted read-only. The \fBreadonly\fR property of its datasets will be implicitly set to \fBon\fR.
604
605 It can also be specified by its column name of \fBrdonly\fR.
606
607 To write to a read-only pool, an export and import of the pool is required.
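.sp
For example (the pool name is a placeholder):
.sp
.in +2
.nf
# \fBzpool import -o readonly=on tank\fR
.fi
.in -2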
608 .RE
609
610 .sp
611 .LP
612 The following properties can be set at creation time and import time, and later changed with the \fBzpool set\fR command:
613 .sp
614 .ne 2
615 .na
616 \fB\fBautoexpand\fR=\fBoff\fR | \fBon\fR\fR
617 .ad
618 .sp .6
619 .RS 4n
620 Controls automatic pool expansion when the underlying LUN is grown. If set to \fBon\fR, the pool will be resized according to the size of the expanded device. If the device is part of a mirror or \fBraidz\fR then all devices within that mirror/\fBraidz\fR group must be expanded before the new space is made available to the pool. The default behavior is \fBoff\fR. This property can also be referred to by its shortened column name, \fBexpand\fR.
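.sp
For example, to enable automatic expansion on an existing pool (the pool name is a placeholder):
.sp
.in +2
.nf
# \fBzpool set autoexpand=on pool\fR
.fi
.in -2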
621 .RE
622
623 .sp
624 .ne 2
625 .na
626 \fB\fBautoreplace\fR=\fBoff\fR | \fBon\fR\fR
627 .ad
628 .sp .6
629 .RS 4n
630 Controls automatic device replacement. If set to "\fBoff\fR", device replacement must be initiated by the administrator by using the "\fBzpool replace\fR" command. If set to "\fBon\fR", any new device, found in the same physical location as a device that previously belonged to the pool, is automatically formatted and replaced. The default behavior is "\fBoff\fR". This property can also be referred to by its shortened column name, "replace". Autoreplace can also be used with virtual disks (like device mapper) provided that you use the /dev/disk/by-vdev paths set up by vdev_id.conf. See the vdev_id.conf man page for more details. Autoreplace and autoonline require libudev to be present at build time. If you're using device mapper disks, you must have libdevmapper installed at build time as well.
631 .RE
632
633 .sp
634 .ne 2
635 .na
636 \fB\fBbootfs\fR=(unset) | \fIpool\fR/\fIdataset\fR\fR
637 .ad
638 .sp .6
639 .RS 4n
640 Identifies the default bootable dataset for the root pool. This property is expected to be set mainly by the installation and upgrade programs. Not all Linux distribution boot processes use the \fBbootfs\fR property.
641 .RE
642
643 .sp
644 .ne 2
645 .na
646 \fB\fBcachefile\fR=\fBnone\fR | \fIpath\fR\fR
647 .ad
648 .sp .6
649 .RS 4n
650 Controls the location of where the pool configuration is cached. Discovering all pools on system startup requires a cached copy of the configuration data that is stored on the root file system. All pools in this cache are automatically imported when the system boots. Some environments, such as install and clustering, need to cache this information in a different location so that pools are not automatically imported. Setting this property caches the pool configuration in a different location that can later be imported with "\fBzpool import -c\fR". Setting it to the special value "\fBnone\fR" creates a temporary pool that is never cached, and the special value \fB\&''\fR (empty string) uses the default location.
651 .sp
652 Multiple pools can share the same cache file. Because the kernel destroys and recreates this file when pools are added and removed, care should be taken when attempting to access this file. When the last pool using a \fBcachefile\fR is exported or destroyed, the file is removed.
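.sp
For example, a minimal sketch (the path shown is only an illustration) caching the configuration in a non-default location:
.sp
.in +2
.nf
# \fBzpool set cachefile=/etc/zfs/alternate.cache pool\fR
.fi
.in -2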
653 .RE
654
655 .sp
656 .ne 2
657 .na
658 \fB\fBcomment\fR=(unset) | \fB\fItext\fR\fR
659 .ad
660 .sp .6
661 .RS 4n
662 A text string consisting of printable ASCII characters that will be stored such that it is available even if the pool becomes faulted. An administrator can provide additional information about a pool using this property.
663 .RE
664
665 .sp
666 .ne 2
667 .na
668 \fB\fBdedupditto\fR=\fB\fInumber\fR\fR
669 .ad
670 .sp .6
671 .RS 4n
672 Threshold for the number of block ditto copies. If the reference count for a deduplicated block increases above this number, a new ditto copy of this block is automatically stored. The default setting is 0 which causes no ditto copies to be created for deduplicated blocks. The minimum valid nonzero setting is 100.
673 .RE
674
675 .sp
676 .ne 2
677 .na
678 \fB\fBdelegation\fR=\fBon\fR | \fBoff\fR\fR
679 .ad
680 .sp .6
681 .RS 4n
682 Controls whether a non-privileged user is granted access based on the dataset permissions defined on the dataset. See \fBzfs\fR(8) for more information on \fBZFS\fR delegated administration.
683 .RE
684
685 .sp
686 .ne 2
687 .na
688 \fB\fBfailmode\fR=\fBwait\fR | \fBcontinue\fR | \fBpanic\fR\fR
689 .ad
690 .sp .6
691 .RS 4n
692 Controls the system behavior in the event of catastrophic pool failure. This condition is typically a result of a loss of connectivity to the underlying storage device(s) or a failure of all devices within the pool. The behavior of such an event is determined as follows:
693 .sp
694 .ne 2
695 .na
696 \fB\fBwait\fR\fR
697 .ad
698 .RS 12n
699 Blocks all \fBI/O\fR access until the device connectivity is recovered and the errors are cleared. This is the default behavior.
700 .RE
701
702 .sp
703 .ne 2
704 .na
705 \fB\fBcontinue\fR\fR
706 .ad
707 .RS 12n
708 Returns \fBEIO\fR to any new write \fBI/O\fR requests but allows reads to any of the remaining healthy devices. Any write requests that have yet to be committed to disk would be blocked.
709 .RE
710
711 .sp
712 .ne 2
713 .na
714 \fB\fBpanic\fR\fR
715 .ad
716 .RS 12n
717 Prints out a message to the console and generates a system crash dump.
718 .RE
719
720 .RE
721
722 .sp
723 .ne 2
724 .na
725 \fB\fBfeature@\fR\fIfeature_name\fR=\fBenabled\fR\fR
726 .ad
727 .RS 4n
728 The value of this property is the current state of \fIfeature_name\fR. The
729 only valid value when setting this property is \fBenabled\fR which moves
730 \fIfeature_name\fR to the enabled state. See \fBzpool-features\fR(5) for
731 details on feature states.
732 .RE
733
734 .sp
735 .ne 2
736 .na
737 \fB\fBlistsnapshots\fR=on | off\fR
738 .ad
739 .sp .6
740 .RS 4n
741 Controls whether information about snapshots associated with this pool is output when "\fBzfs list\fR" is run without the \fB-t\fR option. The default value is "off".
742 .sp
743 This property can also be referred to by its shortened name, \fBlistsnaps\fR.
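.sp
For example, to enable it on an existing pool (the pool name is a placeholder):
.sp
.in +2
.nf
# \fBzpool set listsnapshots=on pool\fR
.fi
.in -2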
744 .RE
745
746 .sp
747 .ne 2
748 .na
749 \fB\fBversion\fR=(unset) | \fIversion\fR\fR
750 .ad
751 .sp .6
752 .RS 4n
753 The current on-disk version of the pool. This can be increased, but never decreased. The preferred method of updating pools is with the "\fBzpool upgrade\fR" command, though this property can be used when a specific version is needed for backwards compatibility. Once feature flags are enabled on a pool this property will no longer have a value.
754 .RE
755
756 .SS "Subcommands"
757 .sp
758 .LP
759 All subcommands that modify state are logged persistently to the pool in their original form.
760 .sp
761 .LP
762 The \fBzpool\fR command provides subcommands to create and destroy storage pools, add capacity to storage pools, and provide information about the storage pools. The following subcommands are supported:
763 .sp
764 .ne 2
765 .na
766 \fB\fBzpool\fR \fB-?\fR\fR
767 .ad
768 .sp .6
769 .RS 4n
770 Displays a help message.
771 .RE
772
773 .sp
774 .ne 2
775 .na
776 \fB\fBzpool add\fR [\fB-fgLnP\fR] [\fB-o\fR \fIproperty=value\fR] \fIpool\fR \fIvdev\fR ...\fR
777 .ad
778 .sp .6
779 .RS 4n
780 Adds the specified virtual devices to the given pool. The \fIvdev\fR specification is described in the "Virtual Devices" section. The behavior of the \fB-f\fR option and the device checks performed are described in the "zpool create" subcommand.
781 .sp
782 .ne 2
783 .na
784 \fB\fB-f\fR\fR
785 .ad
786 .RS 6n
787 Forces use of \fBvdev\fRs, even if they appear in use or specify a conflicting replication level. Not all devices can be overridden in this manner.
788 .RE
789
790 .sp
791 .ne 2
792 .na
793 \fB\fB-g\fR\fR
794 .ad
795 .RS 6n
796 Display vdev GUIDs instead of the normal device names. These GUIDs can be used in place of device names for the zpool detach/offline/remove/replace commands.
797 .RE
798
799 .sp
800 .ne 2
801 .na
802 \fB\fB-L\fR\fR
803 .ad
804 .RS 6n
805 Display real paths for vdevs resolving all symbolic links. This can be used to look up the current block device name regardless of the /dev/disk/ path used to open it.
806 .RE
807
808 .sp
809 .ne 2
810 .na
811 \fB\fB-n\fR\fR
812 .ad
813 .RS 6n
814 Displays the configuration that would be used without actually adding the \fBvdev\fRs. The actual add operation can still fail due to insufficient privileges or device sharing.
815 .RE
816
817 .sp
818 .ne 2
819 .na
820 \fB\fB-P\fR\fR
821 .ad
822 .RS 6n
823 Display full paths for vdevs instead of only the last component of the path. This can be used in conjunction with the \fB-L\fR flag.
824 .RE
825
826 .sp
827 .ne 2
828 .na
829 \fB\fB-o\fR \fIproperty=value\fR\fR
830 .ad
831 .sp .6
832 .RS 4n
833 Sets the given pool properties. See the "Properties" section for a list of valid properties that can be set. The only property supported at the moment is \fBashift\fR. \fBDo note\fR that some properties (among them \fBashift\fR) are \fInot\fR inherited from a previous vdev. They are vdev specific, not pool specific.
834 .RE
835
836 Do not add a disk that is currently configured as a quorum device to a zpool. After a disk is in the pool, that disk can then be configured as a quorum device.
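.sp
For example, a minimal sketch (device names are illustrative) that previews and then performs the addition of a new mirror:
.sp
.in +2
.nf
# \fBzpool add -n pool mirror sde sdf\fR
# \fBzpool add pool mirror sde sdf\fR
.fi
.in -2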
837 .RE
838
839 .sp
840 .ne 2
841 .na
842 \fB\fBzpool attach\fR [\fB-f\fR] [\fB-o\fR \fIproperty=value\fR] \fIpool\fR \fIdevice\fR \fInew_device\fR\fR
843 .ad
844 .sp .6
845 .RS 4n
846 Attaches \fInew_device\fR to an existing \fBzpool\fR device. The existing device cannot be part of a \fBraidz\fR configuration. If \fIdevice\fR is not currently part of a mirrored configuration, \fIdevice\fR automatically transforms into a two-way mirror of \fIdevice\fR and \fInew_device\fR. If \fIdevice\fR is part of a two-way mirror, attaching \fInew_device\fR creates a three-way mirror, and so on. In either case, \fInew_device\fR begins to resilver immediately.
847 .sp
848 .ne 2
849 .na
850 \fB\fB-f\fR\fR
851 .ad
852 .RS 6n
853 Forces use of \fInew_device\fR, even if it appears to be in use. Not all devices can be overridden in this manner.
854 .RE
855
856 .sp
857 .ne 2
858 .na
859 \fB\fB-o\fR \fIproperty=value\fR\fR
860 .ad
861 .sp .6
862 .RS 4n
863 Sets the given pool properties. See the "Properties" section for a list of valid properties that can be set. The only property supported at the moment is "ashift".
864 .RE
865
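.sp
For example, attaching a second disk to a single-disk pool creates a two-way mirror (device names are illustrative):
.sp
.in +2
.nf
# \fBzpool attach pool sda sdb\fR
.fi
.in -2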
866 .RE
867
868 .sp
869 .ne 2
870 .na
871 \fB\fBzpool clear\fR \fIpool\fR [\fIdevice\fR] ...\fR
872 .ad
873 .sp .6
874 .RS 4n
875 Clears device errors in a pool. If no arguments are specified, all device errors within the pool are cleared. If one or more devices is specified, only those errors associated with the specified device or devices are cleared.
876 .RE
877
878 .sp
879 .ne 2
880 .na
881 \fB\fBzpool create\fR [\fB-fnd\fR] [\fB-o\fR \fIproperty=value\fR] ... [\fB-o\fR feature@\fIfeature=value\fR] ... [\fB-O\fR \fIfile-system-property=value\fR] ... [\fB-m\fR \fImountpoint\fR] [\fB-R\fR \fIroot\fR] [\fB-t\fR \fItname\fR] \fIpool\fR \fIvdev\fR ...\fR
882 .ad
883 .sp .6
884 .RS 4n
885 Creates a new storage pool containing the virtual devices specified on the command line. The pool name must begin with a letter, and can only contain alphanumeric characters as well as underscore ("_"), dash ("-"), period ("."), colon (":"), and space (" "). The pool names "mirror", "raidz", "spare" and "log" are reserved, as are names beginning with the pattern "c[0-9]". The \fBvdev\fR specification is described in the "Virtual Devices" section.
886 .sp
887 The command verifies that each device specified is accessible and not currently in use by another subsystem. There are some uses, such as being currently mounted, or specified as the dedicated dump device, that prevent a device from ever being used by \fBZFS\fR. Other uses, such as having a preexisting \fBUFS\fR file system, can be overridden with the \fB-f\fR option.
888 .sp
889 The command also checks that the replication strategy for the pool is consistent. An attempt to combine redundant and non-redundant storage in a single pool, or to mix disks and files, results in an error unless \fB-f\fR is specified. The use of differently sized devices within a single \fBraidz\fR or mirror group is also flagged as an error unless \fB-f\fR is specified.
890 .sp
891 Unless the \fB-R\fR option is specified, the default mount point is "/\fIpool\fR". The mount point must not exist or must be empty, or else the root dataset cannot be mounted. This can be overridden with the \fB-m\fR option.
892 .sp
893 By default all supported features are enabled on the new pool unless the \fB-d\fR option is specified.
894 .sp
895 .ne 2
896 .na
897 \fB\fB-f\fR\fR
898 .ad
899 .sp .6
900 .RS 4n
901 Forces use of \fBvdev\fRs, even if they appear in use or specify a conflicting replication level. Not all devices can be overridden in this manner.
902 .RE
903
904 .sp
905 .ne 2
906 .na
907 \fB\fB-n\fR\fR
908 .ad
909 .sp .6
910 .RS 4n
911 Displays the configuration that would be used without actually creating the pool. The actual pool creation can still fail due to insufficient privileges or device sharing.
912 .RE
913
914 .sp
915 .ne 2
916 .na
917 \fB\fB-d\fR\fR
918 .ad
919 .sp .6
920 .RS 4n
921 Do not enable any features on the new pool. Individual features can be enabled by setting their corresponding properties to \fBenabled\fR with the \fB-o\fR option. See \fBzpool-features\fR(5) for details about feature properties.
922 .RE
923
924 .sp
925 .ne 2
926 .na
927 \fB\fB-o\fR \fIproperty=value\fR [\fB-o\fR \fIproperty=value\fR] ...\fR
928 .ad
929 .sp .6
930 .RS 4n
931 Sets the given pool properties. See the "Properties" section for a list of valid properties that can be set.
932 .RE
933
934 .sp
935 .ne 2
936 .na
937 \fB\fB-o\fR feature@\fIfeature=value\fR [\fB-o\fR feature@\fIfeature=value\fR] ...\fR
938 .ad
939 .sp .6
940 .RS 4n
941 Sets the given pool feature. See \fBzpool-features(5)\fR for a list of valid features that can be set.
942 .sp
943 Value can be either \fBdisabled\fR or \fBenabled\fR.
944 .RE
945
946 .sp
947 .ne 2
948 .na
949 \fB\fB-O\fR \fIfile-system-property=value\fR\fR
950 .ad
951 .br
952 .na
953 \fB[\fB-O\fR \fIfile-system-property=value\fR] ...\fR
954 .ad
955 .sp .6
956 .RS 4n
957 Sets the given file system properties in the root file system of the pool. See the "Properties" section of \fBzfs\fR(8) for a list of valid properties that can be set.
958 .RE
959
960 .sp
961 .ne 2
962 .na
963 \fB\fB-R\fR \fIroot\fR\fR
964 .ad
965 .sp .6
966 .RS 4n
967 Equivalent to "-o cachefile=none,altroot=\fIroot\fR".
968 .RE
969
970 .sp
971 .ne 2
972 .na
973 \fB\fB-m\fR \fImountpoint\fR\fR
974 .ad
975 .sp .6
976 .RS 4n
977 Sets the mount point for the root dataset. The default mount point is "/\fIpool\fR" or "\fBaltroot\fR/\fIpool\fR" if \fBaltroot\fR is specified. The mount point must be an absolute path, "\fBlegacy\fR", or "\fBnone\fR". For more information on dataset mount points, see \fBzfs\fR(8).
978 .RE
979
980 .sp
981 .ne 2
982 .na
983 \fB\fB-t\fR \fItname\fR\fR
984 .ad
985 .sp .6
986 .RS 4n
987 Sets the in-core pool name to "\fBtname\fR" while the on-disk name will be the name specified as the pool name "\fBpool\fR". This will set the default cachefile property to none. This is intended to handle name space collisions when creating pools for other systems, such as virtual machines or physical machines whose pools live on network block devices.
988 .RE
989
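.sp
For example, a minimal sketch (names are illustrative) creating a mirrored pool with an explicit mount point:
.sp
.in +2
.nf
# \fBzpool create -m /export/tank tank mirror sda sdb\fR
.fi
.in -2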
990 .RE
991
992 .sp
993 .ne 2
994 .na
995 \fB\fBzpool destroy\fR [\fB-f\fR] \fIpool\fR\fR
996 .ad
997 .sp .6
998 .RS 4n
999 Destroys the given pool, freeing up any devices for other use. This command tries to unmount any active datasets before destroying the pool.
1000 .sp
1001 .ne 2
1002 .na
1003 \fB\fB-f\fR\fR
1004 .ad
1005 .RS 6n
1006 Forces any active datasets contained within the pool to be unmounted.
1007 .RE
1008
1009 .RE
1010
1011 .sp
1012 .ne 2
1013 .na
1014 \fB\fBzpool detach\fR \fIpool\fR \fIdevice\fR\fR
1015 .ad
1016 .sp .6
1017 .RS 4n
1018 Detaches \fIdevice\fR from a mirror. The operation is refused if there are no other valid replicas of the data. If \fIdevice\fR may be re-added to the pool later, consider the "\fBzpool offline\fR" command instead.
1019 .RE
1020
1021 .RE
1022
1023 .sp
1024 .ne 2
1025 .na
1026 \fBzpool events\fR [\fB-vHfc\fR] [\fIpool\fR] ...
1027 .ad
1028 .sp .6
1029 .RS 4n
1030 Lists the recent events generated by the ZFS kernel modules. See \fBzfs-events\fR(5) for more information about the subclasses and event payloads that can be generated.
1031
1032 .sp
1033 .ne 2
1034 .na
1035 \fB\fB-v\fR\fR
1036 .ad
1037 .RS 6n
1038 Print the full payload of each event, showing all available details.
1039 .RE
1040
1041 .sp
1042 .ne 2
1043 .na
1044 \fB\fB-H\fR\fR
1045 .ad
1046 .RS 6n
1047 Scripted mode. Do not display headers, and separate fields by a single tab instead of arbitrary space.
1048 .RE
1049
1050 .sp
1051 .ne 2
1052 .na
1053 \fB\fB-f\fR\fR
1054 .ad
1055 .RS 6n
1056 Follow mode. Wait for and display new events as they are generated.
1057 .RE
1058
1059 .sp
1060 .ne 2
1061 .na
1062 \fB\fB-c\fR\fR
1063 .ad
1064 .RS 6n
1065 Clear all previous events.
1066 .RE
1067
1068 .RE
1069
1070 .sp
1071 .ne 2
1072 .na
1073 \fB\fBzpool export\fR [\fB-a\fR] [\fB-f\fR] \fIpool\fR ...\fR
1074 .ad
1075 .sp .6
1076 .RS 4n
1077 Exports the given pools from the system. All devices are marked as exported, but are still considered in use by other subsystems. The devices can be moved between systems (even those of different endianness) and imported as long as a sufficient number of devices are present.
1078 .sp
1079 Before exporting the pool, all datasets within the pool are unmounted. A pool cannot be exported if it has a shared spare that is currently being used.
1080 .sp
1081 For pools to be portable, you must give the \fBzpool\fR command whole disks, not just partitions, so that \fBZFS\fR can label the disks with portable \fBEFI\fR labels. Otherwise, disk drivers on platforms of different endianness will not recognize the disks.
1082 .sp
1083 .ne 2
1084 .na
1085 \fB\fB-a\fR\fR
1086 .ad
1087 .RS 6n
1088 Exports all pools imported on the system.
1089 .RE
1090
1091 .sp
1092 .ne 2
1093 .na
1094 \fB\fB-f\fR\fR
1095 .ad
1096 .RS 6n
1097 Forcefully unmount all datasets, using the "\fBunmount -f\fR" command.
1098 .sp
1099 This command will forcefully export the pool even if it has a shared spare that is currently being used. This may lead to data corruption.
1100 .RE
1101
1102 .RE
1103
1104 .sp
1105 .ne 2
1106 .na
1107 \fB\fBzpool get\fR [\fB-Hp\fR] [\fB-o \fR\fIfield\fR[,...]] "\fIall\fR" | \fIproperty\fR[,...]
1108 \fIpool\fR ...\fR
1109 .ad
1110 .sp .6
1111 .RS 4n
1112 Retrieves the given list of properties (or all properties if "\fBall\fR" is used) for the specified storage pool(s). These properties are displayed with the following fields:
1113 .sp
1114 .in +2
1115 .nf
1116 name Name of storage pool
1117 property Property name
1118 value Property value
1119 source Property source, either 'default' or 'local'.
1120 .fi
1121 .in -2
1122 .sp
1123
1124 See the "Properties" section for more information on the available pool properties.
1125
1126 .sp
1127 .ne 2
1128 .na
1129 \fB\fB-H\fR\fR
1130 .ad
1131 .RS 6n
1132 Scripted mode. Do not display headers, and separate fields by a single tab instead of arbitrary space.
1133 .RE
1134
1135 .sp
1136 .ne 2
1137 .na
1138 \fB\fB-p\fR\fR
1139 .ad
1140 .RS 6n
1141 Display numbers in parsable (exact) values.
1142 .RE
1143
1144 .sp
1145 .ne 2
1146 .na
1147 \fB\fB-o\fR \fIfield\fR\fR
1148 .ad
1149 .RS 12n
1150 A comma-separated list of columns to display. \fBname,property,value,source\fR
1151 is the default value.
1152 .RE
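.sp
For example, to retrieve a single value in a script-friendly form (the pool name is a placeholder):
.sp
.in +2
.nf
# \fBzpool get -H -o value health tank\fR
.fi
.in -2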
1153 .RE
1154
1155 .sp
1156 .ne 2
1157 .na
1158 \fB\fBzpool history\fR [\fB-il\fR] [\fIpool\fR] ...\fR
1159 .ad
1160 .sp .6
1161 .RS 4n
1162 Displays the command history of the specified pools or all pools if no pool is specified.
1163 .sp
1164 .ne 2
1165 .na
1166 \fB\fB-i\fR\fR
1167 .ad
1168 .RS 6n
1169 Displays internally logged \fBZFS\fR events in addition to user initiated events.
1170 .RE
1171
1172 .sp
1173 .ne 2
1174 .na
1175 \fB\fB-l\fR\fR
1176 .ad
1177 .RS 6n
1178 Displays log records in long format, which, in addition to the standard format, includes the user name, the hostname, and the zone in which the operation was performed.
1179 .RE
1180
1181 .RE
1182
1183 .sp
1184 .ne 2
1185 .na
1186 \fB\fBzpool import\fR [\fB-d\fR \fIdir\fR | \fB-c\fR \fIcachefile\fR] [\fB-D\fR]\fR
1187 .ad
1188 .sp .6
1189 .RS 4n
1190 Lists pools available to import. If the \fB-d\fR option is not specified, this command searches for devices in "/dev". The \fB-d\fR option can be specified multiple times, and all directories are searched. If the device appears to be part of an exported pool, this command displays a summary of the pool with the name of the pool, a numeric identifier, as well as the \fIvdev\fR layout and current health of the device for each device or file. Destroyed pools, pools that were previously destroyed with the "\fBzpool destroy\fR" command, are not listed unless the \fB-D\fR option is specified.
1191 .sp
1192 The numeric identifier is unique, and can be used instead of the pool name when multiple exported pools of the same name are available.
1193 .sp
1194 .ne 2
1195 .na
1196 \fB\fB-c\fR \fIcachefile\fR\fR
1197 .ad
1198 .RS 16n
1199 Reads configuration from the given \fBcachefile\fR that was created with the "\fBcachefile\fR" pool property. This \fBcachefile\fR is used instead of searching for devices.
1200 .RE
1201
1202 .sp
1203 .ne 2
1204 .na
1205 \fB\fB-d\fR \fIdir\fR\fR
1206 .ad
1207 .RS 16n
1208 Searches for devices or files in \fIdir\fR. The \fB-d\fR option can be specified multiple times.
1209 .RE
1210
1211 .sp
1212 .ne 2
1213 .na
1214 \fB\fB-D\fR\fR
1215 .ad
1216 .RS 16n
1217 Lists destroyed pools only.
1218 .RE
1219
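.sp
For example, to search an alternate device directory and include destroyed pools (the directory shown is illustrative):
.sp
.in +2
.nf
# \fBzpool import -d /dev/disk/by-id -D\fR
.fi
.in -2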
1220 .RE
1221
1222 .sp
1223 .ne 2
1224 .na
1225 \fB\fBzpool import\fR [\fB-o\fR \fImntopts\fR] [ \fB-o\fR \fIproperty\fR=\fIvalue\fR] ... [\fB-d\fR \fIdir\fR | \fB-c\fR \fIcachefile\fR] [\fB-D\fR] [\fB-f\fR] [\fB-m\fR] [\fB-N\fR] [\fB-R\fR \fIroot\fR] [\fB-F\fR [\fB-n\fR]] [\fB-s\fR] \fB-a\fR\fR
1226 .ad
1227 .sp .6
1228 .RS 4n
1229 Imports all pools found in the search directories. Identical to the previous command, except that all pools with a sufficient number of devices available are imported. Destroyed pools, pools that were previously destroyed with the "\fBzpool destroy\fR" command, will not be imported unless the \fB-D\fR option is specified.
1230 .sp
1231 .ne 2
1232 .na
1233 \fB\fB-o\fR \fImntopts\fR\fR
1234 .ad
1235 .RS 21n
1236 Comma-separated list of mount options to use when mounting datasets within the pool. See \fBzfs\fR(8) for a description of dataset properties and mount options.
1237 .RE
1238
1239 .sp
1240 .ne 2
1241 .na
1242 \fB\fB-o\fR \fIproperty=value\fR\fR
1243 .ad
1244 .RS 21n
1245 Sets the specified property on the imported pool. See the "Properties" section for more information on the available pool properties.
1246 .RE
1247
1248 .sp
1249 .ne 2
1250 .na
1251 \fB\fB-c\fR \fIcachefile\fR\fR
1252 .ad
1253 .RS 21n
1254 Reads configuration from the given \fBcachefile\fR that was created with the "\fBcachefile\fR" pool property. This \fBcachefile\fR is used instead of searching for devices.
1255 .RE
1256
1257 .sp
1258 .ne 2
1259 .na
1260 \fB\fB-d\fR \fIdir\fR\fR
1261 .ad
1262 .RS 21n
1263 Searches for devices or files in \fIdir\fR. The \fB-d\fR option can be specified multiple times. This option is incompatible with the \fB-c\fR option.
1264 .RE
1265
1266 .sp
1267 .ne 2
1268 .na
1269 \fB\fB-D\fR\fR
1270 .ad
1271 .RS 21n
1272 Imports destroyed pools only. The \fB-f\fR option is also required.
1273 .RE
1274
1275 .sp
1276 .ne 2
1277 .na
1278 \fB\fB-f\fR\fR
1279 .ad
1280 .RS 21n
1281 Forces import, even if the pool appears to be potentially active.
1282 .RE
1283
1284 .sp
1285 .ne 2
1286 .na
1287 \fB\fB-F\fR\fR
1288 .ad
1289 .RS 21n
1290 Recovery mode for a non-importable pool. Attempt to return the pool to an importable state by discarding the last few transactions. Not all damaged pools can be recovered by using this option. If successful, the data from the discarded transactions is irretrievably lost. This option is ignored if the pool is importable or already imported.
1291 .RE
1292
1293 .sp
1294 .ne 2
1295 .na
1296 \fB\fB-a\fR\fR
1297 .ad
1298 .RS 21n
1299 Searches for and imports all pools found.
1300 .RE
1301
1302 .sp
1303 .ne 2
1304 .na
1305 \fB\fB-m\fR\fR
1306 .ad
1307 .RS 21n
1308 Allows a pool to import when there is a missing log device.
1309 .RE
1310
1311 .sp
1312 .ne 2
1313 .na
1314 \fB\fB-R\fR \fIroot\fR\fR
1315 .ad
1316 .RS 21n
1317 Sets the "\fBcachefile\fR" property to "\fBnone\fR" and the "\fIaltroot\fR" property to "\fIroot\fR".
1318 .RE
1319
1320 .sp
1321 .ne 2
1322 .na
1323 \fB\fB-N\fR\fR
1324 .ad
1325 .RS 21n
1326 Import the pool without mounting any file systems.
1327 .RE
1328
1329 .sp
1330 .ne 2
1331 .na
1332 \fB\fB-n\fR\fR
1333 .ad
1334 .RS 21n
1335 Used with the \fB-F\fR recovery option. Determines whether a non-importable pool can be made importable again, but does not actually perform the pool recovery. For more details about pool recovery mode, see the \fB-F\fR option, above.
1336 .RE
1337
1338 .sp
1339 .ne 2
1340 .na
1341 \fB\fB-X\fR\fR
1342 .ad
1343 .RS 21n
1344 Used with the \fB-F\fR recovery option. Determines whether extreme measures to find a valid txg should take place. This allows the pool to be rolled back to a txg which is no longer guaranteed to be consistent. Pools imported at an inconsistent txg may contain uncorrectable checksum errors. For more details about pool recovery mode, see the \fB-F\fR option, above.
1345 \fBWARNING\fR: This option can be extremely hazardous to the health of your pool and should only be used as a last resort.
1346 .RE
1347
1348 .sp
1349 .ne 2
1350 .na
1351 \fB\fB-T\fR\fR
1352 .ad
1353 .RS 21n
1354 Specify the txg to use for rollback. Implies \fB-FX\fR. For more details about pool recovery mode, see the \fB-X\fR option, above.
1355 \fBWARNING\fR: This option can be extremely hazardous to the health of your pool and should only be used as a last resort.
1356 .RE
1357
1358 .sp
1359 .ne 2
1360 .na
1361 \fB\fB-s\fR\fR
1362 .ad
1363 .RS 21n
1364 Scan using the default search path; the libblkid cache will not be consulted. A custom search path may be specified by setting the \fBZPOOL_IMPORT_PATH\fR environment variable.
1365 .RE
1366
1367 .RE
1368
1369 .sp
1370 .ne 2
1371 .na
1372 \fB\fBzpool import\fR [\fB-o\fR \fImntopts\fR] [ \fB-o\fR \fIproperty\fR=\fIvalue\fR] ... [\fB-d\fR \fIdir\fR | \fB-c\fR \fIcachefile\fR] [\fB-D\fR] [\fB-f\fR] [\fB-m\fR] [\fB-R\fR \fIroot\fR] [\fB-F\fR [\fB-n\fR]] [\fB-t\fR]] [\fB-s\fR] \fIpool\fR | \fIid\fR [\fInewpool\fR]\fR
1373 .ad
1374 .sp .6
1375 .RS 4n
1376 Imports a specific pool. A pool can be identified by its name or the numeric identifier. If \fInewpool\fR is specified, the pool is imported using the name \fInewpool\fR. Otherwise, it is imported with the same name as its exported name.
1377 .sp
1378 If a device is removed from a system without running "\fBzpool export\fR" first, the device appears as potentially active. It cannot be determined if this was a failed export, or whether the device is really in use from another host. To import a pool in this state, the \fB-f\fR option is required.
1379 .sp
1380 .ne 2
1381 .na
1382 \fB\fB-o\fR \fImntopts\fR\fR
1383 .ad
1384 .sp .6
1385 .RS 4n
1386 Comma-separated list of mount options to use when mounting datasets within the pool. See \fBzfs\fR(8) for a description of dataset properties and mount options.
1387 .RE
1388
1389 .sp
1390 .ne 2
1391 .na
1392 \fB\fB-o\fR \fIproperty=value\fR\fR
1393 .ad
1394 .sp .6
1395 .RS 4n
1396 Sets the specified property on the imported pool. See the "Properties" section for more information on the available pool properties.
1397 .RE
1398
1399 .sp
1400 .ne 2
1401 .na
1402 \fB\fB-c\fR \fIcachefile\fR\fR
1403 .ad
1404 .sp .6
1405 .RS 4n
1406 Reads configuration from the given \fBcachefile\fR that was created with the "\fBcachefile\fR" pool property. This \fBcachefile\fR is used instead of searching for devices.
1407 .RE
1408
1409 .sp
1410 .ne 2
1411 .na
1412 \fB\fB-d\fR \fIdir\fR\fR
1413 .ad
1414 .sp .6
1415 .RS 4n
1416 Searches for devices or files in \fIdir\fR. The \fB-d\fR option can be specified multiple times. This option is incompatible with the \fB-c\fR option.
1417 .RE
1418
1419 .sp
1420 .ne 2
1421 .na
1422 \fB\fB-D\fR\fR
1423 .ad
1424 .sp .6
1425 .RS 4n
1426 Imports a destroyed pool. The \fB-f\fR option is also required.
1427 .RE
1428
1429 .sp
1430 .ne 2
1431 .na
1432 \fB\fB-f\fR\fR
1433 .ad
1434 .sp .6
1435 .RS 4n
1436 Forces import, even if the pool appears to be potentially active.
1437 .RE
1438
1439 .sp
1440 .ne 2
1441 .na
1442 \fB\fB-F\fR\fR
1443 .ad
1444 .sp .6
1445 .RS 4n
1446 Recovery mode for a non-importable pool. Attempt to return the pool to an importable state by discarding the last few transactions. Not all damaged pools can be recovered by using this option. If successful, the data from the discarded transactions is irretrievably lost. This option is ignored if the pool is importable or already imported.
1447 .RE
1448
1449 .sp
1450 .ne 2
1451 .na
1452 \fB\fB-R\fR \fIroot\fR\fR
1453 .ad
1454 .sp .6
1455 .RS 4n
1456 Sets the "\fBcachefile\fR" property to "\fBnone\fR" and the "\fIaltroot\fR" property to "\fIroot\fR".
1457 .RE
1458
1459 .sp
1460 .ne 2
1461 .na
1462 \fB\fB-n\fR\fR
1463 .ad
1464 .sp .6
1465 .RS 4n
1466 Used with the \fB-F\fR recovery option. Determines whether a non-importable pool can be made importable again, but does not actually perform the pool recovery. For more details about pool recovery mode, see the \fB-F\fR option, above.
1467 .RE
1468
1469 .sp
1470 .ne 2
1471 .na
1472 \fB\fB-X\fR\fR
1473 .ad
1474 .sp .6
1475 .RS 4n
1476 Used with the \fB-F\fR recovery option. Determines whether extreme measures to find a valid txg should take place. This allows the pool to be rolled back to a txg which is no longer guaranteed to be consistent. Pools imported at an inconsistent txg may contain uncorrectable checksum errors. For more details about pool recovery mode, see the \fB-F\fR option, above.
1477 \fBWARNING\fR: This option can be extremely hazardous to the health of your pool and should only be used as a last resort.
1478 .RE
1479
1480 .sp
1481 .ne 2
1482 .na
1483 \fB\fB-T\fR\fR
1484 .ad
1485 .sp .6
1486 .RS 4n
1487 Specify the txg to use for rollback. Implies \fB-FX\fR. For more details about pool recovery mode, see the \fB-X\fR option, above.
1488 \fBWARNING\fR: This option can be extremely hazardous to the health of your pool and should only be used as a last resort.
1489 .RE
1490
1491 .sp
1492 .ne 2
1493 .na
1494 \fB\fB-t\fR\fR
1495 .ad
1496 .sp .6
1497 .RS 4n
1498 Used with "\fBnewpool\fR". Specifies that "\fBnewpool\fR" is temporary. Temporary pool names last until export. Ensures that the original pool name will be used in all label updates and therefore is retained upon export. Will also set -o cachefile=none when not explicitly specified.
1499 .RE
1500
1501 .sp
1502 .ne 2
1503 .na
1504 \fB\fB-m\fR\fR
1505 .ad
1506 .sp .6
1507 .RS 4n
1508 Allows a pool to import when there is a missing log device.
1509 .RE
1510
1511 .sp
1512 .ne 2
1513 .na
1514 \fB\fB-s\fR\fR
1515 .ad
1516 .sp .6
1517 .RS 4n
1518 Scan using the default search path; the libblkid cache will not be consulted. A custom search path may be specified by setting the \fBZPOOL_IMPORT_PATH\fR environment variable.
1519 .RE
1520
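.sp
For example, to import the exported pool \fBtank\fR under a new name (names are placeholders):
.sp
.in +2
.nf
# \fBzpool import tank tank2\fR
.fi
.in -2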
1521 .RE
1522
1523 .sp
1524 .ne 2
1525 .na
1526 \fB\fBzpool iostat\fR [\fB-c\fR \fBCMD\fR] [\fB-T\fR \fBd\fR | \fBu\fR] [\fB-ghHLpPvy\fR] [[\fB-lq\fR]|[\fB-r\fR|\fB-w\fR]] [[\fIpool\fR ...]|[\fIpool vdev\fR ...]|[\fIvdev\fR ...]] [\fIinterval\fR[\fIcount\fR]]\fR
1527
1528 .ad
1529 .sp .6
1530 .RS 4n
1531 Displays \fBI/O\fR statistics for the given \fIpool\fRs/\fIvdev\fRs. You can
1532 pass in a list of \fIpool\fRs, a \fIpool\fR and list of \fIvdev\fRs in that
1533 \fIpool\fR, or a list of any \fIvdev\fRs from any \fIpool\fR. If no items are
1534 specified, statistics for every pool in the system are shown. When given an
1535 interval, the statistics are printed every \fIinterval\fR seconds until
1536 \fBCtrl-C\fR is pressed. If \fIcount\fR is specified, the command exits after
1537 \fIcount\fR reports are printed. The first report printed is always the
1538 statistics since boot regardless of whether \fIinterval\fR and \fIcount\fR
1539 are passed. However, this behavior can be suppressed with the -y flag. Also
1540 note that the units of 'K', 'M', 'G'... that are printed in the report are in
1541 base 1024. To get the raw values, use the \fB-p\fR flag.
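.sp
For example, to print per-vdev statistics for a single pool every five seconds (the pool name is a placeholder):
.sp
.in +2
.nf
# \fBzpool iostat -v tank 5\fR
.fi
.in -2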
1542 .sp
1543 .ne 2
1544 .na
1545 \fB\fB-c\fR \fBCMD\fR\fR
1546 .ad
1547 .RS 12n
1548 Run a command on each vdev and include the first line of its output
1549 .sp
1550 The \fB-c\fR option allows you to run an arbitrary command on each vdev and
1551 display the first line of output in \fBzpool iostat\fR. The following environment
1552 variables are set before running each command:
1553 .sp
1554 \fB$VDEV_PATH\fR: Full path to the vdev.
1555 .LP
1556 \fB$VDEV_UPATH\fR: "Underlying path" to the vdev. For device mapper, multipath, or
1557 partitioned vdevs, \fBVDEV_UPATH\fR is the actual underlying /dev/sd* disk.
1558 This can be useful if the command you're running requires a /dev/sd* device.
1559 .LP
1560 \fB$VDEV_ENC_SYSFS_PATH\fR: The sysfs path to the vdev's enclosure LEDs (if any).
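.LP
For example, a minimal sketch that echoes the underlying device of each vdev:
.sp
.in +2
.nf
# \fBzpool iostat -c 'echo $VDEV_UPATH'\fR
.fi
.in -2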
1561 .RE
1562
1563 .sp
1564 .ne 2
1565 .na
1566 \fB\fB-T\fR \fBu\fR | \fBd\fR\fR
1567 .ad
1568 .RS 12n
1569 Display a time stamp.
1570 .sp
1571 Specify \fBu\fR for a printed representation of the internal representation of time. See \fBtime\fR(2). Specify \fBd\fR for standard date format. See \fBdate\fR(1).
1572 .RE
1573
1574 .sp
1575 .ne 2
1576 .na
1577 \fB\fB-g\fR\fR
1578 .ad
1579 .RS 12n
1580 Display vdev GUIDs instead of the normal device names. These GUIDs can be used in place of device names for the zpool detach/offline/remove/replace commands.
1581 .RE
1582
1583 .sp
1584 .ne 2
1585 .na
1586 \fB\fB-H\fR\fR
1587 .ad
1588 .RS 12n
1589 Scripted mode. Do not display headers, and separate fields by a single tab instead of arbitrary space.
1590 .RE
1591
1592 .sp
1593 .ne 2
1594 .na
1595 \fB\fB-L\fR\fR
1596 .ad
1597 .RS 12n
1598 Display real paths for vdevs resolving all symbolic links. This can be used to look up the current block device name regardless of the /dev/disk/ path used to open it.
1599 .RE
1600
1601 .sp
1602 .ne 2
1603 .na
1604 \fB\fB-p\fR\fR
1605 .ad
1606 .RS 12n
1607 Display numbers in parsable (exact) values. Time values are in nanoseconds.
1608 .RE
1609
1610 .sp
1611 .ne 2
1612 .na
1613 \fB\fB-P\fR\fR
1614 .ad
1615 .RS 12n
1616 Display full paths for vdevs instead of only the last component of the path. This can be used in conjunction with the \fB-L\fR flag.
1617 .RE
1618
1619 .sp
1620 .ne 2
1621 .na
1622 \fB\fB-r\fR\fR
1623 .ad
1624 .RS 12n
1625 Print request size histograms for the leaf ZIOs. This includes histograms of
1626 individual ZIOs ("ind") and aggregate ZIOs ("agg"). These stats can be useful
1627 for seeing how well the ZFS IO aggregator is working. Do not confuse these
1628 request size stats with the block layer requests; it's possible ZIOs can
1629 be broken up before being sent to the block device.
1630 .RE
1631
1632 .sp
1633 .ne 2
1634 .na
1635 \fB\fB-v\fR\fR
1636 .ad
1637 .RS 12n
1638 Verbose statistics. Reports usage statistics for individual \fIvdevs\fR within the pool, in addition to the pool-wide statistics.
1639 .RE
1640
1641 .sp
1642 .ne 2
1643 .na
1644 \fB\fB-y\fR\fR
1645 .ad
1646 .RS 12n
1647 Omit statistics since boot. Normally the first line of output reports the statistics since boot. This option suppresses that first line of output.
1648 .RE
1649 .sp
1650 .ne 2
1651 .na
1652 \fB\fB-w\fR\fR
1653 .ad
1654 .RS 12n
1655 Display latency histograms:
1656
1657 .sp
1658 .ne 2
1659 .na
1660 total_wait:
1661 .ad
1662 .RS 20n
1663 Total IO time (queuing + disk IO time).
1664 .RE
1665 .ne 2
1666 .na
1667 disk_wait:
1668 .ad
1669 .RS 20n
1670 Disk IO time (time reading/writing the disk).
1671 .RE
1672 .ne 2
1673 .na
1674 syncq_wait:
1675 .ad
1676 .RS 20n
1677 Amount of time IO spent in synchronous priority queues. Does not include
1678 disk time.
1679 .RE
1680 .ne 2
1681 .na
1682 asyncq_wait:
1683 .ad
1684 .RS 20n
1685 Amount of time IO spent in asynchronous priority queues. Does not include
1686 disk time.
1687 .RE
1688 .ne 2
1689 .na
1690 scrub:
1691 .ad
1692 .RS 20n
Amount of time IO spent in scrub queue. Does not include disk time.
.RE
1697
1698 All histogram buckets are power-of-two sized. The time labels are the end
1699 ranges of the buckets, so for example, a 15ns bucket stores latencies from
1700 8-15ns. The last bucket is also a catch-all for latencies higher than the
1701 maximum.
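.sp
For example, latency histograms for a hypothetical pool \fItank\fR could be refreshed every second as follows:
.sp
.in +2
.nf
# \fBzpool iostat -w tank 1\fR
.fi
.in -2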
1702 .RE
1703 .sp
1704 .ne 2
1705 .na
1706 \fB\fB-l\fR\fR
1707 .ad
1708 .RS 12n
1709 Include average latency statistics:
1710
1711 .sp
1712 .ne 2
1713 .na
1714 total_wait:
1715 .ad
1716 .RS 20n
1717 Average total IO time (queuing + disk IO time).
1718 .RE
1719 .ne 2
1720 .na
1721 disk_wait:
1722 .ad
1723 .RS 20n
1724 Average disk IO time (time reading/writing the disk).
1725 .RE
1726 .ne 2
1727 .na
1728 syncq_wait:
1729 .ad
1730 .RS 20n
1731 Average amount of time IO spent in synchronous priority queues. Does not
1732 include disk time.
1733 .RE
1734 .ne 2
1735 .na
1736 asyncq_wait:
1737 .ad
1738 .RS 20n
1739 Average amount of time IO spent in asynchronous priority queues. Does not
1740 include disk time.
1741 .RE
1742 .ne 2
1743 .na
1744 scrub:
1745 .ad
1746 .RS 20n
1747 Average queuing time in scrub queue. Does not include disk time.
1748 .RE
1749
1750 .RE
1751 .sp
1752 .ne 2
1753 .na
1754 \fB\fB-q\fR\fR
1755 .ad
1756 .RS 12n
1757 Include active queue statistics. Each priority queue has both pending ("pend")
1758 and active ("activ") IOs. Pending IOs are waiting to be issued to the disk, and
1759 active IOs have been issued to disk and are waiting for completion. These stats
1760 are broken out by priority queue:
1761 .sp
1762 .ne 2
1763 .na
1764 syncq_read/write:
1765 .ad
1766 .RS 20n
1767 Current number of entries in synchronous priority queues.
1768 .RE
1769 .ne 2
1770 .na
1771 asyncq_read/write:
1772 .ad
1773 .RS 20n
1774 Current number of entries in asynchronous priority queues.
1775 .RE
1776 .ne 2
1777 .na
1778 scrubq_read:
1779 .ad
1780 .RS 20n
1781 Current number of entries in scrub queue.
1782 .RE
1783
1784 All queue statistics are instantaneous measurements of the number of entries
1785 in the queues. If you specify an interval, the measurements will be sampled
1786 from the end of the interval.
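.sp
For example, queue depths for a hypothetical pool \fItank\fR could be sampled every second as follows:
.sp
.in +2
.nf
# \fBzpool iostat -q tank 1\fR
.fi
.in -2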
1787 .RE
1788
1789 .RE
1790
1791 .sp
1792 .ne 2
1793 .na
\fB\fBzpool labelclear\fR [\fB-f\fR] \fIdevice\fR\fR
1795 .ad
1796 .sp .6
1797 .RS 4n
1798 Removes ZFS label information from the specified device. The device must not be part of an active pool configuration.
1799 .sp
1800 .ne 2
1801 .na
1802 \fB\fB-f\fR\fR
1803 .ad
1804 .RS 12n
1805 Treat exported or foreign devices as inactive.
1806 .RE
1807
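For example, assuming a hypothetical disk \fB/dev/sdc\fR that once belonged to an exported pool, its labels could be cleared as follows:
.sp
.in +2
.nf
# \fBzpool labelclear -f /dev/sdc\fR
.fi
.in -2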
1808 .RE
1809
1810 .sp
1811 .ne 2
1812 .na
1813 \fB\fBzpool list\fR [\fB-T\fR \fBd\fR | \fBu\fR] [\fB-HgLpPv\fR] [\fB-o\fR \fIprops\fR[,...]] [\fIpool\fR] ... [\fIinterval\fR[\fIcount\fR]]\fR
1814 .ad
1815 .sp .6
1816 .RS 4n
1817 Lists the given pools along with a health status and space usage. If no \fIpools\fR are specified, all pools in the system are listed. When given an \fIinterval\fR, the information is printed every \fIinterval\fR seconds until \fBCtrl-C\fR is pressed. If \fIcount\fR is specified, the command exits after \fIcount\fR reports are printed.
1818 .sp
1819 .ne 2
1820 .na
1821 \fB\fB-H\fR\fR
1822 .ad
1823 .RS 12n
1824 Scripted mode. Do not display headers, and separate fields by a single tab instead of arbitrary space.
1825 .RE
1826
1827 .sp
1828 .ne 2
1829 .na
1830 \fB\fB-g\fR\fR
1831 .ad
1832 .RS 12n
1833 Display vdev GUIDs instead of the normal device names. These GUIDs can be used in place of device names for the zpool detach/offline/remove/replace commands.
1834 .RE
1835
1836 .sp
1837 .ne 2
1838 .na
1839 \fB\fB-L\fR\fR
1840 .ad
1841 .RS 12n
1842 Display real paths for vdevs resolving all symbolic links. This can be used to look up the current block device name regardless of the /dev/disk/ path used to open it.
1843 .RE
1844
1845 .sp
1846 .ne 2
1847 .na
1848 \fB\fB-p\fR\fR
1849 .ad
1850 .RS 12n
1851 Display numbers in parsable (exact) values.
1852 .RE
1853
1854 .sp
1855 .ne 2
1856 .na
1857 \fB\fB-P\fR\fR
1858 .ad
1859 .RS 12n
1860 Display full paths for vdevs instead of only the last component of the path. This can be used in conjunction with the \fB-L\fR flag.
1861 .RE
1862
1863 .sp
1864 .ne 2
1865 .na
1866 \fB\fB-T\fR \fBd\fR | \fBu\fR\fR
1867 .ad
1868 .RS 12n
1869 Display a time stamp.
1870 .sp
1871 Specify \fBu\fR for a printed representation of the internal representation of time. See \fBtime\fR(2). Specify \fBd\fR for standard date format. See \fBdate\fR(1).
1872 .RE
1873
1874 .sp
1875 .ne 2
1876 .na
1877 \fB\fB-o\fR \fIprops\fR\fR
1878 .ad
1879 .RS 12n
Comma-separated list of properties to display. See the "Properties" section for a list of valid properties. The default list is "name, size, allocated, free, fragmentation, expandsize, capacity, dedupratio, health, altroot".
1881 .RE
1882
1883 .sp
1884 .ne 2
1885 .na
1886 \fB\fB-v\fR\fR
1887 .ad
1888 .RS 12n
Verbose statistics. Reports usage statistics for individual \fIvdevs\fR within the pool, in addition to the pool-wide statistics.
1890 .RE
1891
1892 .RE
1893
1894 .sp
1895 .ne 2
1896 .na
1897 \fB\fBzpool offline\fR [\fB-t\fR] \fIpool\fR \fIdevice\fR ...\fR
1898 .ad
1899 .sp .6
1900 .RS 4n
1901 Takes the specified physical device offline. While the \fIdevice\fR is offline, no attempt is made to read or write to the device.
1902 .sp
1903 This command is not applicable to spares or cache devices.
1904 .sp
1905 .ne 2
1906 .na
1907 \fB\fB-t\fR\fR
1908 .ad
1909 .RS 6n
1910 Temporary. Upon reboot, the specified physical device reverts to its previous state.
1911 .RE
1912
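For example, a hypothetical device \fBsdc\fR in pool \fItank\fR could be taken offline until the next reboot as follows:
.sp
.in +2
.nf
# \fBzpool offline -t tank sdc\fR
.fi
.in -2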
1913 .RE
1914
1915 .sp
1916 .ne 2
1917 .na
1918 \fB\fBzpool online\fR [\fB-e\fR] \fIpool\fR \fIdevice\fR...\fR
1919 .ad
1920 .sp .6
1921 .RS 4n
1922 Brings the specified physical device online.
1923 .sp
1924 This command is not applicable to spares or cache devices.
1925 .sp
1926 .ne 2
1927 .na
1928 \fB\fB-e\fR\fR
1929 .ad
1930 .RS 6n
1931 Expand the device to use all available space. If the device is part of a mirror or \fBraidz\fR then all devices must be expanded before the new space will become available to the pool.
1932 .RE
1933
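For example, after growing the underlying storage of a hypothetical device \fBsdc\fR in pool \fItank\fR, the new space could be brought online as follows:
.sp
.in +2
.nf
# \fBzpool online -e tank sdc\fR
.fi
.in -2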
1934 .RE
1935
1936 .sp
1937 .ne 2
1938 .na
\fB\fBzpool reguid\fR \fIpool\fR\fR
1940 .ad
1941 .sp .6
1942 .RS 4n
1943 Generates a new unique identifier for the pool. You must ensure that all
1944 devices in this pool are online and healthy before performing this action.
1945 .RE
1946
1947 .sp
1948 .ne 2
1949 .na
\fB\fBzpool reopen\fR \fIpool\fR\fR
1951 .ad
1952 .sp .6
1953 .RS 4n
1954 Reopen all the vdevs associated with the pool.
1955 .RE
1956
1957 .sp
1958 .ne 2
1959 .na
1960 \fB\fBzpool remove\fR \fIpool\fR \fIdevice\fR ...\fR
1961 .ad
1962 .sp .6
1963 .RS 4n
1964 Removes the specified device from the pool. This command currently only supports removing hot spares, cache, and log devices. A mirrored log device can be removed by specifying the top-level mirror for the log. Non-log devices that are part of a mirrored configuration can be removed using the \fBzpool detach\fR command. Non-redundant and \fBraidz\fR devices cannot be removed from a pool.
1965 .RE
1966
1967 .sp
1968 .ne 2
1969 .na
1970 \fB\fBzpool replace\fR [\fB-f\fR] [\fB-o\fR \fIproperty=value\fR] \fIpool\fR \fIold_device\fR [\fInew_device\fR]\fR
1971 .ad
1972 .sp .6
1973 .RS 4n
1974 Replaces \fIold_device\fR with \fInew_device\fR. This is equivalent to attaching \fInew_device\fR, waiting for it to resilver, and then detaching \fIold_device\fR.
1975 .sp
1976 The size of \fInew_device\fR must be greater than or equal to the minimum size of all the devices in a mirror or \fBraidz\fR configuration.
1977 .sp
1978 \fInew_device\fR is required if the pool is not redundant. If \fInew_device\fR is not specified, it defaults to \fIold_device\fR. This form of replacement is useful after an existing disk has failed and has been physically replaced. In this case, the new disk may have the same \fB/dev\fR path as the old device, even though it is actually a different disk. \fBZFS\fR recognizes this.
1979 .sp
1980 .ne 2
1981 .na
1982 \fB\fB-f\fR\fR
1983 .ad
1984 .RS 6n
Forces use of \fInew_device\fR, even if it appears to be in use. Not all devices can be overridden in this manner.
1986 .RE
1987
1988 .sp
1989 .ne 2
1990 .na
\fB\fB-o\fR \fIproperty=value\fR\fR
.ad
1994 .RS 6n
Sets the given pool properties. See the "Properties" section for a list of valid properties that can be set. The only property supported at the moment is \fBashift\fR. Note that some properties (among them \fBashift\fR) are \fInot\fR inherited from a previous vdev; they are vdev-specific, not pool-specific.
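.sp
For example (the device names and ashift value here are illustrative), a failed disk could be replaced while forcing a 4096-byte sector size on the new vdev as follows:
.sp
.in +2
.nf
# \fBzpool replace -o ashift=12 tank sda sdb\fR
.fi
.in -2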
1996 .RE
1997
1998 .RE
1999
2000 .sp
2001 .ne 2
2002 .na
2003 \fB\fBzpool scrub\fR [\fB-s\fR] \fIpool\fR ...\fR
2004 .ad
2005 .sp .6
2006 .RS 4n
2007 Begins a scrub. The scrub examines all data in the specified pools to verify that it checksums correctly. For replicated (mirror or \fBraidz\fR) devices, \fBZFS\fR automatically repairs any damage discovered during the scrub. The "\fBzpool status\fR" command reports the progress of the scrub and summarizes the results of the scrub upon completion.
2008 .sp
2009 Scrubbing and resilvering are very similar operations. The difference is that resilvering only examines data that \fBZFS\fR knows to be out of date (for example, when attaching a new device to a mirror or replacing an existing device), whereas scrubbing examines all data to discover silent errors due to hardware faults or disk failure.
2010 .sp
2011 Because scrubbing and resilvering are \fBI/O\fR-intensive operations, \fBZFS\fR only allows one at a time. If a scrub is already in progress, the "\fBzpool scrub\fR" command terminates it and starts a new scrub. If a resilver is in progress, \fBZFS\fR does not allow a scrub to be started until the resilver completes.
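.sp
For example, a scrub of a hypothetical pool \fItank\fR could be started, and later stopped, as follows:
.sp
.in +2
.nf
# \fBzpool scrub tank\fR
# \fBzpool scrub -s tank\fR
.fi
.in -2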
2012 .sp
2013 .ne 2
2014 .na
2015 \fB\fB-s\fR\fR
2016 .ad
2017 .RS 6n
2018 Stop scrubbing.
2019 .RE
2020
2021 .RE
2022
2023 .sp
2024 .ne 2
2025 .na
2026 \fB\fBzpool set\fR \fIproperty\fR=\fIvalue\fR \fIpool\fR\fR
2027 .ad
2028 .sp .6
2029 .RS 4n
2030 Sets the given property on the specified pool. See the "Properties" section for more information on what properties can be set and acceptable values.
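.sp
For example, the \fBautoexpand\fR property could be enabled on a hypothetical pool \fItank\fR as follows:
.sp
.in +2
.nf
# \fBzpool set autoexpand=on tank\fR
.fi
.in -2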
2031 .RE
2032
2033 .sp
2034 .ne 2
2035 .na
2036 \fBzpool split\fR [\fB-gLnP\fR] [\fB-R\fR \fIaltroot\fR] [\fB-o\fR \fIproperty=value\fR] \fIpool\fR \fInewpool\fR [\fIdevice\fR ...]
2037 .ad
2038 .sp .6
2039 .RS 4n
2040 Split devices off \fIpool\fR creating \fInewpool\fR. All \fBvdev\fRs in \fIpool\fR must be mirrors and the pool must not be in the process of resilvering. At the time of the split, \fInewpool\fR will be a replica of \fIpool\fR. By default, the last device in each mirror is split from \fIpool\fR to create \fInewpool\fR.
2041
The optional \fIdevice\fR specification causes the specified device(s) to be included in the new pool; for any mirror whose device is left unspecified, the last device in that mirror is used, as it would be by default.
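.sp
For example (the pool names here are illustrative), one half of every mirror in \fItank\fR could be split off into a new pool \fInewtank\fR as follows:
.sp
.in +2
.nf
# \fBzpool split tank newtank\fR
.fi
.in -2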
2043
2044 .sp
2045 .ne 2
2046 .na
2047 \fB\fB-g\fR\fR
2048 .ad
2049 .RS 6n
2050 Display vdev GUIDs instead of the normal device names. These GUIDs can be used in place of device names for the zpool detach/offline/remove/replace commands.
2051 .RE
2052
2053 .sp
2054 .ne 2
2055 .na
2056 \fB\fB-L\fR\fR
2057 .ad
2058 .RS 6n
2059 Display real paths for vdevs resolving all symbolic links. This can be used to look up the current block device name regardless of the /dev/disk/ path used to open it.
2060 .RE
2061
2062 .sp
2063 .ne 2
2064 .na
2065 \fB\fB-n\fR \fR
2066 .ad
2067 .sp .6
2068 .RS 4n
Do a dry run; do not actually perform the split. Print out the expected configuration of \fInewpool\fR.
2070 .RE
2071
2072 .sp
2073 .ne 2
2074 .na
2075 \fB\fB-P\fR\fR
2076 .ad
2077 .RS 6n
2078 Display full paths for vdevs instead of only the last component of the path. This can be used in conjunction with the \fB-L\fR flag.
2079 .RE
2080
2081 .sp
2082 .ne 2
2083 .na
2084 \fB\fB-R\fR \fIaltroot\fR \fR
2085 .ad
2086 .sp .6
2087 .RS 4n
2088 Set \fIaltroot\fR for \fInewpool\fR and automatically import it. This can be useful to avoid mountpoint collisions if \fInewpool\fR is imported on the same filesystem as \fIpool\fR.
2089 .RE
2090
2091 .sp
2092 .ne 2
2093 .na
2094 \fB\fB-o\fR \fIproperty=value\fR \fR
2095 .ad
2096 .sp .6
2097 .RS 4n
Sets the specified property for \fInewpool\fR. See the "Properties" section for more information on the available pool properties.
2099 .RE
2100
2101 .RE
2102
2103 .sp
2104 .ne 2
2105 .na
2106 \fBzpool status\fR [\fB-c\fR \fBCMD\fR] [\fB-gLPvxD\fR] [\fB-T\fR d | u] [\fIpool\fR] ... [\fIinterval\fR [\fIcount\fR]]
2107 .ad
2108 .sp .6
2109 .RS 4n
2110 Displays the detailed health status for the given pools. If no \fIpool\fR is specified, then the status of each pool in the system is displayed. For more information on pool and device health, see the "Device Failure and Recovery" section.
2111 .sp
2112 If a scrub or resilver is in progress, this command reports the percentage done and the estimated time to completion. Both of these are only approximate, because the amount of data in the pool and the other workloads on the system can change.
2113
2114 .sp
2115 .ne 2
2116 .na
\fB\fB-c\fR \fBCMD\fR\fR
.ad
.RS 12n
Run a command on each vdev and include the first line of its output.
.sp
The \fB-c\fR option allows you to run an arbitrary command on each vdev and
display the first line of its output in \fBzpool status\fR. The following
environment variables are set before running each command:
2125 .sp
2126 \fB$VDEV_PATH\fR: Full path to the vdev.
2127 .LP
2128 \fB$VDEV_UPATH\fR: "Underlying path" to the vdev. For device mapper, multipath, or
2129 partitioned vdevs, \fBVDEV_UPATH\fR is the actual underlying /dev/sd* disk.
2130 This can be useful if the command you're running requires a /dev/sd* device.
2131 .LP
2132 \fB$VDEV_ENC_SYSFS_PATH\fR: The sysfs path to the vdev's enclosure LEDs (if any).
2133 .RE
2134
2135 .sp
2136 .ne 2
2137 .na
2138 \fB\fB-g\fR\fR
2139 .ad
2140 .RS 12n
Display vdev GUIDs instead of the normal device names. These GUIDs can be used in place of device names for the zpool detach/offline/remove/replace commands.
2142 .RE
2143
2144 .sp
2145 .ne 2
2146 .na
2147 \fB\fB-L\fR\fR
2148 .ad
2149 .RS 12n
2150 Display real paths for vdevs resolving all symbolic links. This can be used to look up the current block device name regardless of the /dev/disk/ path used to open it.
2151 .RE
2152
2153 .sp
2154 .ne 2
2155 .na
2156 \fB\fB-P\fR\fR
2157 .ad
2158 .RS 12n
2159 Display full paths for vdevs instead of only the last component of the path. This can be used in conjunction with the \fB-L\fR flag.
2160 .RE
2161
2162 .sp
2163 .ne 2
2164 .na
2165 \fB\fB-v\fR\fR
2166 .ad
2167 .RS 12n
2168 Displays verbose data error information, printing out a complete list of all data errors since the last complete pool scrub.
2169 .RE
2170
2171 .sp
2172 .ne 2
2173 .na
2174 \fB\fB-x\fR\fR
2175 .ad
2176 .RS 12n
2177 Only display status for pools that are exhibiting errors or are otherwise unavailable. Warnings about pools not using the latest on-disk format will not be included.
2178 .RE
2179
2180 .sp
2181 .ne 2
2182 .na
2183 \fB\fB-D\fR\fR
2184 .ad
2185 .RS 12n
2186 Display a histogram of deduplication statistics, showing the allocated (physically present on disk) and
2187 referenced (logically referenced in the pool) block counts and sizes by reference count.
2188 .RE
2189
2190 .sp
2191 .ne 2
2192 .na
2193 \fB\fB-T\fR \fBd\fR | \fBu\fR\fR
2194 .ad
2195 .RS 12n
2196 Display a time stamp.
2197 .sp
2198 Specify \fBu\fR for a printed representation of the internal representation of time. See \fBtime\fR(2). Specify \fBd\fR for standard date format. See \fBdate\fR(1).
2199 .RE
2200
2201 .RE
2202
2203 .sp
2204 .ne 2
2205 .na
2206 \fB\fBzpool upgrade\fR\fR
2207 .ad
2208 .sp .6
2209 .RS 4n
2210 Displays pools which do not have all supported features enabled and pools formatted using a legacy ZFS version number. These pools can continue to be used, but some features may not be available. Use "\fBzpool upgrade -a\fR" to enable all features on all pools.
2211 .RE
2212
2213 .sp
2214 .ne 2
2215 .na
2216 \fB\fBzpool upgrade\fR \fB-v\fR\fR
2217 .ad
2218 .sp .6
2219 .RS 4n
Displays legacy \fBZFS\fR versions supported by the current software. See \fBzpool-features\fR(5) for a description of the feature flags supported by the current software.
2221 .RE
2222
2223 .sp
2224 .ne 2
2225 .na
2226 \fB\fBzpool upgrade\fR [\fB-V\fR \fIversion\fR] \fB-a\fR | \fIpool\fR ...\fR
2227 .ad
2228 .sp .6
2229 .RS 4n
Enables all supported features on the given pool. Once this is done, the pool will no longer be accessible on systems that do not support feature flags. See \fBzpool-features\fR(5) for details on compatibility with systems that support feature flags, but do not support all features enabled on the pool.
2231 .sp
2232 .ne 2
2233 .na
2234 \fB\fB-a\fR\fR
2235 .ad
2236 .RS 14n
2237 Enables all supported features on all pools.
2238 .RE
2239
2240 .sp
2241 .ne 2
2242 .na
2243 \fB\fB-V\fR \fIversion\fR\fR
2244 .ad
2245 .RS 14n
2246 Upgrade to the specified legacy version. If the \fB-V\fR flag is specified, no features will be enabled on the pool. This option can only be used to increase the version number up to the last supported legacy version number.
2247 .RE
2248
2249 .RE
2250
2251 .SH EXAMPLES
2252 .LP
2253 \fBExample 1 \fRCreating a RAID-Z Storage Pool
2254 .sp
2255 .LP
2256 The following command creates a pool with a single \fBraidz\fR root \fIvdev\fR that consists of six disks.
2257
2258 .sp
2259 .in +2
2260 .nf
2261 # \fBzpool create tank raidz sda sdb sdc sdd sde sdf\fR
2262 .fi
2263 .in -2
2264 .sp
2265
2266 .LP
2267 \fBExample 2 \fRCreating a Mirrored Storage Pool
2268 .sp
2269 .LP
2270 The following command creates a pool with two mirrors, where each mirror contains two disks.
2271
2272 .sp
2273 .in +2
2274 .nf
2275 # \fBzpool create tank mirror sda sdb mirror sdc sdd\fR
2276 .fi
2277 .in -2
2278 .sp
2279
2280 .LP
2281 \fBExample 3 \fRCreating a ZFS Storage Pool by Using Partitions
2282 .sp
2283 .LP
2284 The following command creates an unmirrored pool using two disk partitions.
2285
2286 .sp
2287 .in +2
2288 .nf
2289 # \fBzpool create tank sda1 sdb2\fR
2290 .fi
2291 .in -2
2292 .sp
2293
2294 .LP
2295 \fBExample 4 \fRCreating a ZFS Storage Pool by Using Files
2296 .sp
2297 .LP
2298 The following command creates an unmirrored pool using files. While not recommended, a pool based on files can be useful for experimental purposes.
2299
2300 .sp
2301 .in +2
2302 .nf
2303 # \fBzpool create tank /path/to/file/a /path/to/file/b\fR
2304 .fi
2305 .in -2
2306 .sp
2307
2308 .LP
2309 \fBExample 5 \fRAdding a Mirror to a ZFS Storage Pool
2310 .sp
2311 .LP
2312 The following command adds two mirrored disks to the pool \fItank\fR, assuming the pool is already made up of two-way mirrors. The additional space is immediately available to any datasets within the pool.
2313
2314 .sp
2315 .in +2
2316 .nf
2317 # \fBzpool add tank mirror sda sdb\fR
2318 .fi
2319 .in -2
2320 .sp
2321
2322 .LP
2323 \fBExample 6 \fRListing Available ZFS Storage Pools
2324 .sp
2325 .LP
2326 The following command lists all available pools on the system. In this case, the pool \fIzion\fR is faulted due to a missing device.
2327
2328 .sp
2329 .LP
2330 The results from this command are similar to the following:
2331
2332 .sp
2333 .in +2
2334 .nf
2335 # \fBzpool list\fR
2336 NAME SIZE ALLOC FREE FRAG EXPANDSZ CAP DEDUP HEALTH ALTROOT
2337 rpool 19.9G 8.43G 11.4G 33% - 42% 1.00x ONLINE -
2338 tank 61.5G 20.0G 41.5G 48% - 32% 1.00x ONLINE -
2339 zion - - - - - - - FAULTED -
2340 .fi
2341 .in -2
2342 .sp
2343
2344 .LP
2345 \fBExample 7 \fRDestroying a ZFS Storage Pool
2346 .sp
2347 .LP
2348 The following command destroys the pool \fItank\fR and any datasets contained within.
2349
2350 .sp
2351 .in +2
2352 .nf
2353 # \fBzpool destroy -f tank\fR
2354 .fi
2355 .in -2
2356 .sp
2357
2358 .LP
2359 \fBExample 8 \fRExporting a ZFS Storage Pool
2360 .sp
2361 .LP
2362 The following command exports the devices in pool \fItank\fR so that they can be relocated or later imported.
2363
2364 .sp
2365 .in +2
2366 .nf
2367 # \fBzpool export tank\fR
2368 .fi
2369 .in -2
2370 .sp
2371
2372 .LP
2373 \fBExample 9 \fRImporting a ZFS Storage Pool
2374 .sp
2375 .LP
2376 The following command displays available pools, and then imports the pool \fItank\fR for use on the system.
2377
2378 .sp
2379 .LP
2380 The results from this command are similar to the following:
2381
2382 .sp
2383 .in +2
2384 .nf
2385 # \fBzpool import\fR
2386 pool: tank
2387 id: 15451357997522795478
2388 state: ONLINE
2389 action: The pool can be imported using its name or numeric identifier.
2390 config:
2391
2392 tank ONLINE
2393 mirror ONLINE
2394 sda ONLINE
2395 sdb ONLINE
2396
2397 # \fBzpool import tank\fR
2398 .fi
2399 .in -2
2400 .sp
2401
2402 .LP
2403 \fBExample 10 \fRUpgrading All ZFS Storage Pools to the Current Version
2404 .sp
2405 .LP
2406 The following command upgrades all ZFS Storage pools to the current version of the software.
2407
2408 .sp
2409 .in +2
2410 .nf
2411 # \fBzpool upgrade -a\fR
2412 This system is currently running ZFS pool version 28.
2413 .fi
2414 .in -2
2415 .sp
2416
2417 .LP
2418 \fBExample 11 \fRManaging Hot Spares
2419 .sp
2420 .LP
2421 The following command creates a new pool with an available hot spare:
2422
2423 .sp
2424 .in +2
2425 .nf
2426 # \fBzpool create tank mirror sda sdb spare sdc\fR
2427 .fi
2428 .in -2
2429 .sp
2430
2431 .sp
2432 .LP
2433 If one of the disks were to fail, the pool would be reduced to the degraded state. The failed device can be replaced using the following command:
2434
2435 .sp
2436 .in +2
2437 .nf
2438 # \fBzpool replace tank sda sdd\fR
2439 .fi
2440 .in -2
2441 .sp
2442
2443 .sp
2444 .LP
Once the data has been resilvered, the spare is automatically removed and is made available for use should another device fail. The hot spare can be permanently removed from the pool using the following command:
2446
2447 .sp
2448 .in +2
2449 .nf
2450 # \fBzpool remove tank sdc\fR
2451 .fi
2452 .in -2
2453 .sp
2454
2455 .LP
2456 \fBExample 12 \fRCreating a ZFS Pool with Mirrored Separate Intent Logs
2457 .sp
2458 .LP
2459 The following command creates a ZFS storage pool consisting of two, two-way mirrors and mirrored log devices:
2460
2461 .sp
2462 .in +2
2463 .nf
2464 # \fBzpool create pool mirror sda sdb mirror sdc sdd log mirror \e
2465 sde sdf\fR
2466 .fi
2467 .in -2
2468 .sp
2469
2470 .LP
2471 \fBExample 13 \fRAdding Cache Devices to a ZFS Pool
2472 .sp
2473 .LP
2474 The following command adds two disks for use as cache devices to a ZFS storage pool:
2475
2476 .sp
2477 .in +2
2478 .nf
2479 # \fBzpool add pool cache sdc sdd\fR
2480 .fi
2481 .in -2
2482 .sp
2483
2484 .sp
2485 .LP
Once added, the cache devices gradually fill with content from main memory. Depending on the size of your cache devices, it could take over an hour for them to fill. Capacity and reads can be monitored using the \fBiostat\fR subcommand as follows:
2487
2488 .sp
2489 .in +2
2490 .nf
2491 # \fBzpool iostat -v pool 5\fR
2492 .fi
2493 .in -2
2494 .sp
2495
2496 .LP
2497 \fBExample 14 \fRRemoving a Mirrored Log Device
2498 .sp
2499 .LP
2500 The following command removes the mirrored log device \fBmirror-2\fR.
2501
2502 .sp
2503 .LP
2504 Given this configuration:
2505
2506 .sp
2507 .in +2
2508 .nf
2509 pool: tank
2510 state: ONLINE
2511 scrub: none requested
2512 config:
2513
2514 NAME STATE READ WRITE CKSUM
2515 tank ONLINE 0 0 0
2516 mirror-0 ONLINE 0 0 0
2517 sda ONLINE 0 0 0
2518 sdb ONLINE 0 0 0
2519 mirror-1 ONLINE 0 0 0
2520 sdc ONLINE 0 0 0
2521 sdd ONLINE 0 0 0
2522 logs
2523 mirror-2 ONLINE 0 0 0
2524 sde ONLINE 0 0 0
2525 sdf ONLINE 0 0 0
2526 .fi
2527 .in -2
2528 .sp
2529
2530 .sp
2531 .LP
2532 The command to remove the mirrored log \fBmirror-2\fR is:
2533
2534 .sp
2535 .in +2
2536 .nf
2537 # \fBzpool remove tank mirror-2\fR
2538 .fi
2539 .in -2
2540 .sp
2541
2542 .LP
2543 \fBExample 15 \fRDisplaying expanded space on a device
2544 .sp
2545 .LP
The following command displays the detailed information for the \fIdata\fR
pool. This pool consists of a single \fIraidz\fR vdev in which one of the
devices increased its capacity by 10GB. The pool cannot make use of this
extra capacity until all the devices under the \fIraidz\fR vdev have been
expanded.
2551
2552 .sp
2553 .in +2
2554 .nf
2555 # \fBzpool list -v data\fR
2556 NAME SIZE ALLOC FREE FRAG EXPANDSZ CAP DEDUP HEALTH ALTROOT
2557 data 23.9G 14.6G 9.30G 48% - 61% 1.00x ONLINE -
2558 raidz1 23.9G 14.6G 9.30G 48% -
2559 c1t1d0 - - - - -
2560 c1t2d0 - - - - 10G
2561 c1t3d0 - - - - -
2562 .fi
2563 .in -2
2564 .sp
2565
2566 .LP
2567 \fBExample 16 \fRRunning commands in zpool status and zpool iostat with -c
2568 .sp
2569 .LP
2570 Some examples of using the command (-c) option with zpool status and zpool
2571 iostat:
2572 .sp
2573 .in +2
2574 .nf
2575 # \fBzpool status -c \[aq]echo I am $VDEV_PATH, $VDEV_UPATH\[aq]\fR
2576 NAME STATE READ WRITE CKSUM
2577 mypool ONLINE 0 0 0
2578 mirror-0 ONLINE 0 0 0
2579 mpatha ONLINE 0 0 0 I am /dev/mapper/mpatha, /dev/sdc
2580 sdb ONLINE 0 0 0 I am /dev/sdb1, /dev/sdb
2581 .fi
2582 .in -2
2583
2584 .sp
2585 .in +2
2586 .nf
2587 # \fBzpool iostat -v -c \[aq]smartctl -a $VDEV_UPATH | grep "Current Drive Temperature"\[aq]\fR
2588 mypool 997M 7.25T 0 0 105K 106K
2589 mirror 997M 7.25T 0 0 105K 106K
2590 B0 - - 0 0 17.4K 15.2K Current Drive Temperature: 25 C
2591 B1 - - 0 0 17.4K 15.2K Current Drive Temperature: 24 C
2592 B2 - - 0 0 17.5K 15.2K Current Drive Temperature: 24 C
2593 B3 - - 0 0 0 15.1K Current Drive Temperature: 24 C
2594 logs - - - - - -
2595 B8 0 7.25T 0 0 1.14K 20.2K Current Drive Temperature: 23 C
2596 .fi
2597 .in -2
2598
2599 .SH EXIT STATUS
2600 .sp
2601 .LP
2602 The following exit values are returned:
2603 .sp
2604 .ne 2
2605 .na
2606 \fB\fB0\fR\fR
2607 .ad
2608 .RS 5n
2609 Successful completion.
2610 .RE
2611
2612 .sp
2613 .ne 2
2614 .na
2615 \fB\fB1\fR\fR
2616 .ad
2617 .RS 5n
2618 An error occurred.
2619 .RE
2620
2621 .sp
2622 .ne 2
2623 .na
2624 \fB\fB2\fR\fR
2625 .ad
2626 .RS 5n
2627 Invalid command line options were specified.
2628 .RE
2629
2630 .SH "ENVIRONMENT VARIABLES"
2631 .TP
2632 .B "ZFS_ABORT
2633 Cause \fBzpool\fR to dump core on exit for the purposes of running \fB::findleaks\fR.
2634 .TP
2635 .B "ZPOOL_IMPORT_PATH"
2636 The search path for devices or files to use with the pool. This is a colon-separated list of directories in which \fBzpool\fR looks for device nodes and files.
2637 Similar to the \fB-d\fR option in \fIzpool import\fR.
2638 .TP
2639 .B "ZPOOL_VDEV_NAME_GUID"
2640 Cause \fBzpool\fR subcommands to output vdev guids by default. This behavior
2641 is identical to the \fBzpool status -g\fR command line option.
2642 .TP
2643 .B "ZPOOL_VDEV_NAME_FOLLOW_LINKS"
2644 Cause \fBzpool\fR subcommands to follow links for vdev names by default. This behavior is identical to the \fBzpool status -L\fR command line option.
2645 .TP
2646 .B "ZPOOL_VDEV_NAME_PATH"
2647 Cause \fBzpool\fR subcommands to output full vdev path names by default. This
behavior is identical to the \fBzpool status -P\fR command line option.
2649 .TP
2650 .B "ZFS_VDEV_DEVID_OPT_OUT"
Older ZFS on Linux implementations had issues when attempting to display pool
config VDEV names if a "devid" NVP value was present in the pool's config.

For example, a pool that originated on the illumos platform would have a devid
value in the config, and \fBzpool status\fR would fail when listing the config.
This would also be true for future Linux-based pools.
2657
2658 A pool can be stripped of any "devid" values on import or prevented from adding
2659 them on \fBzpool create\fR or \fBzpool add\fR by setting ZFS_VDEV_DEVID_OPT_OUT.
2660
2661 .SH SEE ALSO
2662 .sp
2663 .LP
2664 \fBzfs\fR(8), \fBzpool-features\fR(5), \fBzfs-events\fR(5), \fBzfs-module-parameters\fR(5)