1 '\" te
2 .\" Copyright (c) 2007, Sun Microsystems, Inc. All Rights Reserved.
3 .\" Copyright 2011, Nexenta Systems, Inc. All Rights Reserved.
4 .\" Copyright (c) 2012 by Delphix. All rights reserved.
5 .\" The contents of this file are subject to the terms of the Common Development
6 .\" and Distribution License (the "License"). You may not use this file except
7 .\" in compliance with the License. You can obtain a copy of the license at
8 .\" usr/src/OPENSOLARIS.LICENSE or http://www.opensolaris.org/os/licensing.
9 .\"
10 .\" See the License for the specific language governing permissions and
11 .\" limitations under the License. When distributing Covered Code, include this
12 .\" CDDL HEADER in each file and include the License file at
13 .\" usr/src/OPENSOLARIS.LICENSE. If applicable, add the following below this
14 .\" CDDL HEADER, with the fields enclosed by brackets "[]" replaced with your
15 .\" own identifying information:
16 .\" Portions Copyright [yyyy] [name of copyright owner]
17 .TH ZPOOL 1M "Mar 16, 2012"
18 .SH NAME
19 zpool \- configures ZFS storage pools
20 .SH SYNOPSIS
21 .LP
22 .nf
23 \fBzpool\fR [\fB-?\fR]
24 .fi
25
26 .LP
27 .nf
28 \fBzpool add\fR [\fB-fn\fR] \fIpool\fR \fIvdev\fR ...
29 .fi
30
31 .LP
32 .nf
33 \fBzpool attach\fR [\fB-f\fR] \fIpool\fR \fIdevice\fR \fInew_device\fR
34 .fi
35
36 .LP
37 .nf
38 \fBzpool clear\fR \fIpool\fR [\fIdevice\fR]
39 .fi
40
41 .LP
42 .nf
43 \fBzpool create\fR [\fB-fnd\fR] [\fB-o\fR \fIproperty=value\fR] ... [\fB-O\fR \fIfile-system-property=value\fR]
44 ... [\fB-m\fR \fImountpoint\fR] [\fB-R\fR \fIroot\fR] \fIpool\fR \fIvdev\fR ...
45 .fi
46
47 .LP
48 .nf
49 \fBzpool destroy\fR [\fB-f\fR] \fIpool\fR
50 .fi
51
52 .LP
53 .nf
54 \fBzpool detach\fR \fIpool\fR \fIdevice\fR
55 .fi
56
57 .LP
58 .nf
59 \fBzpool export\fR [\fB-f\fR] \fIpool\fR ...
60 .fi
61
62 .LP
63 .nf
64 \fBzpool get\fR "\fIall\fR" | \fIproperty\fR[,...] \fIpool\fR ...
65 .fi
66
67 .LP
68 .nf
69 \fBzpool history\fR [\fB-il\fR] [\fIpool\fR] ...
70 .fi
71
72 .LP
73 .nf
74 \fBzpool import\fR [\fB-d\fR \fIdir\fR] [\fB-D\fR]
75 .fi
76
77 .LP
78 .nf
79 \fBzpool import\fR [\fB-o \fImntopts\fR\fR] [\fB-o\fR \fIproperty=value\fR] ... [\fB-d\fR \fIdir\fR | \fB-c\fR \fIcachefile\fR]
80 [\fB-D\fR] [\fB-f\fR] [\fB-R\fR \fIroot\fR] \fB-a\fR
81 .fi
82
83 .LP
84 .nf
85 \fBzpool import\fR [\fB-o \fImntopts\fR\fR] [\fB-o\fR \fIproperty=value\fR] ... [\fB-d\fR \fIdir\fR | \fB-c\fR \fIcachefile\fR]
86 [\fB-D\fR] [\fB-f\fR] [\fB-R\fR \fIroot\fR] \fIpool\fR | \fIid\fR [\fInewpool\fR]
87 .fi
88
89 .LP
90 .nf
91 \fBzpool iostat\fR [\fB-T\fR u | d ] [\fB-v\fR] [\fIpool\fR] ... [\fIinterval\fR[\fIcount\fR]]
92 .fi
93
94 .LP
95 .nf
96 \fBzpool list\fR [\fB-Hv\fR] [\fB-o\fR \fIproperty\fR[,...]] [\fIpool\fR] ...
97 .fi
98
99 .LP
100 .nf
101 \fBzpool offline\fR [\fB-t\fR] \fIpool\fR \fIdevice\fR ...
102 .fi
103
104 .LP
105 .nf
106 \fBzpool online\fR \fIpool\fR \fIdevice\fR ...
107 .fi
108
109 .LP
110 .nf
111 \fBzpool reguid\fR \fIpool\fR
112 .fi
113
114 .LP
115 .nf
116 \fBzpool remove\fR \fIpool\fR \fIdevice\fR ...
117 .fi
118
119 .LP
120 .nf
121 \fBzpool replace\fR [\fB-f\fR] \fIpool\fR \fIdevice\fR [\fInew_device\fR]
122 .fi
123
124 .LP
125 .nf
126 \fBzpool scrub\fR [\fB-s\fR] \fIpool\fR ...
127 .fi
128
129 .LP
130 .nf
131 \fBzpool set\fR \fIproperty\fR=\fIvalue\fR \fIpool\fR
132 .fi
133
134 .LP
135 .nf
136 \fBzpool status\fR [\fB-xv\fR] [\fIpool\fR] ...
137 .fi
138
139 .LP
140 .nf
141 \fBzpool upgrade\fR
142 .fi
143
144 .LP
145 .nf
146 \fBzpool upgrade\fR \fB-v\fR
147 .fi
148
149 .LP
150 .nf
151 \fBzpool upgrade\fR [\fB-V\fR \fIversion\fR] \fB-a\fR | \fIpool\fR ...
152 .fi
153
154 .SH DESCRIPTION
155 .sp
156 .LP
157 The \fBzpool\fR command configures \fBZFS\fR storage pools. A storage pool is a
158 collection of devices that provides physical storage and data replication for
159 \fBZFS\fR datasets.
160 .sp
161 .LP
162 All datasets within a storage pool share the same space. See \fBzfs\fR(1M) for
163 information on managing datasets.
164 .SS "Virtual Devices (\fBvdev\fRs)"
165 .sp
166 .LP
167 A "virtual device" describes a single device or a collection of devices
168 organized according to certain performance and fault characteristics. The
169 following virtual devices are supported:
170 .sp
171 .ne 2
172 .na
173 \fB\fBdisk\fR\fR
174 .ad
175 .RS 10n
176 A block device, typically located under \fB/dev/dsk\fR. \fBZFS\fR can use
177 individual slices or partitions, though the recommended mode of operation is to
178 use whole disks. A disk can be specified by a full path, or it can be a
179 shorthand name (the relative portion of the path under "/dev/dsk"). A whole
180 disk can be specified by omitting the slice or partition designation. For
181 example, "c0t0d0" is equivalent to "/dev/dsk/c0t0d0s2". When given a whole
182 disk, \fBZFS\fR automatically labels the disk, if necessary.
183 .RE
184
185 .sp
186 .ne 2
187 .na
188 \fB\fBfile\fR\fR
189 .ad
190 .RS 10n
191 A regular file. The use of files as a backing store is strongly discouraged. It
192 is designed primarily for experimental purposes, as the fault tolerance of a
193 file is only as good as the file system of which it is a part. A file must be
194 specified by a full path.
195 .RE
196
197 .sp
198 .ne 2
199 .na
200 \fB\fBmirror\fR\fR
201 .ad
202 .RS 10n
203 A mirror of two or more devices. Data is replicated in an identical fashion
204 across all components of a mirror. A mirror with \fIN\fR disks of size \fIX\fR
205 can hold \fIX\fR bytes and can withstand (\fIN-1\fR) devices failing before
206 data integrity is compromised.
207 .RE
208
209 .sp
210 .ne 2
211 .na
212 \fB\fBraidz\fR\fR
213 .ad
214 .br
215 .na
216 \fB\fBraidz1\fR\fR
217 .ad
218 .br
219 .na
220 \fB\fBraidz2\fR\fR
221 .ad
222 .br
223 .na
224 \fB\fBraidz3\fR\fR
225 .ad
226 .RS 10n
227 A variation on \fBRAID-5\fR that allows for better distribution of parity and
228 eliminates the "\fBRAID-5\fR write hole" (in which data and parity become
229 inconsistent after a power loss). Data and parity is striped across all disks
230 within a \fBraidz\fR group.
231 .sp
232 A \fBraidz\fR group can have single-, double-, or triple-parity, meaning that
233 the \fBraidz\fR group can sustain one, two, or three failures, respectively,
234 without losing any data. The \fBraidz1\fR \fBvdev\fR type specifies a
235 single-parity \fBraidz\fR group; the \fBraidz2\fR \fBvdev\fR type specifies a
236 double-parity \fBraidz\fR group; and the \fBraidz3\fR \fBvdev\fR type specifies
237 a triple-parity \fBraidz\fR group. The \fBraidz\fR \fBvdev\fR type is an alias
238 for \fBraidz1\fR.
239 .sp
240 A \fBraidz\fR group with \fIN\fR disks of size \fIX\fR with \fIP\fR parity
241 disks can hold approximately (\fIN-P\fR)*\fIX\fR bytes and can withstand
242 \fIP\fR device(s) failing before data integrity is compromised. The minimum
243 number of devices in a \fBraidz\fR group is one more than the number of parity
244 disks. The recommended number is between 3 and 9 to help increase performance.
245 .RE
246
247 .sp
248 .ne 2
249 .na
250 \fB\fBspare\fR\fR
251 .ad
252 .RS 10n
253 A special pseudo-\fBvdev\fR which keeps track of available hot spares for a
254 pool. For more information, see the "Hot Spares" section.
255 .RE
256
257 .sp
258 .ne 2
259 .na
260 \fB\fBlog\fR\fR
261 .ad
262 .RS 10n
263 A separate-intent log device. If more than one log device is specified, then
264 writes are load-balanced between devices. Log devices can be mirrored. However,
265 \fBraidz\fR \fBvdev\fR types are not supported for the intent log. For more
266 information, see the "Intent Log" section.
267 .RE
268
269 .sp
270 .ne 2
271 .na
272 \fB\fBcache\fR\fR
273 .ad
274 .RS 10n
275 A device used to cache storage pool data. A cache device cannot be
276 configured as a mirror or \fBraidz\fR group. For more information, see the
277 "Cache Devices" section.
278 .RE
279
280 .sp
281 .LP
282 Virtual devices cannot be nested, so a mirror or \fBraidz\fR virtual device can
283 only contain files or disks. Mirrors of mirrors (or other combinations) are not
284 allowed.
285 .sp
286 .LP
287 A pool can have any number of virtual devices at the top of the configuration
288 (known as "root vdevs"). Data is dynamically distributed across all top-level
289 devices to balance data among devices. As new virtual devices are added,
290 \fBZFS\fR automatically places data on the newly available devices.
291 .sp
292 .LP
293 Virtual devices are specified one at a time on the command line, separated by
294 whitespace. The keywords "mirror" and "raidz" are used to distinguish where a
295 group ends and another begins. For example, the following creates two root
296 vdevs, each a mirror of two disks:
297 .sp
298 .in +2
299 .nf
300 # \fBzpool create mypool mirror c0t0d0 c0t1d0 mirror c1t0d0 c1t1d0\fR
301 .fi
302 .in -2
303 .sp
304
305 .SS "Device Failure and Recovery"
306 .sp
307 .LP
308 \fBZFS\fR supports a rich set of mechanisms for handling device failure and
309 data corruption. All metadata and data is checksummed, and \fBZFS\fR
310 automatically repairs bad data from a good copy when corruption is detected.
311 .sp
312 .LP
313 In order to take advantage of these features, a pool must make use of some form
314 of redundancy, using either mirrored or \fBraidz\fR groups. While \fBZFS\fR
315 supports running in a non-redundant configuration, where each root vdev is
316 simply a disk or file, this is strongly discouraged. A single case of bit
317 corruption can render some or all of your data unavailable.
318 .sp
319 .LP
320 A pool's health status is described by one of three states: online, degraded,
321 or faulted. An online pool has all devices operating normally. A degraded pool
322 is one in which one or more devices have failed, but the data is still
323 available due to a redundant configuration. A faulted pool has corrupted
324 metadata, or one or more faulted devices, and insufficient replicas to continue
325 functioning.
326 .sp
327 .LP
328 The health of the top-level vdev, such as mirror or \fBraidz\fR device, is
329 potentially impacted by the state of its associated vdevs, or component
330 devices. A top-level vdev or component device is in one of the following
331 states:
332 .sp
333 .ne 2
334 .na
335 \fB\fBDEGRADED\fR\fR
336 .ad
337 .RS 12n
338 One or more top-level vdevs is in the degraded state because one or more
339 component devices are offline. Sufficient replicas exist to continue
340 functioning.
341 .sp
342 One or more component devices is in the degraded or faulted state, but
343 sufficient replicas exist to continue functioning. The underlying conditions
344 are as follows:
345 .RS +4
346 .TP
347 .ie t \(bu
348 .el o
349 The number of checksum errors exceeds acceptable levels and the device is
350 degraded as an indication that something may be wrong. \fBZFS\fR continues to
351 use the device as necessary.
352 .RE
353 .RS +4
354 .TP
355 .ie t \(bu
356 .el o
357 The number of I/O errors exceeds acceptable levels. The device could not be
358 marked as faulted because there are insufficient replicas to continue
359 functioning.
360 .RE
361 .RE
362
363 .sp
364 .ne 2
365 .na
366 \fB\fBFAULTED\fR\fR
367 .ad
368 .RS 12n
369 One or more top-level vdevs is in the faulted state because one or more
370 component devices are offline. Insufficient replicas exist to continue
371 functioning.
372 .sp
373 One or more component devices is in the faulted state, and insufficient
374 replicas exist to continue functioning. The underlying conditions are as
375 follows:
376 .RS +4
377 .TP
378 .ie t \(bu
379 .el o
380 The device could be opened, but the contents did not match expected values.
381 .RE
382 .RS +4
383 .TP
384 .ie t \(bu
385 .el o
386 The number of I/O errors exceeds acceptable levels and the device is faulted to
387 prevent further use of the device.
388 .RE
389 .RE
390
391 .sp
392 .ne 2
393 .na
394 \fB\fBOFFLINE\fR\fR
395 .ad
396 .RS 12n
397 The device was explicitly taken offline by the "\fBzpool offline\fR" command.
398 .RE
399
400 .sp
401 .ne 2
402 .na
403 \fB\fBONLINE\fR\fR
404 .ad
405 .RS 12n
406 The device is online and functioning.
407 .RE
408
409 .sp
410 .ne 2
411 .na
412 \fB\fBREMOVED\fR\fR
413 .ad
414 .RS 12n
415 The device was physically removed while the system was running. Device removal
416 detection is hardware-dependent and may not be supported on all platforms.
417 .RE
418
419 .sp
420 .ne 2
421 .na
422 \fB\fBUNAVAIL\fR\fR
423 .ad
424 .RS 12n
425 The device could not be opened. If a pool is imported when a device was
426 unavailable, then the device will be identified by a unique identifier instead
427 of its path since the path was never correct in the first place.
428 .RE
429
430 .sp
431 .LP
432 If a device is removed and later re-attached to the system, \fBZFS\fR attempts
433 to put the device online automatically. Device attach detection is
434 hardware-dependent and might not be supported on all platforms.
435 .SS "Hot Spares"
436 .sp
437 .LP
438 \fBZFS\fR allows devices to be associated with pools as "hot spares". These
439 devices are not actively used in the pool, but when an active device fails, it
440 is automatically replaced by a hot spare. To create a pool with hot spares,
441 specify a "spare" \fBvdev\fR with any number of devices. For example,
442 .sp
443 .in +2
444 .nf
445 # zpool create pool mirror c0d0 c1d0 spare c2d0 c3d0
446 .fi
447 .in -2
448 .sp
449
450 .sp
451 .LP
452 Spares can be shared across multiple pools, and can be added with the "\fBzpool
453 add\fR" command and removed with the "\fBzpool remove\fR" command. Once a spare
454 replacement is initiated, a new "spare" \fBvdev\fR is created within the
455 configuration that will remain there until the original device is replaced. At
456 this point, the hot spare becomes available again if another device fails.
457 .sp
458 .LP
459 If a pool has a shared spare that is currently being used, the pool cannot be
460 exported since other pools may use this shared spare, which may lead to
461 potential data corruption.
462 .sp
463 .LP
464 An in-progress spare replacement can be cancelled by detaching the hot spare.
465 If the original faulted device is detached, then the hot spare assumes its
466 place in the configuration, and is removed from the spare list of all active
467 pools.
468 .sp
469 .LP
470 Spares cannot replace log devices.
471 .SS "Intent Log"
472 .sp
473 .LP
474 The \fBZFS\fR Intent Log (\fBZIL\fR) satisfies \fBPOSIX\fR requirements for
475 synchronous transactions. For instance, databases often require their
476 transactions to be on stable storage devices when returning from a system call.
477 \fBNFS\fR and other applications can also use \fBfsync\fR() to ensure data
478 stability. By default, the intent log is allocated from blocks within the main
479 pool. However, it might be possible to get better performance using separate
480 intent log devices such as \fBNVRAM\fR or a dedicated disk. For example:
481 .sp
482 .in +2
483 .nf
484 \fB# zpool create pool c0d0 c1d0 log c2d0\fR
485 .fi
486 .in -2
487 .sp
488
489 .sp
490 .LP
491 Multiple log devices can also be specified, and they can be mirrored. See the
492 EXAMPLES section for an example of mirroring multiple log devices.
493 .sp
494 .LP
495 Log devices can be added, replaced, attached, detached, and imported and
496 exported as part of the larger pool. Mirrored log devices can be removed by
497 specifying the top-level mirror for the log.
498 .SS "Cache Devices"
499 .sp
500 .LP
501 Devices can be added to a storage pool as "cache devices." These devices
502 provide an additional layer of caching between main memory and disk. For
503 read-heavy workloads, where the working set size is much larger than what can
504 be cached in main memory, using cache devices allows much more of this working
505 set to be served from low latency media. Using cache devices provides the
506 greatest performance improvement for random read-workloads of mostly static
507 content.
508 .sp
509 .LP
510 To create a pool with cache devices, specify a "cache" \fBvdev\fR with any
511 number of devices. For example:
512 .sp
513 .in +2
514 .nf
515 \fB# zpool create pool c0d0 c1d0 cache c2d0 c3d0\fR
516 .fi
517 .in -2
518 .sp
519
520 .sp
521 .LP
522 Cache devices cannot be mirrored or part of a \fBraidz\fR configuration. If a
523 read error is encountered on a cache device, that read \fBI/O\fR is reissued to
524 the original storage pool device, which might be part of a mirrored or
525 \fBraidz\fR configuration.
526 .sp
527 .LP
528 The content of the cache devices is considered volatile, as is the case with
529 other system caches.
530 .SS "Properties"
531 .sp
532 .LP
533 Each pool has several properties associated with it. Some properties are
534 read-only statistics while others are configurable and change the behavior of
535 the pool. The following are read-only properties:
536 .sp
537 .ne 2
538 .na
539 \fB\fBavailable\fR\fR
540 .ad
541 .RS 20n
542 Amount of storage available within the pool. This property can also be referred
543 to by its shortened column name, "avail".
544 .RE
545
546 .sp
547 .ne 2
548 .na
549 \fB\fBcapacity\fR\fR
550 .ad
551 .RS 20n
552 Percentage of pool space used. This property can also be referred to by its
553 shortened column name, "cap".
554 .RE
555
556 .sp
557 .ne 2
558 .na
559 \fB\fBexpandsize\fR\fR
560 .ad
561 .RS 20n
562 Amount of uninitialized space within the pool or device that can be used to
563 increase the total capacity of the pool. Uninitialized space consists of
564 any space on an EFI labeled vdev which has not been brought online
565 (i.e. zpool online -e). This space occurs when a LUN is dynamically expanded.
566 .RE
567
568 .sp
569 .ne 2
570 .na
571 \fB\fBfree\fR\fR
572 .ad
573 .RS 20n
574 The amount of free space available in the pool.
575 .RE
576
577 .sp
578 .ne 2
579 .na
580 \fB\fBfreeing\fR\fR
581 .ad
582 .RS 20n
583 After a file system or snapshot is destroyed, the space it was using is
584 returned to the pool asynchronously. \fB\fBfreeing\fR\fR is the amount of
585 space remaining to be reclaimed. Over time \fB\fBfreeing\fR\fR will decrease
586 while \fB\fBfree\fR\fR increases.
587 .RE
588
589 .sp
590 .ne 2
591 .na
592 \fB\fBhealth\fR\fR
593 .ad
594 .RS 20n
595 The current health of the pool. Health can be "\fBONLINE\fR", "\fBDEGRADED\fR",
596 "\fBFAULTED\fR", "\fBOFFLINE\fR", "\fBREMOVED\fR", or "\fBUNAVAIL\fR".
597 .RE
598
599 .sp
600 .ne 2
601 .na
602 \fB\fBguid\fR\fR
603 .ad
604 .RS 20n
605 A unique identifier for the pool.
606 .RE
607
608 .sp
609 .ne 2
610 .na
611 \fB\fBsize\fR\fR
612 .ad
613 .RS 20n
614 Total size of the storage pool.
615 .RE
616
617 .sp
618 .ne 2
619 .na
620 \fB\fBunsupported@\fR\fIfeature_guid\fR\fR
621 .ad
622 .RS 20n
623 Information about unsupported features that are enabled on the pool. See
624 \fBzpool-features\fR(5) for details.
625 .RE
626
627 .sp
628 .ne 2
629 .na
630 \fB\fBused\fR\fR
631 .ad
632 .RS 20n
633 Amount of storage space used within the pool.
634 .RE
635
636 .sp
637 .LP
638 The space usage properties report actual physical space available to the
639 storage pool. The physical space can be different from the total amount of
640 space that any contained datasets can actually use. The amount of space used in
641 a \fBraidz\fR configuration depends on the characteristics of the data being
642 written. In addition, \fBZFS\fR reserves some space for internal accounting
643 that the \fBzfs\fR(1M) command takes into account, but the \fBzpool\fR command
644 does not. For non-full pools of a reasonable size, these effects should be
645 invisible. For small pools, or pools that are close to being completely full,
646 these discrepancies may become more noticeable.
647 .sp
648 .LP
649 The following property can be set at creation time and import time:
650 .sp
651 .ne 2
652 .na
653 \fB\fBaltroot\fR\fR
654 .ad
655 .sp .6
656 .RS 4n
657 Alternate root directory. If set, this directory is prepended to any mount
658 points within the pool. This can be used when examining an unknown pool where
659 the mount points cannot be trusted, or in an alternate boot environment, where
660 the typical paths are not valid. \fBaltroot\fR is not a persistent property. It
661 is valid only while the system is up. Setting \fBaltroot\fR defaults to using
662 \fBcachefile\fR=none, though this may be overridden using an explicit setting.
663 .RE
664
665 .sp
666 .LP
667 The following properties can be set at creation time and import time, and later
668 changed with the \fBzpool set\fR command:
669 .sp
670 .ne 2
671 .na
672 \fB\fBautoexpand\fR=\fBon\fR | \fBoff\fR\fR
673 .ad
674 .sp .6
675 .RS 4n
676 Controls automatic pool expansion when the underlying LUN is grown. If set to
677 \fBon\fR, the pool will be resized according to the size of the expanded
678 device. If the device is part of a mirror or \fBraidz\fR then all devices
679 within that mirror/\fBraidz\fR group must be expanded before the new space is
680 made available to the pool. The default behavior is \fBoff\fR. This property
681 can also be referred to by its shortened column name, \fBexpand\fR.
682 .RE
683
684 .sp
685 .ne 2
686 .na
687 \fB\fBautoreplace\fR=\fBon\fR | \fBoff\fR\fR
688 .ad
689 .sp .6
690 .RS 4n
691 Controls automatic device replacement. If set to "\fBoff\fR", device
692 replacement must be initiated by the administrator by using the "\fBzpool
693 replace\fR" command. If set to "\fBon\fR", any new device, found in the same
694 physical location as a device that previously belonged to the pool, is
695 automatically formatted and replaced. The default behavior is "\fBoff\fR". This
696 property can also be referred to by its shortened column name, "replace".
697 .RE
698
699 .sp
700 .ne 2
701 .na
702 \fB\fBbootfs\fR=\fIpool\fR/\fIdataset\fR\fR
703 .ad
704 .sp .6
705 .RS 4n
706 Identifies the default bootable dataset for the root pool. This property is
707 expected to be set mainly by the installation and upgrade programs.
708 .RE
709
710 .sp
711 .ne 2
712 .na
713 \fB\fBcachefile\fR=\fIpath\fR | \fBnone\fR\fR
714 .ad
715 .sp .6
716 .RS 4n
717 Controls the location of where the pool configuration is cached. Discovering
718 all pools on system startup requires a cached copy of the configuration data
719 that is stored on the root file system. All pools in this cache are
720 automatically imported when the system boots. Some environments, such as
721 install and clustering, need to cache this information in a different location
722 so that pools are not automatically imported. Setting this property caches the
723 pool configuration in a different location that can later be imported with
724 "\fBzpool import -c\fR". Setting it to the special value "\fBnone\fR" creates a
725 temporary pool that is never cached, and the special value \fB\&''\fR (empty
726 string) uses the default location.
727 .sp
728 Multiple pools can share the same cache file. Because the kernel destroys and
729 recreates this file when pools are added and removed, care should be taken when
730 attempting to access this file. When the last pool using a \fBcachefile\fR is
731 exported or destroyed, the file is removed.
732 .RE
733
734 .sp
735 .ne 2
736 .na
737 \fB\fBcomment\fR=\fB\fItext\fR\fR
738 .ad
739 .RS 4n
740 A text string consisting of printable ASCII characters that will be stored
741 such that it is available even if the pool becomes faulted. An administrator
742 can provide additional information about a pool using this property.
743 .RE
744
745 .sp
746 .ne 2
747 .na
748 \fB\fBdelegation\fR=\fBon\fR | \fBoff\fR\fR
749 .ad
750 .sp .6
751 .RS 4n
752 Controls whether a non-privileged user is granted access based on the dataset
753 permissions defined on the dataset. See \fBzfs\fR(1M) for more information on
754 \fBZFS\fR delegated administration.
755 .RE
756
757 .sp
758 .ne 2
759 .na
760 \fB\fBfailmode\fR=\fBwait\fR | \fBcontinue\fR | \fBpanic\fR\fR
761 .ad
762 .sp .6
763 .RS 4n
764 Controls the system behavior in the event of catastrophic pool failure. This
765 condition is typically a result of a loss of connectivity to the underlying
766 storage device(s) or a failure of all devices within the pool. The behavior of
767 such an event is determined as follows:
768 .sp
769 .ne 2
770 .na
771 \fB\fBwait\fR\fR
772 .ad
773 .RS 12n
774 Blocks all \fBI/O\fR access until the device connectivity is recovered and the
775 errors are cleared. This is the default behavior.
776 .RE
777
778 .sp
779 .ne 2
780 .na
781 \fB\fBcontinue\fR\fR
782 .ad
783 .RS 12n
784 Returns \fBEIO\fR to any new write \fBI/O\fR requests but allows reads to any
785 of the remaining healthy devices. Any write requests that have yet to be
786 committed to disk would be blocked.
787 .RE
788
789 .sp
790 .ne 2
791 .na
792 \fB\fBpanic\fR\fR
793 .ad
794 .RS 12n
795 Prints out a message to the console and generates a system crash dump.
796 .RE
797
798 .RE
799
800 .sp
801 .ne 2
802 .na
803 \fB\fBfeature@\fR\fIfeature_name\fR=\fBenabled\fR\fR
804 .ad
805 .RS 4n
806 The value of this property is the current state of \fIfeature_name\fR. The
807 only valid value when setting this property is \fBenabled\fR which moves
808 \fIfeature_name\fR to the enabled state. See \fBzpool-features\fR(5) for
809 details on feature states.
810 .RE
811
812 .sp
813 .ne 2
814 .na
815 \fB\fBlistsnaps\fR=on | off\fR
816 .ad
817 .sp .6
818 .RS 4n
819 Controls whether information about snapshots associated with this pool is
820 output when "\fBzfs list\fR" is run without the \fB-t\fR option. The default
821 value is "off".
822 .RE
823
824 .sp
825 .ne 2
826 .na
827 \fB\fBversion\fR=\fIversion\fR\fR
828 .ad
829 .sp .6
830 .RS 4n
831 The current on-disk version of the pool. This can be increased, but never
832 decreased. The preferred method of updating pools is with the "\fBzpool
833 upgrade\fR" command, though this property can be used when a specific version
834 is needed for backwards compatibility. Once feature flags are enabled on a
835 pool, this property will no longer have a value.
836 .RE
837
838 .SS "Subcommands"
839 .sp
840 .LP
841 All subcommands that modify state are logged persistently to the pool in their
842 original form.
843 .sp
844 .LP
845 The \fBzpool\fR command provides subcommands to create and destroy storage
846 pools, add capacity to storage pools, and provide information about the storage
847 pools. The following subcommands are supported:
848 .sp
849 .ne 2
850 .na
851 \fB\fBzpool\fR \fB-?\fR\fR
852 .ad
853 .sp .6
854 .RS 4n
855 Displays a help message.
856 .RE
857
858 .sp
859 .ne 2
860 .na
861 \fB\fBzpool add\fR [\fB-fn\fR] \fIpool\fR \fIvdev\fR ...\fR
862 .ad
863 .sp .6
864 .RS 4n
865 Adds the specified virtual devices to the given pool. The \fIvdev\fR
866 specification is described in the "Virtual Devices" section. The behavior of
867 the \fB-f\fR option, and the device checks performed are described in the
868 "zpool create" subcommand.
869 .sp
870 .ne 2
871 .na
872 \fB\fB-f\fR\fR
873 .ad
874 .RS 6n
875 Forces use of \fBvdev\fRs, even if they appear in use or specify a conflicting
876 replication level. Not all devices can be overridden in this manner.
877 .RE
878
879 .sp
880 .ne 2
881 .na
882 \fB\fB-n\fR\fR
883 .ad
884 .RS 6n
885 Displays the configuration that would be used without actually adding the
886 \fBvdev\fRs. The actual pool creation can still fail due to insufficient
887 privileges or device sharing.
888 .RE
889
890 Do not add a disk that is currently configured as a quorum device to a zpool.
891 After a disk is in the pool, that disk can then be configured as a quorum
892 device.
893 .RE
894
895 .sp
896 .ne 2
897 .na
898 \fB\fBzpool attach\fR [\fB-f\fR] \fIpool\fR \fIdevice\fR \fInew_device\fR\fR
899 .ad
900 .sp .6
901 .RS 4n
902 Attaches \fInew_device\fR to an existing \fBzpool\fR device. The existing
903 device cannot be part of a \fBraidz\fR configuration. If \fIdevice\fR is not
904 currently part of a mirrored configuration, \fIdevice\fR automatically
905 transforms into a two-way mirror of \fIdevice\fR and \fInew_device\fR. If
906 \fIdevice\fR is part of a two-way mirror, attaching \fInew_device\fR creates a
907 three-way mirror, and so on. In either case, \fInew_device\fR begins to
908 resilver immediately.
909 .sp
910 .ne 2
911 .na
912 \fB\fB-f\fR\fR
913 .ad
914 .RS 6n
915 Forces use of \fInew_device\fR, even if it appears to be in use. Not all
916 devices can be overridden in this manner.
917 .RE
918
919 .RE
920
921 .sp
922 .ne 2
923 .na
924 \fB\fBzpool clear\fR \fIpool\fR [\fIdevice\fR] ...\fR
925 .ad
926 .sp .6
927 .RS 4n
928 Clears device errors in a pool. If no arguments are specified, all device
929 errors within the pool are cleared. If one or more devices is specified, only
930 those errors associated with the specified device or devices are cleared.
931 .RE
932
933 .sp
934 .ne 2
935 .na
936 \fB\fBzpool create\fR [\fB-fnd\fR] [\fB-o\fR \fIproperty=value\fR] ... [\fB-O\fR
937 \fIfile-system-property=value\fR] ... [\fB-m\fR \fImountpoint\fR] [\fB-R\fR
938 \fIroot\fR] \fIpool\fR \fIvdev\fR ...\fR
939 .ad
940 .sp .6
941 .RS 4n
942 Creates a new storage pool containing the virtual devices specified on the
943 command line. The pool name must begin with a letter, and can only contain
944 alphanumeric characters as well as underscore ("_"), dash ("-"), and period
945 ("."). The pool names "mirror", "raidz", "spare" and "log" are reserved, as are
946 names beginning with the pattern "c[0-9]". The \fBvdev\fR specification is
947 described in the "Virtual Devices" section.
948 .sp
949 The command verifies that each device specified is accessible and not currently
950 in use by another subsystem. There are some uses, such as being currently
951 mounted, or specified as the dedicated dump device, that prevent a device from
952 ever being used by \fBZFS\fR. Other uses, such as having a preexisting
953 \fBUFS\fR file system, can be overridden with the \fB-f\fR option.
954 .sp
955 The command also checks that the replication strategy for the pool is
956 consistent. An attempt to combine redundant and non-redundant storage in a
957 single pool, or to mix disks and files, results in an error unless \fB-f\fR is
958 specified. The use of differently sized devices within a single \fBraidz\fR or
959 mirror group is also flagged as an error unless \fB-f\fR is specified.
960 .sp
961 Unless the \fB-R\fR option is specified, the default mount point is
962 "/\fIpool\fR". The mount point must not exist or must be empty, or else the
963 root dataset cannot be mounted. This can be overridden with the \fB-m\fR
964 option.
965 .sp
966 By default all supported features are enabled on the new pool unless the
967 \fB-d\fR option is specified.
968 .sp
969 .ne 2
970 .na
971 \fB\fB-f\fR\fR
972 .ad
973 .sp .6
974 .RS 4n
975 Forces use of \fBvdev\fRs, even if they appear in use or specify a conflicting
976 replication level. Not all devices can be overridden in this manner.
977 .RE
978
979 .sp
980 .ne 2
981 .na
982 \fB\fB-n\fR\fR
983 .ad
984 .sp .6
985 .RS 4n
986 Displays the configuration that would be used without actually creating the
987 pool. The actual pool creation can still fail due to insufficient privileges or
988 device sharing.
989 .RE
990
991 .sp
992 .ne 2
993 .na
994 \fB\fB-d\fR\fR
995 .ad
996 .sp .6
997 .RS 4n
998 Do not enable any features on the new pool. Individual features can be enabled
999 by setting their corresponding properties to \fBenabled\fR with the \fB-o\fR
1000 option. See \fBzpool-features\fR(5) for details about feature properties.
1001 .RE
1002
1003 .sp
1004 .ne 2
1005 .na
1006 \fB\fB-o\fR \fIproperty=value\fR [\fB-o\fR \fIproperty=value\fR] ...\fR
1007 .ad
1008 .sp .6
1009 .RS 4n
1010 Sets the given pool properties. See the "Properties" section for a list of
1011 valid properties that can be set.
1012 .RE
1013
1014 .sp
1015 .ne 2
1016 .na
1017 \fB\fB-O\fR \fIfile-system-property=value\fR\fR
1018 .ad
1019 .br
1020 .na
1021 \fB[\fB-O\fR \fIfile-system-property=value\fR] ...\fR
1022 .ad
1023 .sp .6
1024 .RS 4n
1025 Sets the given file system properties in the root file system of the pool. See
1026 the "Properties" section of \fBzfs\fR(1M) for a list of valid properties that
1027 can be set.
1028 .RE
1029
1030 .sp
1031 .ne 2
1032 .na
1033 \fB\fB-R\fR \fIroot\fR\fR
1034 .ad
1035 .sp .6
1036 .RS 4n
1037 Equivalent to "-o cachefile=none,altroot=\fIroot\fR"
1038 .RE
1039
1040 .sp
1041 .ne 2
1042 .na
1043 \fB\fB-m\fR \fImountpoint\fR\fR
1044 .ad
1045 .sp .6
1046 .RS 4n
1047 Sets the mount point for the root dataset. The default mount point is
1048 "/\fIpool\fR" or "\fBaltroot\fR/\fIpool\fR" if \fBaltroot\fR is specified. The
1049 mount point must be an absolute path, "\fBlegacy\fR", or "\fBnone\fR". For more
1050 information on dataset mount points, see \fBzfs\fR(1M).
1051 .RE
1052
1053 .RE
1054
1055 .sp
1056 .ne 2
1057 .na
1058 \fB\fBzpool destroy\fR [\fB-f\fR] \fIpool\fR\fR
1059 .ad
1060 .sp .6
1061 .RS 4n
1062 Destroys the given pool, freeing up any devices for other use. This command
1063 tries to unmount any active datasets before destroying the pool.
1064 .sp
1065 .ne 2
1066 .na
1067 \fB\fB-f\fR\fR
1068 .ad
1069 .RS 6n
1070 Forces any active datasets contained within the pool to be unmounted.
1071 .RE
1072
1073 .RE
1074
1075 .sp
1076 .ne 2
1077 .na
1078 \fB\fBzpool detach\fR \fIpool\fR \fIdevice\fR\fR
1079 .ad
1080 .sp .6
1081 .RS 4n
1082 Detaches \fIdevice\fR from a mirror. The operation is refused if there are no
1083 other valid replicas of the data.
1084 .RE
1085
1086 .sp
1087 .ne 2
1088 .na
1089 \fB\fBzpool export\fR [\fB-f\fR] \fIpool\fR ...\fR
1090 .ad
1091 .sp .6
1092 .RS 4n
1093 Exports the given pools from the system. All devices are marked as exported,
1094 but are still considered in use by other subsystems. The devices can be moved
1095 between systems (even those of different endianness) and imported as long as a
1096 sufficient number of devices are present.
1097 .sp
1098 Before exporting the pool, all datasets within the pool are unmounted. A pool
1099 cannot be exported if it has a shared spare that is currently being used.
1100 .sp
1101 For pools to be portable, you must give the \fBzpool\fR command whole disks,
1102 not just slices, so that \fBZFS\fR can label the disks with portable \fBEFI\fR
1103 labels. Otherwise, disk drivers on platforms of different endianness will not
1104 recognize the disks.
1105 .sp
1106 .ne 2
1107 .na
1108 \fB\fB-f\fR\fR
1109 .ad
1110 .RS 6n
1111 Forcefully unmount all datasets, using the "\fBunmount -f\fR" command.
1112 .sp
1113 This command will forcefully export the pool even if it has a shared spare that
1114 is currently being used. This may lead to potential data corruption.
1115 .RE
1116
1117 .RE
1118
1119 .sp
1120 .ne 2
1121 .na
1122 \fB\fBzpool get\fR "\fIall\fR" | \fIproperty\fR[,...] \fIpool\fR ...\fR
1123 .ad
1124 .sp .6
1125 .RS 4n
1126 Retrieves the given list of properties (or all properties if "\fBall\fR" is
1127 used) for the specified storage pool(s). These properties are displayed with
1128 the following fields:
1129 .sp
1130 .in +2
1131 .nf
1132 name Name of storage pool
1133 property Property name
1134 value Property value
1135 source Property source, either 'default' or 'local'.
1136 .fi
1137 .in -2
1138 .sp
1139
1140 See the "Properties" section for more information on the available pool
1141 properties.
1142 .RE
1143
1144 .sp
1145 .ne 2
1146 .na
1147 \fB\fBzpool history\fR [\fB-il\fR] [\fIpool\fR] ...\fR
1148 .ad
1149 .sp .6
1150 .RS 4n
1151 Displays the command history of the specified pools or all pools if no pool is
1152 specified.
1153 .sp
1154 .ne 2
1155 .na
1156 \fB\fB-i\fR\fR
1157 .ad
1158 .RS 6n
1159 Displays internally logged \fBZFS\fR events in addition to user initiated
1160 events.
1161 .RE
1162
1163 .sp
1164 .ne 2
1165 .na
1166 \fB\fB-l\fR\fR
1167 .ad
1168 .RS 6n
1169 Displays log records in long format, which in addition to standard format
1170 includes the user name, the hostname, and the zone in which the operation was
1171 performed.
1172 .RE
1173
1174 .RE
1175
1176 .sp
1177 .ne 2
1178 .na
1179 \fB\fBzpool import\fR [\fB-d\fR \fIdir\fR | \fB-c\fR \fIcachefile\fR]
1180 [\fB-D\fR]\fR
1181 .ad
1182 .sp .6
1183 .RS 4n
1184 Lists pools available to import. If the \fB-d\fR option is not specified, this
1185 command searches for devices in "/dev/dsk". The \fB-d\fR option can be
1186 specified multiple times, and all directories are searched. If the device
1187 appears to be part of an exported pool, this command displays a summary of the
1188 pool with the name of the pool, a numeric identifier, as well as the \fIvdev\fR
1189 layout and current health of the device for each device or file. Destroyed
1190 pools, pools that were previously destroyed with the "\fBzpool destroy\fR"
1191 command, are not listed unless the \fB-D\fR option is specified.
1192 .sp
1193 The numeric identifier is unique, and can be used instead of the pool name when
1194 multiple exported pools of the same name are available.
1195 .sp
1196 .ne 2
1197 .na
1198 \fB\fB-c\fR \fIcachefile\fR\fR
1199 .ad
1200 .RS 16n
1201 Reads configuration from the given \fBcachefile\fR that was created with the
1202 "\fBcachefile\fR" pool property. This \fBcachefile\fR is used instead of
1203 searching for devices.
1204 .RE
1205
1206 .sp
1207 .ne 2
1208 .na
1209 \fB\fB-d\fR \fIdir\fR\fR
1210 .ad
1211 .RS 16n
1212 Searches for devices or files in \fIdir\fR. The \fB-d\fR option can be
1213 specified multiple times.
1214 .RE
1215
1216 .sp
1217 .ne 2
1218 .na
1219 \fB\fB-D\fR\fR
1220 .ad
1221 .RS 16n
1222 Lists destroyed pools only.
1223 .RE
1224
1225 .RE
1226
1227 .sp
1228 .ne 2
1229 .na
1230 \fB\fBzpool import\fR [\fB-o\fR \fImntopts\fR] [ \fB-o\fR
1231 \fIproperty\fR=\fIvalue\fR] ... [\fB-d\fR \fIdir\fR | \fB-c\fR \fIcachefile\fR]
1232 [\fB-D\fR] [\fB-f\fR] [\fB-R\fR \fIroot\fR] \fB-a\fR\fR
1233 .ad
1234 .sp .6
1235 .RS 4n
1236 Imports all pools found in the search directories. Identical to the previous
1237 command, except that all pools with a sufficient number of devices available
1238 are imported. Destroyed pools, pools that were previously destroyed with the
1239 "\fBzpool destroy\fR" command, will not be imported unless the \fB-D\fR option
1240 is specified.
1241 .sp
1242 .ne 2
1243 .na
1244 \fB\fB-o\fR \fImntopts\fR\fR
1245 .ad
1246 .RS 21n
1247 Comma-separated list of mount options to use when mounting datasets within the
1248 pool. See \fBzfs\fR(1M) for a description of dataset properties and mount
1249 options.
1250 .RE
1251
1252 .sp
1253 .ne 2
1254 .na
1255 \fB\fB-o\fR \fIproperty=value\fR\fR
1256 .ad
1257 .RS 21n
1258 Sets the specified property on the imported pool. See the "Properties" section
1259 for more information on the available pool properties.
1260 .RE
1261
1262 .sp
1263 .ne 2
1264 .na
1265 \fB\fB-c\fR \fIcachefile\fR\fR
1266 .ad
1267 .RS 21n
1268 Reads configuration from the given \fBcachefile\fR that was created with the
1269 "\fBcachefile\fR" pool property. This \fBcachefile\fR is used instead of
1270 searching for devices.
1271 .RE
1272
1273 .sp
1274 .ne 2
1275 .na
1276 \fB\fB-d\fR \fIdir\fR\fR
1277 .ad
1278 .RS 21n
1279 Searches for devices or files in \fIdir\fR. The \fB-d\fR option can be
1280 specified multiple times. This option is incompatible with the \fB-c\fR option.
1281 .RE
1282
1283 .sp
1284 .ne 2
1285 .na
1286 \fB\fB-D\fR\fR
1287 .ad
1288 .RS 21n
1289 Imports destroyed pools only. The \fB-f\fR option is also required.
1290 .RE
1291
1292 .sp
1293 .ne 2
1294 .na
1295 \fB\fB-f\fR\fR
1296 .ad
1297 .RS 21n
1298 Forces import, even if the pool appears to be potentially active.
1299 .RE
1300
1301 .sp
1302 .ne 2
1303 .na
1304 \fB\fB-a\fR\fR
1305 .ad
1306 .RS 21n
1307 Searches for and imports all pools found.
1308 .RE
1309
1310 .sp
1311 .ne 2
1312 .na
1313 \fB\fB-R\fR \fIroot\fR\fR
1314 .ad
1315 .RS 21n
1316 Sets the "\fBcachefile\fR" property to "\fBnone\fR" and the "\fIaltroot\fR"
1317 property to "\fIroot\fR".
1318 .RE
1319
1320 .RE
1321
1322 .sp
1323 .ne 2
1324 .na
1325 \fB\fBzpool import\fR [\fB-o\fR \fImntopts\fR] [ \fB-o\fR
1326 \fIproperty\fR=\fIvalue\fR] ... [\fB-d\fR \fIdir\fR | \fB-c\fR \fIcachefile\fR]
1327 [\fB-D\fR] [\fB-f\fR] [\fB-R\fR \fIroot\fR] \fIpool\fR | \fIid\fR
1328 [\fInewpool\fR]\fR
1329 .ad
1330 .sp .6
1331 .RS 4n
1332 Imports a specific pool. A pool can be identified by its name or the numeric
1333 identifier. If \fInewpool\fR is specified, the pool is imported using the name
1334 \fInewpool\fR. Otherwise, it is imported with the same name as its exported
1335 name.
1336 .sp
1337 If a device is removed from a system without running "\fBzpool export\fR"
1338 first, the device appears as potentially active. It cannot be determined if
1339 this was a failed export, or whether the device is really in use from another
1340 host. To import a pool in this state, the \fB-f\fR option is required.
1341 .sp
1342 .ne 2
1343 .na
1344 \fB\fB-o\fR \fImntopts\fR\fR
1345 .ad
1346 .sp .6
1347 .RS 4n
1348 Comma-separated list of mount options to use when mounting datasets within the
1349 pool. See \fBzfs\fR(1M) for a description of dataset properties and mount
1350 options.
1351 .RE
1352
1353 .sp
1354 .ne 2
1355 .na
1356 \fB\fB-o\fR \fIproperty=value\fR\fR
1357 .ad
1358 .sp .6
1359 .RS 4n
1360 Sets the specified property on the imported pool. See the "Properties" section
1361 for more information on the available pool properties.
1362 .RE
1363
1364 .sp
1365 .ne 2
1366 .na
1367 \fB\fB-c\fR \fIcachefile\fR\fR
1368 .ad
1369 .sp .6
1370 .RS 4n
1371 Reads configuration from the given \fBcachefile\fR that was created with the
1372 "\fBcachefile\fR" pool property. This \fBcachefile\fR is used instead of
1373 searching for devices.
1374 .RE
1375
1376 .sp
1377 .ne 2
1378 .na
1379 \fB\fB-d\fR \fIdir\fR\fR
1380 .ad
1381 .sp .6
1382 .RS 4n
1383 Searches for devices or files in \fIdir\fR. The \fB-d\fR option can be
1384 specified multiple times. This option is incompatible with the \fB-c\fR option.
1385 .RE
1386
1387 .sp
1388 .ne 2
1389 .na
1390 \fB\fB-D\fR\fR
1391 .ad
1392 .sp .6
1393 .RS 4n
1394 Imports destroyed pool. The \fB-f\fR option is also required.
1395 .RE
1396
1397 .sp
1398 .ne 2
1399 .na
1400 \fB\fB-f\fR\fR
1401 .ad
1402 .sp .6
1403 .RS 4n
1404 Forces import, even if the pool appears to be potentially active.
1405 .RE
1406
1407 .sp
1408 .ne 2
1409 .na
1410 \fB\fB-R\fR \fIroot\fR\fR
1411 .ad
1412 .sp .6
1413 .RS 4n
1414 Sets the "\fBcachefile\fR" property to "\fBnone\fR" and the "\fIaltroot\fR"
1415 property to "\fIroot\fR".
1416 .RE
1417
1418 .RE
1419
1420 .sp
1421 .ne 2
1422 .na
1423 \fB\fBzpool iostat\fR [\fB-T\fR \fBu\fR | \fBd\fR] [\fB-v\fR] [\fIpool\fR] ...
1424 [\fIinterval\fR[\fIcount\fR]]\fR
1425 .ad
1426 .sp .6
1427 .RS 4n
1428 Displays \fBI/O\fR statistics for the given pools. When given an interval, the
1429 statistics are printed every \fIinterval\fR seconds until \fBCtrl-C\fR is
1430 pressed. If no \fIpools\fR are specified, statistics for every pool in the
1431 system are shown. If \fIcount\fR is specified, the command exits after
1432 \fIcount\fR reports are printed.
1433 .sp
1434 .ne 2
1435 .na
1436 \fB\fB-T\fR \fBu\fR | \fBd\fR\fR
1437 .ad
1438 .RS 12n
1439 Display a time stamp.
1440 .sp
1441 Specify \fBu\fR for a printed representation of the internal representation of
1442 time. See \fBtime\fR(2). Specify \fBd\fR for standard date format. See
1443 \fBdate\fR(1).
1444 .RE
1445
1446 .sp
1447 .ne 2
1448 .na
1449 \fB\fB-v\fR\fR
1450 .ad
1451 .RS 12n
1452 Verbose statistics. Reports usage statistics for individual \fIvdevs\fR within
1453 the pool, in addition to the pool-wide statistics.
1454 .RE
1455
1456 .RE
1457
1458 .sp
1459 .ne 2
1460 .na
1461 \fB\fBzpool list\fR [\fB-Hv\fR] [\fB-o\fR \fIprops\fR[,...]] [\fIpool\fR] ...\fR
1462 .ad
1463 .sp .6
1464 .RS 4n
1465 Lists the given pools along with a health status and space usage. When given no
1466 arguments, all pools in the system are listed.
1467 .sp
1468 .ne 2
1469 .na
1470 \fB\fB-H\fR\fR
1471 .ad
1472 .RS 12n
1473 Scripted mode. Do not display headers, and separate fields by a single tab
1474 instead of arbitrary space.
1475 .RE
1476
1477 .sp
1478 .ne 2
1479 .na
1480 \fB\fB-o\fR \fIprops\fR\fR
1481 .ad
1482 .RS 12n
1483 Comma-separated list of properties to display. See the "Properties" section for
1484 a list of valid properties. The default list is "name, size, used, available,
1485 expandsize, capacity, dedupratio, health, altroot"
1486 .RE
1487
1488 .sp
1489 .ne 2
1490 .na
1491 \fB\fB-v\fR\fR
1492 .ad
1493 .RS 12n
1494 Verbose statistics. Reports usage statistics for individual \fIvdevs\fR within
1495 the pool, in addition to the pool-wide statistics.
1496 .RE
1497
1498 .RE
1499
1500 .sp
1501 .ne 2
1502 .na
1503 \fB\fBzpool offline\fR [\fB-t\fR] \fIpool\fR \fIdevice\fR ...\fR
1504 .ad
1505 .sp .6
1506 .RS 4n
1507 Takes the specified physical device offline. While the \fIdevice\fR is offline,
1508 no attempt is made to read or write to the device.
1509 .sp
1510 This command is not applicable to spares or cache devices.
1511 .sp
1512 .ne 2
1513 .na
1514 \fB\fB-t\fR\fR
1515 .ad
1516 .RS 6n
1517 Temporary. Upon reboot, the specified physical device reverts to its previous
1518 state.
1519 .RE
1520
1521 .RE
1522
1523 .sp
1524 .ne 2
1525 .na
1526 \fB\fBzpool online\fR [\fB-e\fR] \fIpool\fR \fIdevice\fR...\fR
1527 .ad
1528 .sp .6
1529 .RS 4n
1530 Brings the specified physical device online.
1531 .sp
1532 This command is not applicable to spares or cache devices.
1533 .sp
1534 .ne 2
1535 .na
1536 \fB\fB-e\fR\fR
1537 .ad
1538 .RS 6n
1539 Expand the device to use all available space. If the device is part of a mirror
1540 or \fBraidz\fR then all devices must be expanded before the new space will
1541 become available to the pool.
1542 .RE
1543
1544 .RE
1545
1546 .sp
1547 .ne 2
1548 .na
1549 \fB\fBzpool reguid\fR \fIpool\fR\fR
1550 .ad
1551 .sp .6
1552 .RS 4n
1553 Generates a new unique identifier for the pool. You must ensure that all devices in this pool are online and
1554 healthy before performing this action.
1555 .RE
1556
1557 .sp
1558 .ne 2
1559 .na
1560 \fB\fBzpool remove\fR \fIpool\fR \fIdevice\fR ...\fR
1561 .ad
1562 .sp .6
1563 .RS 4n
1564 Removes the specified device from the pool. This command currently only
1565 supports removing hot spares, cache, and log devices. A mirrored log device can
1566 be removed by specifying the top-level mirror for the log. Non-log devices that
1567 are part of a mirrored configuration can be removed using the \fBzpool
1568 detach\fR command. Non-redundant and \fBraidz\fR devices cannot be removed from
1569 a pool.
1570 .RE
1571
1572 .sp
1573 .ne 2
1574 .na
1575 \fB\fBzpool replace\fR [\fB-f\fR] \fIpool\fR \fIold_device\fR
1576 [\fInew_device\fR]\fR
1577 .ad
1578 .sp .6
1579 .RS 4n
1580 Replaces \fIold_device\fR with \fInew_device\fR. This is equivalent to
1581 attaching \fInew_device\fR, waiting for it to resilver, and then detaching
1582 \fIold_device\fR.
1583 .sp
1584 The size of \fInew_device\fR must be greater than or equal to the minimum size
1585 of all the devices in a mirror or \fBraidz\fR configuration.
1586 .sp
1587 \fInew_device\fR is required if the pool is not redundant. If \fInew_device\fR
1588 is not specified, it defaults to \fIold_device\fR. This form of replacement is
1589 useful after an existing disk has failed and has been physically replaced. In
1590 this case, the new disk may have the same \fB/dev/dsk\fR path as the old
1591 device, even though it is actually a different disk. \fBZFS\fR recognizes this.
1592 .sp
1593 .ne 2
1594 .na
1595 \fB\fB-f\fR\fR
1596 .ad
1597 .RS 6n
1598 Forces use of \fInew_device\fR, even if it appears to be in use. Not all
1599 devices can be overridden in this manner.
1600 .RE
1601
1602 .RE
1603
1604 .sp
1605 .ne 2
1606 .na
1607 \fB\fBzpool scrub\fR [\fB-s\fR] \fIpool\fR ...\fR
1608 .ad
1609 .sp .6
1610 .RS 4n
1611 Begins a scrub. The scrub examines all data in the specified pools to verify
1612 that it checksums correctly. For replicated (mirror or \fBraidz\fR) devices,
1613 \fBZFS\fR automatically repairs any damage discovered during the scrub. The
1614 "\fBzpool status\fR" command reports the progress of the scrub and summarizes
1615 the results of the scrub upon completion.
1616 .sp
1617 Scrubbing and resilvering are very similar operations. The difference is that
1618 resilvering only examines data that \fBZFS\fR knows to be out of date (for
1619 example, when attaching a new device to a mirror or replacing an existing
1620 device), whereas scrubbing examines all data to discover silent errors due to
1621 hardware faults or disk failure.
1622 .sp
1623 Because scrubbing and resilvering are \fBI/O\fR-intensive operations, \fBZFS\fR
1624 only allows one at a time. If a scrub is already in progress, the "\fBzpool
1625 scrub\fR" command terminates it and starts a new scrub. If a resilver is in
1626 progress, \fBZFS\fR does not allow a scrub to be started until the resilver
1627 completes.
1628 .sp
1629 .ne 2
1630 .na
1631 \fB\fB-s\fR\fR
1632 .ad
1633 .RS 6n
1634 Stop scrubbing.
1635 .RE
1636
1637 .RE
1638
1639 .sp
1640 .ne 2
1641 .na
1642 \fB\fBzpool set\fR \fIproperty\fR=\fIvalue\fR \fIpool\fR\fR
1643 .ad
1644 .sp .6
1645 .RS 4n
1646 Sets the given property on the specified pool. See the "Properties" section for
1647 more information on what properties can be set and acceptable values.
1648 .RE
1649
1650 .sp
1651 .ne 2
1652 .na
1653 \fB\fBzpool status\fR [\fB-xv\fR] [\fIpool\fR] ...\fR
1654 .ad
1655 .sp .6
1656 .RS 4n
1657 Displays the detailed health status for the given pools. If no \fIpool\fR is
1658 specified, then the status of each pool in the system is displayed. For more
1659 information on pool and device health, see the "Device Failure and Recovery"
1660 section.
1661 .sp
1662 If a scrub or resilver is in progress, this command reports the percentage done
1663 and the estimated time to completion. Both of these are only approximate,
1664 because the amount of data in the pool and the other workloads on the system
1665 can change.
1666 .sp
1667 .ne 2
1668 .na
1669 \fB\fB-x\fR\fR
1670 .ad
1671 .RS 6n
1672 Only display status for pools that are exhibiting errors or are otherwise
1673 unavailable.
1674 .RE
1675
1676 .sp
1677 .ne 2
1678 .na
1679 \fB\fB-v\fR\fR
1680 .ad
1681 .RS 6n
1682 Displays verbose data error information, printing out a complete list of all
1683 data errors since the last complete pool scrub.
1684 .RE
1685
1686 .RE
1687
1688 .sp
1689 .ne 2
1690 .na
1691 \fB\fBzpool upgrade\fR\fR
1692 .ad
1693 .sp .6
1694 .RS 4n
1695 Displays all pools formatted using a different \fBZFS\fR on-disk version. Older
1696 versions can continue to be used, but some features may not be available. These
1697 pools can be upgraded using "\fBzpool upgrade -a\fR". Pools that are formatted
1698 with a more recent version are also displayed, although these pools will be
1699 inaccessible on the system.
1700 .RE
1701
1702 .sp
1703 .ne 2
1704 .na
1705 \fB\fBzpool upgrade\fR \fB-v\fR\fR
1706 .ad
1707 .sp .6
1708 .RS 4n
1709 Displays \fBZFS\fR versions supported by the current software. The current
1710 \fBZFS\fR versions and all previous supported versions are displayed, along
1711 with an explanation of the features provided with each version.
1712 .RE
1713
1714 .sp
1715 .ne 2
1716 .na
1717 \fB\fBzpool upgrade\fR [\fB-V\fR \fIversion\fR] \fB-a\fR | \fIpool\fR ...\fR
1718 .ad
1719 .sp .6
1720 .RS 4n
1721 Upgrades the given pool to the latest on-disk version. Once this is done, the
1722 pool will no longer be accessible on systems running older versions of the
1723 software.
1724 .sp
1725 .ne 2
1726 .na
1727 \fB\fB-a\fR\fR
1728 .ad
1729 .RS 14n
1730 Upgrades all pools.
1731 .RE
1732
1733 .sp
1734 .ne 2
1735 .na
1736 \fB\fB-V\fR \fIversion\fR\fR
1737 .ad
1738 .RS 14n
1739 Upgrade to the specified version. If the \fB-V\fR flag is not specified, the
1740 pool is upgraded to the most recent version. This option can only be used to
1741 increase the version number, and only up to the most recent version supported
1742 by this software.
1743 .RE
1744
1745 .RE
1746
1747 .SH EXAMPLES
1748 .LP
1749 \fBExample 1 \fRCreating a RAID-Z Storage Pool
1750 .sp
1751 .LP
1752 The following command creates a pool with a single \fBraidz\fR root \fIvdev\fR
1753 that consists of six disks.
1754
1755 .sp
1756 .in +2
1757 .nf
1758 # \fBzpool create tank raidz c0t0d0 c0t1d0 c0t2d0 c0t3d0 c0t4d0 c0t5d0\fR
1759 .fi
1760 .in -2
1761 .sp
1762
1763 .LP
1764 \fBExample 2 \fRCreating a Mirrored Storage Pool
1765 .sp
1766 .LP
1767 The following command creates a pool with two mirrors, where each mirror
1768 contains two disks.
1769
1770 .sp
1771 .in +2
1772 .nf
1773 # \fBzpool create tank mirror c0t0d0 c0t1d0 mirror c0t2d0 c0t3d0\fR
1774 .fi
1775 .in -2
1776 .sp
1777
1778 .LP
1779 \fBExample 3 \fRCreating a ZFS Storage Pool by Using Slices
1780 .sp
1781 .LP
1782 The following command creates an unmirrored pool using two disk slices.
1783
1784 .sp
1785 .in +2
1786 .nf
1787 # \fBzpool create tank /dev/dsk/c0t0d0s1 c0t1d0s4\fR
1788 .fi
1789 .in -2
1790 .sp
1791
1792 .LP
1793 \fBExample 4 \fRCreating a ZFS Storage Pool by Using Files
1794 .sp
1795 .LP
1796 The following command creates an unmirrored pool using files. While not
1797 recommended, a pool based on files can be useful for experimental purposes.
1798
1799 .sp
1800 .in +2
1801 .nf
1802 # \fBzpool create tank /path/to/file/a /path/to/file/b\fR
1803 .fi
1804 .in -2
1805 .sp
1806
1807 .LP
1808 \fBExample 5 \fRAdding a Mirror to a ZFS Storage Pool
1809 .sp
1810 .LP
1811 The following command adds two mirrored disks to the pool "\fItank\fR",
1812 assuming the pool is already made up of two-way mirrors. The additional space
1813 is immediately available to any datasets within the pool.
1814
1815 .sp
1816 .in +2
1817 .nf
1818 # \fBzpool add tank mirror c1t0d0 c1t1d0\fR
1819 .fi
1820 .in -2
1821 .sp
1822
1823 .LP
1824 \fBExample 6 \fRListing Available ZFS Storage Pools
1825 .sp
1826 .LP
1827 The following command lists all available pools on the system. In this case,
1828 the pool \fIzion\fR is faulted due to a missing device.
1829
1830 .sp
1831 .LP
1832 The results from this command are similar to the following:
1833
1834 .sp
1835 .in +2
1836 .nf
1837 # \fBzpool list\fR
1838 NAME SIZE ALLOC FREE EXPANDSZ CAP DEDUP HEALTH ALTROOT
1839 rpool 19.9G 8.43G 11.4G - 42% 1.00x ONLINE -
1840 tank 61.5G 20.0G 41.5G - 32% 1.00x ONLINE -
1841 zion - - - - - - FAULTED -
1842 .fi
1843 .in -2
1844 .sp
1845
1846 .LP
1847 \fBExample 7 \fRDestroying a ZFS Storage Pool
1848 .sp
1849 .LP
1850 The following command destroys the pool "\fItank\fR" and any datasets contained
1851 within.
1852
1853 .sp
1854 .in +2
1855 .nf
1856 # \fBzpool destroy -f tank\fR
1857 .fi
1858 .in -2
1859 .sp
1860
1861 .LP
1862 \fBExample 8 \fRExporting a ZFS Storage Pool
1863 .sp
1864 .LP
1865 The following command exports the devices in pool \fItank\fR so that they can
1866 be relocated or later imported.
1867
1868 .sp
1869 .in +2
1870 .nf
1871 # \fBzpool export tank\fR
1872 .fi
1873 .in -2
1874 .sp
1875
1876 .LP
1877 \fBExample 9 \fRImporting a ZFS Storage Pool
1878 .sp
1879 .LP
1880 The following command displays available pools, and then imports the pool
1881 "tank" for use on the system.
1882
1883 .sp
1884 .LP
1885 The results from this command are similar to the following:
1886
1887 .sp
1888 .in +2
1889 .nf
1890 # \fBzpool import\fR
1891 pool: tank
1892 id: 15451357997522795478
1893 state: ONLINE
1894 action: The pool can be imported using its name or numeric identifier.
1895 config:
1896
1897 tank ONLINE
1898 mirror ONLINE
1899 c1t2d0 ONLINE
1900 c1t3d0 ONLINE
1901
1902 # \fBzpool import tank\fR
1903 .fi
1904 .in -2
1905 .sp
1906
1907 .LP
1908 \fBExample 10 \fRUpgrading All ZFS Storage Pools to the Current Version
1909 .sp
1910 .LP
1911 The following command upgrades all ZFS Storage pools to the current version of
1912 the software.
1913
1914 .sp
1915 .in +2
1916 .nf
1917 # \fBzpool upgrade -a\fR
1918 This system is currently running ZFS version 2.
1919 .fi
1920 .in -2
1921 .sp
1922
1923 .LP
1924 \fBExample 11 \fRManaging Hot Spares
1925 .sp
1926 .LP
1927 The following command creates a new pool with an available hot spare:
1928
1929 .sp
1930 .in +2
1931 .nf
1932 # \fBzpool create tank mirror c0t0d0 c0t1d0 spare c0t2d0\fR
1933 .fi
1934 .in -2
1935 .sp
1936
1937 .sp
1938 .LP
1939 If one of the disks were to fail, the pool would be reduced to the degraded
1940 state. The failed device can be replaced using the following command:
1941
1942 .sp
1943 .in +2
1944 .nf
1945 # \fBzpool replace tank c0t0d0 c0t3d0\fR
1946 .fi
1947 .in -2
1948 .sp
1949
1950 .sp
1951 .LP
1952 Once the data has been resilvered, the spare is automatically removed and is
1953 made available should another device fail. The hot spare can be permanently
1954 removed from the pool using the following command:
1955
1956 .sp
1957 .in +2
1958 .nf
1959 # \fBzpool remove tank c0t2d0\fR
1960 .fi
1961 .in -2
1962 .sp
1963
1964 .LP
1965 \fBExample 12 \fRCreating a ZFS Pool with Mirrored Separate Intent Logs
1966 .sp
1967 .LP
1968 The following command creates a ZFS storage pool consisting of two, two-way
1969 mirrors and mirrored log devices:
1970
1971 .sp
1972 .in +2
1973 .nf
1974 # \fBzpool create pool mirror c0d0 c1d0 mirror c2d0 c3d0 log mirror \e
1975 c4d0 c5d0\fR
1976 .fi
1977 .in -2
1978 .sp
1979
1980 .LP
1981 \fBExample 13 \fRAdding Cache Devices to a ZFS Pool
1982 .sp
1983 .LP
1984 The following command adds two disks for use as cache devices to a ZFS storage
1985 pool:
1986
1987 .sp
1988 .in +2
1989 .nf
1990 # \fBzpool add pool cache c2d0 c3d0\fR
1991 .fi
1992 .in -2
1993 .sp
1994
1995 .sp
1996 .LP
1997 Once added, the cache devices gradually fill with content from main memory.
1998 Depending on the size of your cache devices, it could take over an hour for
1999 them to fill. Capacity and reads can be monitored using the \fBiostat\fR option
2000 as follows:
2001
2002 .sp
2003 .in +2
2004 .nf
2005 # \fBzpool iostat -v pool 5\fR
2006 .fi
2007 .in -2
2008 .sp
2009
2010 .LP
2011 \fBExample 14 \fRRemoving a Mirrored Log Device
2012 .sp
2013 .LP
2014 The following command removes the mirrored log device \fBmirror-2\fR.
2015
2016 .sp
2017 .LP
2018 Given this configuration:
2019
2020 .sp
2021 .in +2
2022 .nf
2023 pool: tank
2024 state: ONLINE
2025 scrub: none requested
2026 config:
2027
2028 NAME STATE READ WRITE CKSUM
2029 tank ONLINE 0 0 0
2030 mirror-0 ONLINE 0 0 0
2031 c6t0d0 ONLINE 0 0 0
2032 c6t1d0 ONLINE 0 0 0
2033 mirror-1 ONLINE 0 0 0
2034 c6t2d0 ONLINE 0 0 0
2035 c6t3d0 ONLINE 0 0 0
2036 logs
2037 mirror-2 ONLINE 0 0 0
2038 c4t0d0 ONLINE 0 0 0
2039 c4t1d0 ONLINE 0 0 0
2040 .fi
2041 .in -2
2042 .sp
2043
2044 .sp
2045 .LP
2046 The command to remove the mirrored log \fBmirror-2\fR is:
2047
2048 .sp
2049 .in +2
2050 .nf
2051 # \fBzpool remove tank mirror-2\fR
2052 .fi
2053 .in -2
2054 .sp
2055
2056 .LP
2057 \fBExample 15 \fRDisplaying expanded space on a device
2058 .sp
2059 .LP
2060 The following command displays the detailed information for the \fIdata\fR
2061 pool. This pool is comprised of a single \fIraidz\fR vdev where one of its
2062 devices increased its capacity by 1GB. In this example, the pool will not
2063 be able to utilize this extra capacity until all the devices under the
2064 \fIraidz\fR vdev have been expanded.
2065
2066 .sp
2067 .in +2
2068 .nf
2069 # \fBzpool list -v data\fR
2070 NAME SIZE ALLOC FREE EXPANDSZ CAP DEDUP HEALTH ALTROOT
2071 data 17.9G 174K 17.9G - 0% 1.00x ONLINE -
2072 raidz1 17.9G 174K 17.9G -
2073 c4t2d0 - - - 1G
2074 c4t3d0 - - - -
2075 c4t4d0 - - - -
2076 .fi
2077 .in -2
2078
2079 .SH EXIT STATUS
2080 .sp
2081 .LP
2082 The following exit values are returned:
2083 .sp
2084 .ne 2
2085 .na
2086 \fB\fB0\fR\fR
2087 .ad
2088 .RS 5n
2089 Successful completion.
2090 .RE
2091
2092 .sp
2093 .ne 2
2094 .na
2095 \fB\fB1\fR\fR
2096 .ad
2097 .RS 5n
2098 An error occurred.
2099 .RE
2100
2101 .sp
2102 .ne 2
2103 .na
2104 \fB\fB2\fR\fR
2105 .ad
2106 .RS 5n
2107 Invalid command line options were specified.
2108 .RE
2109
2110 .SH ATTRIBUTES
2111 .sp
2112 .LP
2113 See \fBattributes\fR(5) for descriptions of the following attributes:
2114 .sp
2115
2116 .sp
2117 .TS
2118 box;
2119 c | c
2120 l | l .
2121 ATTRIBUTE TYPE ATTRIBUTE VALUE
2122 _
2123 Interface Stability Evolving
2124 .TE
2125
2126 .SH SEE ALSO
2127 .sp
2128 .LP
2129 \fBzfs\fR(1M), \fBzpool-features\fR(5), \fBattributes\fR(5)