1 '\" te
   2 .\" Copyright (c) 2007, Sun Microsystems, Inc. All Rights Reserved.
   3 .\" The contents of this file are subject to the terms of the Common Development and Distribution License (the "License"). You may not use this file except in compliance with the License. You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE or http://www.opensolaris.org/os/licensing.
   4 .\" See the License for the specific language governing permissions and limitations under the License. When distributing Covered Code, include this CDDL HEADER in each file and include the License file at usr/src/OPENSOLARIS.LICENSE. If applicable, add the following below this CDDL HEADER, with the
   5 .\" fields enclosed by brackets "[]" replaced with your own identifying information: Portions Copyright [yyyy] [name of copyright owner]
   6 .\" Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
   7 .TH ZPOOL 1M "Oct 25, 2011"
   8 .SH NAME
   9 zpool \- configures ZFS storage pools
  10 .SH SYNOPSIS
  11 .LP
  12 .nf
  13 \fBzpool\fR [\fB-?\fR]
  14 .fi
  15 
  16 .LP
  17 .nf
  18 \fBzpool add\fR [\fB-fn\fR] \fIpool\fR \fIvdev\fR ...
  19 .fi
  20 
  21 .LP
  22 .nf
  23 \fBzpool attach\fR [\fB-f\fR] \fIpool\fR \fIdevice\fR \fInew_device\fR
  24 .fi
  25 
  26 .LP
  27 .nf
  28 \fBzpool clear\fR \fIpool\fR [\fIdevice\fR] ...
  29 .fi
  30 
  31 .LP
  32 .nf
  33 \fBzpool create\fR [\fB-fn\fR] [\fB-o\fR \fIproperty=value\fR] ... [\fB-O\fR \fIfile-system-property=value\fR]
  34      ... [\fB-m\fR \fImountpoint\fR] [\fB-R\fR \fIroot\fR] \fIpool\fR \fIvdev\fR ...
  35 .fi
  36 
  37 .LP
  38 .nf
  39 \fBzpool destroy\fR [\fB-f\fR] \fIpool\fR
  40 .fi
  41 
  42 .LP
  43 .nf
  44 \fBzpool detach\fR \fIpool\fR \fIdevice\fR
  45 .fi
  46 
  47 .LP
  48 .nf
  49 \fBzpool export\fR [\fB-f\fR] \fIpool\fR ...
  50 .fi
  51 
  52 .LP
  53 .nf
  54 \fBzpool get\fR "\fIall\fR" | \fIproperty\fR[,...] \fIpool\fR ...
  55 .fi
  56 
  57 .LP
  58 .nf
  59 \fBzpool history\fR [\fB-il\fR] [\fIpool\fR] ...
  60 .fi
  61 
  62 .LP
  63 .nf
  64 \fBzpool import\fR [\fB-d\fR \fIdir\fR] [\fB-D\fR]
  65 .fi
  66 
  67 .LP
  68 .nf
  69 \fBzpool import\fR [\fB-o \fImntopts\fR\fR] [\fB-o\fR \fIproperty=value\fR] ... [\fB-d\fR \fIdir\fR | \fB-c\fR \fIcachefile\fR]
  70      [\fB-D\fR] [\fB-f\fR] [\fB-R\fR \fIroot\fR] \fB-a\fR
  71 .fi
  72 
  73 .LP
  74 .nf
  75 \fBzpool import\fR [\fB-o \fImntopts\fR\fR] [\fB-o\fR \fIproperty=value\fR] ... [\fB-d\fR \fIdir\fR | \fB-c\fR \fIcachefile\fR]
  76      [\fB-D\fR] [\fB-f\fR] [\fB-R\fR \fIroot\fR] \fIpool\fR | \fIid\fR [\fInewpool\fR]
  77 .fi
  78 
  79 .LP
  80 .nf
  81 \fBzpool iostat\fR [\fB-T\fR u | d] [\fB-v\fR] [\fIpool\fR] ... [\fIinterval\fR [\fIcount\fR]]
  82 .fi
  83 
  84 .LP
  85 .nf
  86 \fBzpool list\fR [\fB-H\fR] [\fB-o\fR \fIproperty\fR[,...]] [\fIpool\fR] ...
  87 .fi
  88 
  89 .LP
  90 .nf
  91 \fBzpool offline\fR [\fB-t\fR] \fIpool\fR \fIdevice\fR ...
  92 .fi
  93 
  94 .LP
  95 .nf
  96 \fBzpool online\fR \fIpool\fR \fIdevice\fR ...
  97 .fi
  98 
  99 .LP
 100 .nf
 101 \fBzpool reguid\fR \fIpool\fR
 102 .fi
 103 
 104 .LP
 105 .nf
 106 \fBzpool remove\fR \fIpool\fR \fIdevice\fR ...
 107 .fi
 108 
 109 .LP
 110 .nf
 111 \fBzpool replace\fR [\fB-f\fR] \fIpool\fR \fIdevice\fR [\fInew_device\fR]
 112 .fi
 113 
 114 .LP
 115 .nf
 116 \fBzpool scrub\fR [\fB-s\fR] \fIpool\fR ...
 117 .fi
 118 
 119 .LP
 120 .nf
 121 \fBzpool set\fR \fIproperty\fR=\fIvalue\fR \fIpool\fR
 122 .fi
 123 
 124 .LP
 125 .nf
 126 \fBzpool status\fR [\fB-xv\fR] [\fIpool\fR] ...
 127 .fi
 128 
 129 .LP
 130 .nf
 131 \fBzpool upgrade\fR
 132 .fi
 133 
 134 .LP
 135 .nf
 136 \fBzpool upgrade\fR \fB-v\fR
 137 .fi
 138 
 139 .LP
 140 .nf
 141 \fBzpool upgrade\fR [\fB-V\fR \fIversion\fR] \fB-a\fR | \fIpool\fR ...
 142 .fi
 143 
 144 .SH DESCRIPTION
 145 .sp
 146 .LP
 147 The \fBzpool\fR command configures \fBZFS\fR storage pools. A storage pool is a
 148 collection of devices that provides physical storage and data replication for
 149 \fBZFS\fR datasets.
 150 .sp
 151 .LP
 152 All datasets within a storage pool share the same space. See \fBzfs\fR(1M) for
 153 information on managing datasets.
 154 .SS "Virtual Devices (\fBvdev\fRs)"
 155 .sp
 156 .LP
 157 A "virtual device" describes a single device or a collection of devices
 158 organized according to certain performance and fault characteristics. The
 159 following virtual devices are supported:
 160 .sp
 161 .ne 2
 162 .na
 163 \fB\fBdisk\fR\fR
 164 .ad
 165 .RS 10n
 166 A block device, typically located under \fB/dev/dsk\fR. \fBZFS\fR can use
 167 individual slices or partitions, though the recommended mode of operation is to
 168 use whole disks. A disk can be specified by a full path, or it can be a
 169 shorthand name (the relative portion of the path under "/dev/dsk"). A whole
 170 disk can be specified by omitting the slice or partition designation. For
 171 example, "c0t0d0" is equivalent to "/dev/dsk/c0t0d0s2". When given a whole
 172 disk, \fBZFS\fR automatically labels the disk, if necessary.
 173 .RE
 174 
 175 .sp
 176 .ne 2
 177 .na
 178 \fB\fBfile\fR\fR
 179 .ad
 180 .RS 10n
 181 A regular file. The use of files as a backing store is strongly discouraged. It
 182 is designed primarily for experimental purposes, as the fault tolerance of a
 183 file is only as good as the file system of which it is a part. A file must be
 184 specified by a full path.
 185 .RE
 186 
 187 .sp
 188 .ne 2
 189 .na
 190 \fB\fBmirror\fR\fR
 191 .ad
 192 .RS 10n
 193 A mirror of two or more devices. Data is replicated in an identical fashion
 194 across all components of a mirror. A mirror with \fIN\fR disks of size \fIX\fR
 195 can hold \fIX\fR bytes and can withstand (\fIN-1\fR) devices failing before
 196 data integrity is compromised.
 197 .RE
 198 
 199 .sp
 200 .ne 2
 201 .na
 202 \fB\fBraidz\fR\fR
 203 .ad
 204 .br
 205 .na
 206 \fB\fBraidz1\fR\fR
 207 .ad
 208 .br
 209 .na
 210 \fB\fBraidz2\fR\fR
 211 .ad
 212 .br
 213 .na
 214 \fB\fBraidz3\fR\fR
 215 .ad
 216 .RS 10n
 217 A variation on \fBRAID-5\fR that allows for better distribution of parity and
 218 eliminates the "\fBRAID-5\fR write hole" (in which data and parity become
 219 inconsistent after a power loss). Data and parity is striped across all disks
 220 within a \fBraidz\fR group.
 221 .sp
 222 A \fBraidz\fR group can have single-, double-, or triple-parity, meaning that
 223 the \fBraidz\fR group can sustain one, two, or three failures, respectively,
 224 without losing any data. The \fBraidz1\fR \fBvdev\fR type specifies a
 225 single-parity \fBraidz\fR group; the \fBraidz2\fR \fBvdev\fR type specifies a
 226 double-parity \fBraidz\fR group; and the \fBraidz3\fR \fBvdev\fR type specifies
 227 a triple-parity \fBraidz\fR group. The \fBraidz\fR \fBvdev\fR type is an alias
 228 for \fBraidz1\fR.
 229 .sp
 230 A \fBraidz\fR group with \fIN\fR disks of size \fIX\fR with \fIP\fR parity
 231 disks can hold approximately (\fIN-P\fR)*\fIX\fR bytes and can withstand
 232 \fIP\fR device(s) failing before data integrity is compromised. The minimum
 233 number of devices in a \fBraidz\fR group is one more than the number of parity
 234 disks. The recommended number is between 3 and 9 to help increase performance.
 235 .RE
 236 
 237 .sp
 238 .ne 2
 239 .na
 240 \fB\fBspare\fR\fR
 241 .ad
 242 .RS 10n
 243 A special pseudo-\fBvdev\fR which keeps track of available hot spares for a
 244 pool. For more information, see the "Hot Spares" section.
 245 .RE
 246 
 247 .sp
 248 .ne 2
 249 .na
 250 \fB\fBlog\fR\fR
 251 .ad
 252 .RS 10n
 253 A separate-intent log device. If more than one log device is specified, then
 254 writes are load-balanced between devices. Log devices can be mirrored. However,
 255 \fBraidz\fR \fBvdev\fR types are not supported for the intent log. For more
 256 information, see the "Intent Log" section.
 257 .RE
 258 
 259 .sp
 260 .ne 2
 261 .na
 262 \fB\fBcache\fR\fR
 263 .ad
 264 .RS 10n
 265 A device used to cache storage pool data. A cache device cannot be
 266 configured as a mirror or \fBraidz\fR group. For more information, see the
 267 "Cache Devices" section.
 268 .RE
 269 
 270 .sp
 271 .LP
 272 Virtual devices cannot be nested, so a mirror or \fBraidz\fR virtual device can
 273 only contain files or disks. Mirrors of mirrors (or other combinations) are not
 274 allowed.
 275 .sp
 276 .LP
 277 A pool can have any number of virtual devices at the top of the configuration
 278 (known as "root vdevs"). Data is dynamically distributed across all top-level
 279 devices to balance data among devices. As new virtual devices are added,
 280 \fBZFS\fR automatically places data on the newly available devices.
 281 .sp
 282 .LP
 283 Virtual devices are specified one at a time on the command line, separated by
 284 whitespace. The keywords "mirror" and "raidz" are used to distinguish where a
 285 group ends and another begins. For example, the following creates two root
 286 vdevs, each a mirror of two disks:
 287 .sp
 288 .in +2
 289 .nf
 290 # \fBzpool create mypool mirror c0t0d0 c0t1d0 mirror c1t0d0 c1t1d0\fR
 291 .fi
 292 .in -2
 293 .sp
 294 
 295 .SS "Device Failure and Recovery"
 296 .sp
 297 .LP
 298 \fBZFS\fR supports a rich set of mechanisms for handling device failure and
 299 data corruption. All metadata and data is checksummed, and \fBZFS\fR
 300 automatically repairs bad data from a good copy when corruption is detected.
 301 .sp
 302 .LP
 303 In order to take advantage of these features, a pool must make use of some form
 304 of redundancy, using either mirrored or \fBraidz\fR groups. While \fBZFS\fR
 305 supports running in a non-redundant configuration, where each root vdev is
 306 simply a disk or file, this is strongly discouraged. A single case of bit
 307 corruption can render some or all of your data unavailable.
 308 .sp
 309 .LP
 310 A pool's health status is described by one of three states: online, degraded,
 311 or faulted. An online pool has all devices operating normally. A degraded pool
 312 is one in which one or more devices have failed, but the data is still
 313 available due to a redundant configuration. A faulted pool has corrupted
 314 metadata, or one or more faulted devices, and insufficient replicas to continue
 315 functioning.
 316 .sp
 317 .LP
 318 The health of the top-level vdev, such as mirror or \fBraidz\fR device, is
 319 potentially impacted by the state of its associated vdevs, or component
 320 devices. A top-level vdev or component device is in one of the following
 321 states:
 322 .sp
 323 .ne 2
 324 .na
 325 \fB\fBDEGRADED\fR\fR
 326 .ad
 327 .RS 12n
 328 One or more top-level vdevs is in the degraded state because one or more
 329 component devices are offline. Sufficient replicas exist to continue
 330 functioning.
 331 .sp
 332 One or more component devices is in the degraded or faulted state, but
 333 sufficient replicas exist to continue functioning. The underlying conditions
 334 are as follows:
 335 .RS +4
 336 .TP
 337 .ie t \(bu
 338 .el o
 339 The number of checksum errors exceeds acceptable levels and the device is
 340 degraded as an indication that something may be wrong. \fBZFS\fR continues to
 341 use the device as necessary.
 342 .RE
 343 .RS +4
 344 .TP
 345 .ie t \(bu
 346 .el o
 347 The number of I/O errors exceeds acceptable levels. The device could not be
 348 marked as faulted because there are insufficient replicas to continue
 349 functioning.
 350 .RE
 351 .RE
 352 
 353 .sp
 354 .ne 2
 355 .na
 356 \fB\fBFAULTED\fR\fR
 357 .ad
 358 .RS 12n
 359 One or more top-level vdevs is in the faulted state because one or more
 360 component devices are offline. Insufficient replicas exist to continue
 361 functioning.
 362 .sp
 363 One or more component devices is in the faulted state, and insufficient
 364 replicas exist to continue functioning. The underlying conditions are as
 365 follows:
 366 .RS +4
 367 .TP
 368 .ie t \(bu
 369 .el o
 370 The device could be opened, but the contents did not match expected values.
 371 .RE
 372 .RS +4
 373 .TP
 374 .ie t \(bu
 375 .el o
 376 The number of I/O errors exceeds acceptable levels and the device is faulted to
 377 prevent further use of the device.
 378 .RE
 379 .RE
 380 
 381 .sp
 382 .ne 2
 383 .na
 384 \fB\fBOFFLINE\fR\fR
 385 .ad
 386 .RS 12n
 387 The device was explicitly taken offline by the "\fBzpool offline\fR" command.
 388 .RE
 389 
 390 .sp
 391 .ne 2
 392 .na
 393 \fB\fBONLINE\fR\fR
 394 .ad
 395 .RS 12n
 396 The device is online and functioning.
 397 .RE
 398 
 399 .sp
 400 .ne 2
 401 .na
 402 \fB\fBREMOVED\fR\fR
 403 .ad
 404 .RS 12n
 405 The device was physically removed while the system was running. Device removal
 406 detection is hardware-dependent and may not be supported on all platforms.
 407 .RE
 408 
 409 .sp
 410 .ne 2
 411 .na
 412 \fB\fBUNAVAIL\fR\fR
 413 .ad
 414 .RS 12n
 415 The device could not be opened. If a pool is imported when a device was
 416 unavailable, then the device will be identified by a unique identifier instead
 417 of its path since the path was never correct in the first place.
 418 .RE
 419 
 420 .sp
 421 .LP
 422 If a device is removed and later re-attached to the system, \fBZFS\fR attempts
 423 to put the device online automatically. Device attach detection is
 424 hardware-dependent and might not be supported on all platforms.
 425 .SS "Hot Spares"
 426 .sp
 427 .LP
 428 \fBZFS\fR allows devices to be associated with pools as "hot spares". These
 429 devices are not actively used in the pool, but when an active device fails, it
 430 is automatically replaced by a hot spare. To create a pool with hot spares,
 431 specify a "spare" \fBvdev\fR with any number of devices. For example,
 432 .sp
 433 .in +2
 434 .nf
 435 # zpool create pool mirror c0d0 c1d0 spare c2d0 c3d0
 436 .fi
 437 .in -2
 438 .sp
 439 
 440 .sp
 441 .LP
 442 Spares can be shared across multiple pools, and can be added with the "\fBzpool
 443 add\fR" command and removed with the "\fBzpool remove\fR" command. Once a spare
 444 replacement is initiated, a new "spare" \fBvdev\fR is created within the
 445 configuration that will remain there until the original device is replaced. At
 446 this point, the hot spare becomes available again if another device fails.
 447 .sp
 448 .LP
 449 If a pool has a shared spare that is currently being used, the pool cannot be
 450 exported since other pools may use this shared spare, which may lead to
 451 potential data corruption.
 452 .sp
 453 .LP
 454 An in-progress spare replacement can be cancelled by detaching the hot spare.
 455 If the original faulted device is detached, then the hot spare assumes its
 456 place in the configuration, and is removed from the spare list of all active
 457 pools.
 458 .sp
 459 .LP
 460 Spares cannot replace log devices.
 461 .SS "Intent Log"
 462 .sp
 463 .LP
 464 The \fBZFS\fR Intent Log (\fBZIL\fR) satisfies \fBPOSIX\fR requirements for
 465 synchronous transactions. For instance, databases often require their
 466 transactions to be on stable storage devices when returning from a system call.
 467 \fBNFS\fR and other applications can also use \fBfsync\fR() to ensure data
 468 stability. By default, the intent log is allocated from blocks within the main
 469 pool. However, it might be possible to get better performance using separate
 470 intent log devices such as \fBNVRAM\fR or a dedicated disk. For example:
 471 .sp
 472 .in +2
 473 .nf
 474 \fB# zpool create pool c0d0 c1d0 log c2d0\fR
 475 .fi
 476 .in -2
 477 .sp
 478 
 479 .sp
 480 .LP
 481 Multiple log devices can also be specified, and they can be mirrored. See the
 482 EXAMPLES section for an example of mirroring multiple log devices.
 483 .sp
 484 .LP
 485 Log devices can be added, replaced, attached, detached, and imported and
 486 exported as part of the larger pool. Mirrored log devices can be removed by
 487 specifying the top-level mirror for the log.
 488 .SS "Cache Devices"
 489 .sp
 490 .LP
 491 Devices can be added to a storage pool as "cache devices." These devices
 492 provide an additional layer of caching between main memory and disk. For
 493 read-heavy workloads, where the working set size is much larger than what can
 494 be cached in main memory, using cache devices allow much more of this working
 495 set to be served from low latency media. Using cache devices provides the
 496 greatest performance improvement for random read-workloads of mostly static
 497 content.
 498 .sp
 499 .LP
 500 To create a pool with cache devices, specify a "cache" \fBvdev\fR with any
 501 number of devices. For example:
 502 .sp
 503 .in +2
 504 .nf
 505 \fB# zpool create pool c0d0 c1d0 cache c2d0 c3d0\fR
 506 .fi
 507 .in -2
 508 .sp
 509 
 510 .sp
 511 .LP
 512 Cache devices cannot be mirrored or part of a \fBraidz\fR configuration. If a
 513 read error is encountered on a cache device, that read \fBI/O\fR is reissued to
 514 the original storage pool device, which might be part of a mirrored or
 515 \fBraidz\fR configuration.
 516 .sp
 517 .LP
 518 The content of the cache devices is considered volatile, as is the case with
 519 other system caches.
 520 .SS "Properties"
 521 .sp
 522 .LP
 523 Each pool has several properties associated with it. Some properties are
 524 read-only statistics while others are configurable and change the behavior of
 525 the pool. The following are read-only properties:
 526 .sp
 527 .ne 2
 528 .na
 529 \fB\fBavailable\fR\fR
 530 .ad
 531 .RS 20n
 532 Amount of storage available within the pool. This property can also be referred
 533 to by its shortened column name, "avail".
 534 .RE
 535 
 536 .sp
 537 .ne 2
 538 .na
 539 \fB\fBcapacity\fR\fR
 540 .ad
 541 .RS 20n
 542 Percentage of pool space used. This property can also be referred to by its
 543 shortened column name, "cap".
 544 .RE
 545 
 546 .sp
 547 .ne 2
 548 .na
 549 \fB\fBhealth\fR\fR
 550 .ad
 551 .RS 20n
 552 The current health of the pool. Health can be "\fBONLINE\fR", "\fBDEGRADED\fR",
 553 "\fBFAULTED\fR", "\fBOFFLINE\fR", "\fBREMOVED\fR", or "\fBUNAVAIL\fR".
 554 .RE
 555 
 556 .sp
 557 .ne 2
 558 .na
 559 \fB\fBguid\fR\fR
 560 .ad
 561 .RS 20n
 562 A unique identifier for the pool.
 563 .RE
 564 
 565 .sp
 566 .ne 2
 567 .na
 568 \fB\fBsize\fR\fR
 569 .ad
 570 .RS 20n
 571 Total size of the storage pool.
 572 .RE
 573 
 574 .sp
 575 .ne 2
 576 .na
 577 \fB\fBused\fR\fR
 578 .ad
 579 .RS 20n
 580 Amount of storage space used within the pool.
 581 .RE
 582 
 583 .sp
 584 .LP
 585 These space usage properties report actual physical space available to the
 586 storage pool. The physical space can be different from the total amount of
 587 space that any contained datasets can actually use. The amount of space used in
 588 a \fBraidz\fR configuration depends on the characteristics of the data being
 589 written. In addition, \fBZFS\fR reserves some space for internal accounting
 590 that the \fBzfs\fR(1M) command takes into account, but the \fBzpool\fR command
 591 does not. For non-full pools of a reasonable size, these effects should be
 592 invisible. For small pools, or pools that are close to being completely full,
 593 these discrepancies may become more noticeable.
 594 .sp
 595 .LP
 596 The following property can be set at creation time and import time:
 597 .sp
 598 .ne 2
 599 .na
 600 \fB\fBaltroot\fR\fR
 601 .ad
 602 .sp .6
 603 .RS 4n
 604 Alternate root directory. If set, this directory is prepended to any mount
 605 points within the pool. This can be used when examining an unknown pool where
 606 the mount points cannot be trusted, or in an alternate boot environment, where
 607 the typical paths are not valid. \fBaltroot\fR is not a persistent property. It
 608 is valid only while the system is up. Setting \fBaltroot\fR defaults to using
 609 \fBcachefile\fR=none, though this may be overridden using an explicit setting.
 610 .RE
 611 
 612 .sp
 613 .LP
 614 The following properties can be set at creation time and import time, and later
 615 changed with the \fBzpool set\fR command:
 616 .sp
 617 .ne 2
 618 .na
 619 \fB\fBautoexpand\fR=\fBon\fR | \fBoff\fR\fR
 620 .ad
 621 .sp .6
 622 .RS 4n
 623 Controls automatic pool expansion when the underlying LUN is grown. If set to
 624 \fBon\fR, the pool will be resized according to the size of the expanded
 625 device. If the device is part of a mirror or \fBraidz\fR then all devices
 626 within that mirror/\fBraidz\fR group must be expanded before the new space is
 627 made available to the pool. The default behavior is \fBoff\fR. This property
 628 can also be referred to by its shortened column name, \fBexpand\fR.
 629 .RE
 630 
 631 .sp
 632 .ne 2
 633 .na
 634 \fB\fBautoreplace\fR=\fBon\fR | \fBoff\fR\fR
 635 .ad
 636 .sp .6
 637 .RS 4n
 638 Controls automatic device replacement. If set to "\fBoff\fR", device
 639 replacement must be initiated by the administrator by using the "\fBzpool
 640 replace\fR" command. If set to "\fBon\fR", any new device, found in the same
 641 physical location as a device that previously belonged to the pool, is
 642 automatically formatted and replaced. The default behavior is "\fBoff\fR". This
 643 property can also be referred to by its shortened column name, "replace".
 644 .RE
 645 
 646 .sp
 647 .ne 2
 648 .na
 649 \fB\fBbootfs\fR=\fIpool\fR/\fIdataset\fR\fR
 650 .ad
 651 .sp .6
 652 .RS 4n
 653 Identifies the default bootable dataset for the root pool. This property is
 654 expected to be set mainly by the installation and upgrade programs.
 655 .RE
 656 
 657 .sp
 658 .ne 2
 659 .na
 660 \fB\fBcachefile\fR=\fIpath\fR | \fBnone\fR\fR
 661 .ad
 662 .sp .6
 663 .RS 4n
 664 Controls the location of where the pool configuration is cached. Discovering
 665 all pools on system startup requires a cached copy of the configuration data
 666 that is stored on the root file system. All pools in this cache are
 667 automatically imported when the system boots. Some environments, such as
 668 install and clustering, need to cache this information in a different location
 669 so that pools are not automatically imported. Setting this property caches the
 670 pool configuration in a different location that can later be imported with
 671 "\fBzpool import -c\fR". Setting it to the special value "\fBnone\fR" creates a
 672 temporary pool that is never cached, and the special value \fB\&''\fR (empty
 673 string) uses the default location.
 674 .sp
 675 Multiple pools can share the same cache file. Because the kernel destroys and
 676 recreates this file when pools are added and removed, care should be taken when
 677 attempting to access this file. When the last pool using a \fBcachefile\fR is
 678 exported or destroyed, the file is removed.
 679 .RE
 680 
 681 .sp
 682 .ne 2
 683 .na
 684 \fB\fBdelegation\fR=\fBon\fR | \fBoff\fR\fR
 685 .ad
 686 .sp .6
 687 .RS 4n
 688 Controls whether a non-privileged user is granted access based on the dataset
 689 permissions defined on the dataset. See \fBzfs\fR(1M) for more information on
 690 \fBZFS\fR delegated administration.
 691 .RE
 692 
 693 .sp
 694 .ne 2
 695 .na
 696 \fB\fBfailmode\fR=\fBwait\fR | \fBcontinue\fR | \fBpanic\fR\fR
 697 .ad
 698 .sp .6
 699 .RS 4n
 700 Controls the system behavior in the event of catastrophic pool failure. This
 701 condition is typically a result of a loss of connectivity to the underlying
 702 storage device(s) or a failure of all devices within the pool. The behavior of
 703 such an event is determined as follows:
 704 .sp
 705 .ne 2
 706 .na
 707 \fB\fBwait\fR\fR
 708 .ad
 709 .RS 12n
 710 Blocks all \fBI/O\fR access until the device connectivity is recovered and the
 711 errors are cleared. This is the default behavior.
 712 .RE
 713 
 714 .sp
 715 .ne 2
 716 .na
 717 \fB\fBcontinue\fR\fR
 718 .ad
 719 .RS 12n
 720 Returns \fBEIO\fR to any new write \fBI/O\fR requests but allows reads to any
 721 of the remaining healthy devices. Any write requests that have yet to be
 722 committed to disk would be blocked.
 723 .RE
 724 
 725 .sp
 726 .ne 2
 727 .na
 728 \fB\fBpanic\fR\fR
 729 .ad
 730 .RS 12n
 731 Prints out a message to the console and generates a system crash dump.
 732 .RE
 733 
 734 .RE
 735 
 736 .sp
 737 .ne 2
 738 .na
 739 \fB\fBlistsnaps\fR=\fBon\fR | \fBoff\fR\fR
 740 .ad
 741 .sp .6
 742 .RS 4n
 743 Controls whether information about snapshots associated with this pool is
 744 output when "\fBzfs list\fR" is run without the \fB-t\fR option. The default
 745 value is "off".
 746 .RE
 747 
 748 .sp
 749 .ne 2
 750 .na
 751 \fB\fBversion\fR=\fIversion\fR\fR
 752 .ad
 753 .sp .6
 754 .RS 4n
 755 The current on-disk version of the pool. This can be increased, but never
 756 decreased. The preferred method of updating pools is with the "\fBzpool
 757 upgrade\fR" command, though this property can be used when a specific version
 758 is needed for backwards compatibility. This property can be any number between
 759 1 and the current version reported by "\fBzpool upgrade -v\fR".
 760 .RE
 761 
 762 .SS "Subcommands"
 763 .sp
 764 .LP
 765 All subcommands that modify state are logged persistently to the pool in their
 766 original form.
 767 .sp
 768 .LP
 769 The \fBzpool\fR command provides subcommands to create and destroy storage
 770 pools, add capacity to storage pools, and provide information about the storage
 771 pools. The following subcommands are supported:
 772 .sp
 773 .ne 2
 774 .na
 775 \fB\fBzpool\fR \fB-?\fR\fR
 776 .ad
 777 .sp .6
 778 .RS 4n
 779 Displays a help message.
 780 .RE
 781 
 782 .sp
 783 .ne 2
 784 .na
 785 \fB\fBzpool add\fR [\fB-fn\fR] \fIpool\fR \fIvdev\fR ...\fR
 786 .ad
 787 .sp .6
 788 .RS 4n
 789 Adds the specified virtual devices to the given pool. The \fIvdev\fR
 790 specification is described in the "Virtual Devices" section. The behavior of
 791 the \fB-f\fR option, and the device checks performed are described in the
 792 "zpool create" subcommand.
 793 .sp
 794 .ne 2
 795 .na
 796 \fB\fB-f\fR\fR
 797 .ad
 798 .RS 6n
 799 Forces use of \fBvdev\fRs, even if they appear in use or specify a conflicting
 800 replication level. Not all devices can be overridden in this manner.
 801 .RE
 802 
 803 .sp
 804 .ne 2
 805 .na
 806 \fB\fB-n\fR\fR
 807 .ad
 808 .RS 6n
 809 Displays the configuration that would be used without actually adding the
 810 \fBvdev\fRs. The actual pool creation can still fail due to insufficient
 811 privileges or device sharing.
 812 .RE
 813 
 814 Do not add a disk that is currently configured as a quorum device to a zpool.
 815 After a disk is in the pool, that disk can then be configured as a quorum
 816 device.
 817 .RE
 818 
 819 .sp
 820 .ne 2
 821 .na
 822 \fB\fBzpool attach\fR [\fB-f\fR] \fIpool\fR \fIdevice\fR \fInew_device\fR\fR
 823 .ad
 824 .sp .6
 825 .RS 4n
 826 Attaches \fInew_device\fR to an existing \fBzpool\fR device. The existing
 827 device cannot be part of a \fBraidz\fR configuration. If \fIdevice\fR is not
 828 currently part of a mirrored configuration, \fIdevice\fR automatically
 829 transforms into a two-way mirror of \fIdevice\fR and \fInew_device\fR. If
 830 \fIdevice\fR is part of a two-way mirror, attaching \fInew_device\fR creates a
 831 three-way mirror, and so on. In either case, \fInew_device\fR begins to
 832 resilver immediately.
 833 .sp
 834 .ne 2
 835 .na
 836 \fB\fB-f\fR\fR
 837 .ad
 838 .RS 6n
 839 Forces use of \fInew_device\fR, even if it appears to be in use. Not all
 840 devices can be overridden in this manner.
 841 .RE
 842 
 843 .RE
 844 
 845 .sp
 846 .ne 2
 847 .na
 848 \fB\fBzpool clear\fR \fIpool\fR [\fIdevice\fR] ...\fR
 849 .ad
 850 .sp .6
 851 .RS 4n
 852 Clears device errors in a pool. If no arguments are specified, all device
 853 errors within the pool are cleared. If one or more devices is specified, only
 854 those errors associated with the specified device or devices are cleared.
 855 .RE
 856 
 857 .sp
 858 .ne 2
 859 .na
 860 \fB\fBzpool create\fR [\fB-fn\fR] [\fB-o\fR \fIproperty=value\fR] ... [\fB-O\fR
 861 \fIfile-system-property=value\fR] ... [\fB-m\fR \fImountpoint\fR] [\fB-R\fR
 862 \fIroot\fR] \fIpool\fR \fIvdev\fR ...\fR
 863 .ad
 864 .sp .6
 865 .RS 4n
 866 Creates a new storage pool containing the virtual devices specified on the
 867 command line. The pool name must begin with a letter, and can only contain
 868 alphanumeric characters as well as underscore ("_"), dash ("-"), and period
 869 ("."). The pool names "mirror", "raidz", "spare" and "log" are reserved, as are
 870 names beginning with the pattern "c[0-9]". The \fBvdev\fR specification is
 871 described in the "Virtual Devices" section.
 872 .sp
 873 The command verifies that each device specified is accessible and not currently
 874 in use by another subsystem. There are some uses, such as being currently
 875 mounted, or specified as the dedicated dump device, that prevents a device from
 876 ever being used by \fBZFS\fR. Other uses, such as having a preexisting
 877 \fBUFS\fR file system, can be overridden with the \fB-f\fR option.
 878 .sp
 879 The command also checks that the replication strategy for the pool is
 880 consistent. An attempt to combine redundant and non-redundant storage in a
 881 single pool, or to mix disks and files, results in an error unless \fB-f\fR is
 882 specified. The use of differently sized devices within a single \fBraidz\fR or
 883 mirror group is also flagged as an error unless \fB-f\fR is specified.
 884 .sp
 885 Unless the \fB-R\fR option is specified, the default mount point is
 886 "/\fIpool\fR". The mount point must not exist or must be empty, or else the
 887 root dataset cannot be mounted. This can be overridden with the \fB-m\fR
 888 option.
 889 .sp
 890 .ne 2
 891 .na
 892 \fB\fB-f\fR\fR
 893 .ad
 894 .sp .6
 895 .RS 4n
 896 Forces use of \fBvdev\fRs, even if they appear in use or specify a conflicting
 897 replication level. Not all devices can be overridden in this manner.
 898 .RE
 899 
 900 .sp
 901 .ne 2
 902 .na
 903 \fB\fB-n\fR\fR
 904 .ad
 905 .sp .6
 906 .RS 4n
 907 Displays the configuration that would be used without actually creating the
 908 pool. The actual pool creation can still fail due to insufficient privileges or
 909 device sharing.
 910 .RE
 911 
 912 .sp
 913 .ne 2
 914 .na
 915 \fB\fB-o\fR \fIproperty=value\fR [\fB-o\fR \fIproperty=value\fR] ...\fR
 916 .ad
 917 .sp .6
 918 .RS 4n
 919 Sets the given pool properties. See the "Properties" section for a list of
 920 valid properties that can be set.
 921 .RE
 922 
 923 .sp
 924 .ne 2
 925 .na
 926 \fB\fB-O\fR \fIfile-system-property=value\fR\fR
 927 .ad
 928 .br
 929 .na
 930 \fB[\fB-O\fR \fIfile-system-property=value\fR] ...\fR
 931 .ad
 932 .sp .6
 933 .RS 4n
 934 Sets the given file system properties in the root file system of the pool. See
 935 the "Properties" section of \fBzfs\fR(1M) for a list of valid properties that
 936 can be set.
 937 .RE
 938 
 939 .sp
 940 .ne 2
 941 .na
 942 \fB\fB-R\fR \fIroot\fR\fR
 943 .ad
 944 .sp .6
 945 .RS 4n
 946 Equivalent to "-o cachefile=none,altroot=\fIroot\fR"
 947 .RE
 948 
 949 .sp
 950 .ne 2
 951 .na
 952 \fB\fB-m\fR \fImountpoint\fR\fR
 953 .ad
 954 .sp .6
 955 .RS 4n
 956 Sets the mount point for the root dataset. The default mount point is
 957 "/\fIpool\fR" or "\fBaltroot\fR/\fIpool\fR" if \fBaltroot\fR is specified. The
 958 mount point must be an absolute path, "\fBlegacy\fR", or "\fBnone\fR". For more
 959 information on dataset mount points, see \fBzfs\fR(1M).
 960 .RE
 961 
 962 .RE
 963 
 964 .sp
 965 .ne 2
 966 .na
 967 \fB\fBzpool destroy\fR [\fB-f\fR] \fIpool\fR\fR
 968 .ad
 969 .sp .6
 970 .RS 4n
 971 Destroys the given pool, freeing up any devices for other use. This command
 972 tries to unmount any active datasets before destroying the pool.
 973 .sp
 974 .ne 2
 975 .na
 976 \fB\fB-f\fR\fR
 977 .ad
 978 .RS 6n
 979 Forces any active datasets contained within the pool to be unmounted.
 980 .RE
 981 
 982 .RE
 983 
 984 .sp
 985 .ne 2
 986 .na
 987 \fB\fBzpool detach\fR \fIpool\fR \fIdevice\fR\fR
 988 .ad
 989 .sp .6
 990 .RS 4n
 991 Detaches \fIdevice\fR from a mirror. The operation is refused if there are no
 992 other valid replicas of the data.
 993 .RE
 994 
 995 .sp
 996 .ne 2
 997 .na
 998 \fB\fBzpool export\fR [\fB-f\fR] \fIpool\fR ...\fR
 999 .ad
1000 .sp .6
1001 .RS 4n
1002 Exports the given pools from the system. All devices are marked as exported,
1003 but are still considered in use by other subsystems. The devices can be moved
1004 between systems (even those of different endianness) and imported as long as a
1005 sufficient number of devices are present.
1006 .sp
Before exporting the pool, all datasets within the pool are unmounted. A pool
cannot be exported if it has a shared spare that is currently being used.
1009 .sp
1010 For pools to be portable, you must give the \fBzpool\fR command whole disks,
1011 not just slices, so that \fBZFS\fR can label the disks with portable \fBEFI\fR
1012 labels. Otherwise, disk drivers on platforms of different endianness will not
1013 recognize the disks.
1014 .sp
1015 .ne 2
1016 .na
1017 \fB\fB-f\fR\fR
1018 .ad
1019 .RS 6n
1020 Forcefully unmount all datasets, using the "\fBunmount -f\fR" command.
1021 .sp
1022 This command will forcefully export the pool even if it has a shared spare that
1023 is currently being used. This may lead to potential data corruption.
1024 .RE
1025 
1026 .RE
1027 
1028 .sp
1029 .ne 2
1030 .na
1031 \fB\fBzpool get\fR "\fIall\fR" | \fIproperty\fR[,...] \fIpool\fR ...\fR
1032 .ad
1033 .sp .6
1034 .RS 4n
1035 Retrieves the given list of properties (or all properties if "\fBall\fR" is
1036 used) for the specified storage pool(s). These properties are displayed with
1037 the following fields:
1038 .sp
1039 .in +2
1040 .nf
        name          Name of storage pool
        property      Property name
        value         Property value
        source        Property source, either 'default' or 'local'.
1045 .fi
1046 .in -2
1047 .sp
1048 
1049 See the "Properties" section for more information on the available pool
1050 properties.
1051 .RE
1052 
1053 .sp
1054 .ne 2
1055 .na
1056 \fB\fBzpool history\fR [\fB-il\fR] [\fIpool\fR] ...\fR
1057 .ad
1058 .sp .6
1059 .RS 4n
1060 Displays the command history of the specified pools or all pools if no pool is
1061 specified.
1062 .sp
1063 .ne 2
1064 .na
1065 \fB\fB-i\fR\fR
1066 .ad
1067 .RS 6n
1068 Displays internally logged \fBZFS\fR events in addition to user initiated
1069 events.
1070 .RE
1071 
1072 .sp
1073 .ne 2
1074 .na
1075 \fB\fB-l\fR\fR
1076 .ad
1077 .RS 6n
Displays log records in long format, which in addition to standard format
includes the user name, the hostname, and the zone in which the operation was
performed.
1081 .RE
1082 
1083 .RE
1084 
1085 .sp
1086 .ne 2
1087 .na
1088 \fB\fBzpool import\fR [\fB-d\fR \fIdir\fR | \fB-c\fR \fIcachefile\fR]
1089 [\fB-D\fR]\fR
1090 .ad
1091 .sp .6
1092 .RS 4n
1093 Lists pools available to import. If the \fB-d\fR option is not specified, this
1094 command searches for devices in "/dev/dsk". The \fB-d\fR option can be
1095 specified multiple times, and all directories are searched. If the device
1096 appears to be part of an exported pool, this command displays a summary of the
1097 pool with the name of the pool, a numeric identifier, as well as the \fIvdev\fR
1098 layout and current health of the device for each device or file. Destroyed
1099 pools, pools that were previously destroyed with the "\fBzpool destroy\fR"
1100 command, are not listed unless the \fB-D\fR option is specified.
1101 .sp
1102 The numeric identifier is unique, and can be used instead of the pool name when
1103 multiple exported pools of the same name are available.
1104 .sp
1105 .ne 2
1106 .na
1107 \fB\fB-c\fR \fIcachefile\fR\fR
1108 .ad
1109 .RS 16n
1110 Reads configuration from the given \fBcachefile\fR that was created with the
1111 "\fBcachefile\fR" pool property. This \fBcachefile\fR is used instead of
1112 searching for devices.
1113 .RE
1114 
1115 .sp
1116 .ne 2
1117 .na
1118 \fB\fB-d\fR \fIdir\fR\fR
1119 .ad
1120 .RS 16n
1121 Searches for devices or files in \fIdir\fR. The \fB-d\fR option can be
1122 specified multiple times.
1123 .RE
1124 
1125 .sp
1126 .ne 2
1127 .na
1128 \fB\fB-D\fR\fR
1129 .ad
1130 .RS 16n
1131 Lists destroyed pools only.
1132 .RE
1133 
1134 .RE
1135 
1136 .sp
1137 .ne 2
1138 .na
1139 \fB\fBzpool import\fR [\fB-o\fR \fImntopts\fR] [ \fB-o\fR
1140 \fIproperty\fR=\fIvalue\fR] ... [\fB-d\fR \fIdir\fR | \fB-c\fR \fIcachefile\fR]
1141 [\fB-D\fR] [\fB-f\fR] [\fB-R\fR \fIroot\fR] \fB-a\fR\fR
1142 .ad
1143 .sp .6
1144 .RS 4n
1145 Imports all pools found in the search directories. Identical to the previous
1146 command, except that all pools with a sufficient number of devices available
1147 are imported. Destroyed pools, pools that were previously destroyed with the
1148 "\fBzpool destroy\fR" command, will not be imported unless the \fB-D\fR option
1149 is specified.
1150 .sp
1151 .ne 2
1152 .na
1153 \fB\fB-o\fR \fImntopts\fR\fR
1154 .ad
1155 .RS 21n
1156 Comma-separated list of mount options to use when mounting datasets within the
1157 pool. See \fBzfs\fR(1M) for a description of dataset properties and mount
1158 options.
1159 .RE
1160 
1161 .sp
1162 .ne 2
1163 .na
1164 \fB\fB-o\fR \fIproperty=value\fR\fR
1165 .ad
1166 .RS 21n
1167 Sets the specified property on the imported pool. See the "Properties" section
1168 for more information on the available pool properties.
1169 .RE
1170 
1171 .sp
1172 .ne 2
1173 .na
1174 \fB\fB-c\fR \fIcachefile\fR\fR
1175 .ad
1176 .RS 21n
1177 Reads configuration from the given \fBcachefile\fR that was created with the
1178 "\fBcachefile\fR" pool property. This \fBcachefile\fR is used instead of
1179 searching for devices.
1180 .RE
1181 
1182 .sp
1183 .ne 2
1184 .na
1185 \fB\fB-d\fR \fIdir\fR\fR
1186 .ad
1187 .RS 21n
1188 Searches for devices or files in \fIdir\fR. The \fB-d\fR option can be
1189 specified multiple times. This option is incompatible with the \fB-c\fR option.
1190 .RE
1191 
1192 .sp
1193 .ne 2
1194 .na
1195 \fB\fB-D\fR\fR
1196 .ad
1197 .RS 21n
1198 Imports destroyed pools only. The \fB-f\fR option is also required.
1199 .RE
1200 
1201 .sp
1202 .ne 2
1203 .na
1204 \fB\fB-f\fR\fR
1205 .ad
1206 .RS 21n
1207 Forces import, even if the pool appears to be potentially active.
1208 .RE
1209 
1210 .sp
1211 .ne 2
1212 .na
1213 \fB\fB-a\fR\fR
1214 .ad
1215 .RS 21n
1216 Searches for and imports all pools found.
1217 .RE
1218 
1219 .sp
1220 .ne 2
1221 .na
1222 \fB\fB-R\fR \fIroot\fR\fR
1223 .ad
1224 .RS 21n
1225 Sets the "\fBcachefile\fR" property to "\fBnone\fR" and the "\fIaltroot\fR"
1226 property to "\fIroot\fR".
1227 .RE
1228 
1229 .RE
1230 
1231 .sp
1232 .ne 2
1233 .na
1234 \fB\fBzpool import\fR [\fB-o\fR \fImntopts\fR] [ \fB-o\fR
1235 \fIproperty\fR=\fIvalue\fR] ... [\fB-d\fR \fIdir\fR | \fB-c\fR \fIcachefile\fR]
1236 [\fB-D\fR] [\fB-f\fR] [\fB-R\fR \fIroot\fR] \fIpool\fR | \fIid\fR
1237 [\fInewpool\fR]\fR
1238 .ad
1239 .sp .6
1240 .RS 4n
1241 Imports a specific pool. A pool can be identified by its name or the numeric
1242 identifier. If \fInewpool\fR is specified, the pool is imported using the name
1243 \fInewpool\fR. Otherwise, it is imported with the same name as its exported
1244 name.
1245 .sp
1246 If a device is removed from a system without running "\fBzpool export\fR"
1247 first, the device appears as potentially active. It cannot be determined if
1248 this was a failed export, or whether the device is really in use from another
1249 host. To import a pool in this state, the \fB-f\fR option is required.
1250 .sp
1251 .ne 2
1252 .na
1253 \fB\fB-o\fR \fImntopts\fR\fR
1254 .ad
1255 .sp .6
1256 .RS 4n
1257 Comma-separated list of mount options to use when mounting datasets within the
1258 pool. See \fBzfs\fR(1M) for a description of dataset properties and mount
1259 options.
1260 .RE
1261 
1262 .sp
1263 .ne 2
1264 .na
1265 \fB\fB-o\fR \fIproperty=value\fR\fR
1266 .ad
1267 .sp .6
1268 .RS 4n
1269 Sets the specified property on the imported pool. See the "Properties" section
1270 for more information on the available pool properties.
1271 .RE
1272 
1273 .sp
1274 .ne 2
1275 .na
1276 \fB\fB-c\fR \fIcachefile\fR\fR
1277 .ad
1278 .sp .6
1279 .RS 4n
1280 Reads configuration from the given \fBcachefile\fR that was created with the
1281 "\fBcachefile\fR" pool property. This \fBcachefile\fR is used instead of
1282 searching for devices.
1283 .RE
1284 
1285 .sp
1286 .ne 2
1287 .na
1288 \fB\fB-d\fR \fIdir\fR\fR
1289 .ad
1290 .sp .6
1291 .RS 4n
1292 Searches for devices or files in \fIdir\fR. The \fB-d\fR option can be
1293 specified multiple times. This option is incompatible with the \fB-c\fR option.
1294 .RE
1295 
1296 .sp
1297 .ne 2
1298 .na
1299 \fB\fB-D\fR\fR
1300 .ad
1301 .sp .6
1302 .RS 4n
Imports a destroyed pool. The \fB-f\fR option is also required.
1304 .RE
1305 
1306 .sp
1307 .ne 2
1308 .na
1309 \fB\fB-f\fR\fR
1310 .ad
1311 .sp .6
1312 .RS 4n
1313 Forces import, even if the pool appears to be potentially active.
1314 .RE
1315 
1316 .sp
1317 .ne 2
1318 .na
1319 \fB\fB-R\fR \fIroot\fR\fR
1320 .ad
1321 .sp .6
1322 .RS 4n
1323 Sets the "\fBcachefile\fR" property to "\fBnone\fR" and the "\fIaltroot\fR"
1324 property to "\fIroot\fR".
1325 .RE
1326 
1327 .RE
1328 
1329 .sp
1330 .ne 2
1331 .na
1332 \fB\fBzpool iostat\fR [\fB-T\fR \fBu\fR | \fBd\fR] [\fB-v\fR] [\fIpool\fR] ...
1333 [\fIinterval\fR[\fIcount\fR]]\fR
1334 .ad
1335 .sp .6
1336 .RS 4n
1337 Displays \fBI/O\fR statistics for the given pools. When given an interval, the
1338 statistics are printed every \fIinterval\fR seconds until \fBCtrl-C\fR is
pressed. If no \fIpools\fR are specified, statistics for every pool in the
system are shown. If \fIcount\fR is specified, the command exits after
1341 \fIcount\fR reports are printed.
1342 .sp
1343 .ne 2
1344 .na
1345 \fB\fB-T\fR \fBu\fR | \fBd\fR\fR
1346 .ad
1347 .RS 12n
1348 Display a time stamp.
1349 .sp
1350 Specify \fBu\fR for a printed representation of the internal representation of
1351 time. See \fBtime\fR(2). Specify \fBd\fR for standard date format. See
1352 \fBdate\fR(1).
1353 .RE
1354 
1355 .sp
1356 .ne 2
1357 .na
1358 \fB\fB-v\fR\fR
1359 .ad
1360 .RS 12n
1361 Verbose statistics. Reports usage statistics for individual \fIvdevs\fR within
1362 the pool, in addition to the pool-wide statistics.
1363 .RE
1364 
1365 .RE
1366 
1367 .sp
1368 .ne 2
1369 .na
1370 \fB\fBzpool list\fR [\fB-H\fR] [\fB-o\fR \fIprops\fR[,...]] [\fIpool\fR] ...\fR
1371 .ad
1372 .sp .6
1373 .RS 4n
1374 Lists the given pools along with a health status and space usage. When given no
1375 arguments, all pools in the system are listed.
1376 .sp
1377 .ne 2
1378 .na
1379 \fB\fB-H\fR\fR
1380 .ad
1381 .RS 12n
1382 Scripted mode. Do not display headers, and separate fields by a single tab
1383 instead of arbitrary space.
1384 .RE
1385 
1386 .sp
1387 .ne 2
1388 .na
1389 \fB\fB-o\fR \fIprops\fR\fR
1390 .ad
1391 .RS 12n
1392 Comma-separated list of properties to display. See the "Properties" section for
1393 a list of valid properties. The default list is "name, size, used, available,
1394 capacity, health, altroot"
1395 .RE
1396 
1397 .RE
1398 
1399 .sp
1400 .ne 2
1401 .na
1402 \fB\fBzpool offline\fR [\fB-t\fR] \fIpool\fR \fIdevice\fR ...\fR
1403 .ad
1404 .sp .6
1405 .RS 4n
1406 Takes the specified physical device offline. While the \fIdevice\fR is offline,
1407 no attempt is made to read or write to the device.
1408 .sp
1409 This command is not applicable to spares or cache devices.
1410 .sp
1411 .ne 2
1412 .na
1413 \fB\fB-t\fR\fR
1414 .ad
1415 .RS 6n
1416 Temporary. Upon reboot, the specified physical device reverts to its previous
1417 state.
1418 .RE
1419 
1420 .RE
1421 
1422 .sp
1423 .ne 2
1424 .na
1425 \fB\fBzpool online\fR [\fB-e\fR] \fIpool\fR \fIdevice\fR...\fR
1426 .ad
1427 .sp .6
1428 .RS 4n
1429 Brings the specified physical device online.
1430 .sp
1431 This command is not applicable to spares or cache devices.
1432 .sp
1433 .ne 2
1434 .na
1435 \fB\fB-e\fR\fR
1436 .ad
1437 .RS 6n
1438 Expand the device to use all available space. If the device is part of a mirror
1439 or \fBraidz\fR then all devices must be expanded before the new space will
1440 become available to the pool.
1441 .RE
1442 
1443 .RE
1444 
1445 .sp
1446 .ne 2
1447 .na
\fB\fBzpool reguid\fR \fIpool\fR\fR
1449 .ad
1450 .sp .6
1451 .RS 4n
1452 Generates a new unique identifier for the pool.  You must ensure that all devices in this pool are online and
1453 healthy before performing this action.
1454 .RE
1455 
1456 .sp
1457 .ne 2
1458 .na
1459 \fB\fBzpool remove\fR \fIpool\fR \fIdevice\fR ...\fR
1460 .ad
1461 .sp .6
1462 .RS 4n
1463 Removes the specified device from the pool. This command currently only
1464 supports removing hot spares, cache, and log devices. A mirrored log device can
1465 be removed by specifying the top-level mirror for the log. Non-log devices that
1466 are part of a mirrored configuration can be removed using the \fBzpool
1467 detach\fR command. Non-redundant and \fBraidz\fR devices cannot be removed from
1468 a pool.
1469 .RE
1470 
1471 .sp
1472 .ne 2
1473 .na
1474 \fB\fBzpool replace\fR [\fB-f\fR] \fIpool\fR \fIold_device\fR
1475 [\fInew_device\fR]\fR
1476 .ad
1477 .sp .6
1478 .RS 4n
1479 Replaces \fIold_device\fR with \fInew_device\fR. This is equivalent to
1480 attaching \fInew_device\fR, waiting for it to resilver, and then detaching
1481 \fIold_device\fR.
1482 .sp
1483 The size of \fInew_device\fR must be greater than or equal to the minimum size
1484 of all the devices in a mirror or \fBraidz\fR configuration.
1485 .sp
1486 \fInew_device\fR is required if the pool is not redundant. If \fInew_device\fR
1487 is not specified, it defaults to \fIold_device\fR. This form of replacement is
1488 useful after an existing disk has failed and has been physically replaced. In
1489 this case, the new disk may have the same \fB/dev/dsk\fR path as the old
1490 device, even though it is actually a different disk. \fBZFS\fR recognizes this.
1491 .sp
1492 .ne 2
1493 .na
1494 \fB\fB-f\fR\fR
1495 .ad
1496 .RS 6n
Forces use of \fInew_device\fR, even if it appears to be in use. Not all
1498 devices can be overridden in this manner.
1499 .RE
1500 
1501 .RE
1502 
1503 .sp
1504 .ne 2
1505 .na
1506 \fB\fBzpool scrub\fR [\fB-s\fR] \fIpool\fR ...\fR
1507 .ad
1508 .sp .6
1509 .RS 4n
1510 Begins a scrub. The scrub examines all data in the specified pools to verify
1511 that it checksums correctly. For replicated (mirror or \fBraidz\fR) devices,
1512 \fBZFS\fR automatically repairs any damage discovered during the scrub. The
1513 "\fBzpool status\fR" command reports the progress of the scrub and summarizes
1514 the results of the scrub upon completion.
1515 .sp
1516 Scrubbing and resilvering are very similar operations. The difference is that
1517 resilvering only examines data that \fBZFS\fR knows to be out of date (for
1518 example, when attaching a new device to a mirror or replacing an existing
1519 device), whereas scrubbing examines all data to discover silent errors due to
1520 hardware faults or disk failure.
1521 .sp
1522 Because scrubbing and resilvering are \fBI/O\fR-intensive operations, \fBZFS\fR
1523 only allows one at a time. If a scrub is already in progress, the "\fBzpool
1524 scrub\fR" command terminates it and starts a new scrub. If a resilver is in
1525 progress, \fBZFS\fR does not allow a scrub to be started until the resilver
1526 completes.
1527 .sp
1528 .ne 2
1529 .na
1530 \fB\fB-s\fR\fR
1531 .ad
1532 .RS 6n
1533 Stop scrubbing.
1534 .RE
1535 
1536 .RE
1537 
1538 .sp
1539 .ne 2
1540 .na
1541 \fB\fBzpool set\fR \fIproperty\fR=\fIvalue\fR \fIpool\fR\fR
1542 .ad
1543 .sp .6
1544 .RS 4n
1545 Sets the given property on the specified pool. See the "Properties" section for
1546 more information on what properties can be set and acceptable values.
1547 .RE
1548 
1549 .sp
1550 .ne 2
1551 .na
1552 \fB\fBzpool status\fR [\fB-xv\fR] [\fIpool\fR] ...\fR
1553 .ad
1554 .sp .6
1555 .RS 4n
1556 Displays the detailed health status for the given pools. If no \fIpool\fR is
1557 specified, then the status of each pool in the system is displayed. For more
1558 information on pool and device health, see the "Device Failure and Recovery"
1559 section.
1560 .sp
1561 If a scrub or resilver is in progress, this command reports the percentage done
1562 and the estimated time to completion. Both of these are only approximate,
1563 because the amount of data in the pool and the other workloads on the system
1564 can change.
1565 .sp
1566 .ne 2
1567 .na
1568 \fB\fB-x\fR\fR
1569 .ad
1570 .RS 6n
1571 Only display status for pools that are exhibiting errors or are otherwise
1572 unavailable.
1573 .RE
1574 
1575 .sp
1576 .ne 2
1577 .na
1578 \fB\fB-v\fR\fR
1579 .ad
1580 .RS 6n
1581 Displays verbose data error information, printing out a complete list of all
1582 data errors since the last complete pool scrub.
1583 .RE
1584 
1585 .RE
1586 
1587 .sp
1588 .ne 2
1589 .na
1590 \fB\fBzpool upgrade\fR\fR
1591 .ad
1592 .sp .6
1593 .RS 4n
1594 Displays all pools formatted using a different \fBZFS\fR on-disk version. Older
1595 versions can continue to be used, but some features may not be available. These
1596 pools can be upgraded using "\fBzpool upgrade -a\fR". Pools that are formatted
1597 with a more recent version are also displayed, although these pools will be
1598 inaccessible on the system.
1599 .RE
1600 
1601 .sp
1602 .ne 2
1603 .na
1604 \fB\fBzpool upgrade\fR \fB-v\fR\fR
1605 .ad
1606 .sp .6
1607 .RS 4n
1608 Displays \fBZFS\fR versions supported by the current software. The current
1609 \fBZFS\fR versions and all previous supported versions are displayed, along
1610 with an explanation of the features provided with each version.
1611 .RE
1612 
1613 .sp
1614 .ne 2
1615 .na
1616 \fB\fBzpool upgrade\fR [\fB-V\fR \fIversion\fR] \fB-a\fR | \fIpool\fR ...\fR
1617 .ad
1618 .sp .6
1619 .RS 4n
1620 Upgrades the given pool to the latest on-disk version. Once this is done, the
1621 pool will no longer be accessible on systems running older versions of the
1622 software.
1623 .sp
1624 .ne 2
1625 .na
1626 \fB\fB-a\fR\fR
1627 .ad
1628 .RS 14n
1629 Upgrades all pools.
1630 .RE
1631 
1632 .sp
1633 .ne 2
1634 .na
1635 \fB\fB-V\fR \fIversion\fR\fR
1636 .ad
1637 .RS 14n
1638 Upgrade to the specified version. If the \fB-V\fR flag is not specified, the
1639 pool is upgraded to the most recent version. This option can only be used to
1640 increase the version number, and only up to the most recent version supported
1641 by this software.
1642 .RE
1643 
1644 .RE
1645 
1646 .SH EXAMPLES
1647 .LP
1648 \fBExample 1 \fRCreating a RAID-Z Storage Pool
1649 .sp
1650 .LP
1651 The following command creates a pool with a single \fBraidz\fR root \fIvdev\fR
1652 that consists of six disks.
1653 
1654 .sp
1655 .in +2
1656 .nf
1657 # \fBzpool create tank raidz c0t0d0 c0t1d0 c0t2d0 c0t3d0 c0t4d0 c0t5d0\fR
1658 .fi
1659 .in -2
1660 .sp
1661 
1662 .LP
1663 \fBExample 2 \fRCreating a Mirrored Storage Pool
1664 .sp
1665 .LP
1666 The following command creates a pool with two mirrors, where each mirror
1667 contains two disks.
1668 
1669 .sp
1670 .in +2
1671 .nf
1672 # \fBzpool create tank mirror c0t0d0 c0t1d0 mirror c0t2d0 c0t3d0\fR
1673 .fi
1674 .in -2
1675 .sp
1676 
1677 .LP
1678 \fBExample 3 \fRCreating a ZFS Storage Pool by Using Slices
1679 .sp
1680 .LP
1681 The following command creates an unmirrored pool using two disk slices.
1682 
1683 .sp
1684 .in +2
1685 .nf
1686 # \fBzpool create tank /dev/dsk/c0t0d0s1 c0t1d0s4\fR
1687 .fi
1688 .in -2
1689 .sp
1690 
1691 .LP
1692 \fBExample 4 \fRCreating a ZFS Storage Pool by Using Files
1693 .sp
1694 .LP
1695 The following command creates an unmirrored pool using files. While not
1696 recommended, a pool based on files can be useful for experimental purposes.
1697 
1698 .sp
1699 .in +2
1700 .nf
1701 # \fBzpool create tank /path/to/file/a /path/to/file/b\fR
1702 .fi
1703 .in -2
1704 .sp
1705 
1706 .LP
1707 \fBExample 5 \fRAdding a Mirror to a ZFS Storage Pool
1708 .sp
1709 .LP
1710 The following command adds two mirrored disks to the pool "\fItank\fR",
1711 assuming the pool is already made up of two-way mirrors. The additional space
1712 is immediately available to any datasets within the pool.
1713 
1714 .sp
1715 .in +2
1716 .nf
1717 # \fBzpool add tank mirror c1t0d0 c1t1d0\fR
1718 .fi
1719 .in -2
1720 .sp
1721 
1722 .LP
1723 \fBExample 6 \fRListing Available ZFS Storage Pools
1724 .sp
1725 .LP
1726 The following command lists all available pools on the system. In this case,
1727 the pool \fIzion\fR is faulted due to a missing device.
1728 
1729 .sp
1730 .LP
1731 The results from this command are similar to the following:
1732 
1733 .sp
1734 .in +2
1735 .nf
1736 # \fBzpool list\fR
1737      NAME              SIZE    USED   AVAIL    CAP  HEALTH     ALTROOT
1738      pool             67.5G   2.92M   67.5G     0%  ONLINE     -
1739      tank             67.5G   2.92M   67.5G     0%  ONLINE     -
1740      zion                 -       -       -     0%  FAULTED    -
1741 .fi
1742 .in -2
1743 .sp
1744 
1745 .LP
1746 \fBExample 7 \fRDestroying a ZFS Storage Pool
1747 .sp
1748 .LP
1749 The following command destroys the pool "\fItank\fR" and any datasets contained
1750 within.
1751 
1752 .sp
1753 .in +2
1754 .nf
1755 # \fBzpool destroy -f tank\fR
1756 .fi
1757 .in -2
1758 .sp
1759 
1760 .LP
1761 \fBExample 8 \fRExporting a ZFS Storage Pool
1762 .sp
1763 .LP
1764 The following command exports the devices in pool \fItank\fR so that they can
1765 be relocated or later imported.
1766 
1767 .sp
1768 .in +2
1769 .nf
1770 # \fBzpool export tank\fR
1771 .fi
1772 .in -2
1773 .sp
1774 
1775 .LP
1776 \fBExample 9 \fRImporting a ZFS Storage Pool
1777 .sp
1778 .LP
1779 The following command displays available pools, and then imports the pool
1780 "tank" for use on the system.
1781 
1782 .sp
1783 .LP
1784 The results from this command are similar to the following:
1785 
1786 .sp
1787 .in +2
1788 .nf
1789 # \fBzpool import\fR
1790   pool: tank
1791     id: 15451357997522795478
1792  state: ONLINE
1793 action: The pool can be imported using its name or numeric identifier.
1794 config:
1795 
1796         tank        ONLINE
1797           mirror    ONLINE
1798             c1t2d0  ONLINE
1799             c1t3d0  ONLINE
1800 
1801 # \fBzpool import tank\fR
1802 .fi
1803 .in -2
1804 .sp
1805 
1806 .LP
1807 \fBExample 10 \fRUpgrading All ZFS Storage Pools to the Current Version
1808 .sp
1809 .LP
1810 The following command upgrades all ZFS Storage pools to the current version of
1811 the software.
1812 
1813 .sp
1814 .in +2
1815 .nf
1816 # \fBzpool upgrade -a\fR
1817 This system is currently running ZFS version 2.
1818 .fi
1819 .in -2
1820 .sp
1821 
1822 .LP
1823 \fBExample 11 \fRManaging Hot Spares
1824 .sp
1825 .LP
1826 The following command creates a new pool with an available hot spare:
1827 
1828 .sp
1829 .in +2
1830 .nf
1831 # \fBzpool create tank mirror c0t0d0 c0t1d0 spare c0t2d0\fR
1832 .fi
1833 .in -2
1834 .sp
1835 
1836 .sp
1837 .LP
1838 If one of the disks were to fail, the pool would be reduced to the degraded
1839 state. The failed device can be replaced using the following command:
1840 
1841 .sp
1842 .in +2
1843 .nf
1844 # \fBzpool replace tank c0t0d0 c0t3d0\fR
1845 .fi
1846 .in -2
1847 .sp
1848 
1849 .sp
1850 .LP
Once the data has been resilvered, the spare is automatically removed and is
made available should another device fail. The hot spare can be permanently
1853 removed from the pool using the following command:
1854 
1855 .sp
1856 .in +2
1857 .nf
1858 # \fBzpool remove tank c0t2d0\fR
1859 .fi
1860 .in -2
1861 .sp
1862 
1863 .LP
1864 \fBExample 12 \fRCreating a ZFS Pool with Mirrored Separate Intent Logs
1865 .sp
1866 .LP
1867 The following command creates a ZFS storage pool consisting of two, two-way
1868 mirrors and mirrored log devices:
1869 
1870 .sp
1871 .in +2
1872 .nf
1873 # \fBzpool create pool mirror c0d0 c1d0 mirror c2d0 c3d0 log mirror \e
1874    c4d0 c5d0\fR
1875 .fi
1876 .in -2
1877 .sp
1878 
1879 .LP
1880 \fBExample 13 \fRAdding Cache Devices to a ZFS Pool
1881 .sp
1882 .LP
1883 The following command adds two disks for use as cache devices to a ZFS storage
1884 pool:
1885 
1886 .sp
1887 .in +2
1888 .nf
1889 # \fBzpool add pool cache c2d0 c3d0\fR
1890 .fi
1891 .in -2
1892 .sp
1893 
1894 .sp
1895 .LP
1896 Once added, the cache devices gradually fill with content from main memory.
1897 Depending on the size of your cache devices, it could take over an hour for
1898 them to fill. Capacity and reads can be monitored using the \fBiostat\fR option
1899 as follows:
1900 
1901 .sp
1902 .in +2
1903 .nf
1904 # \fBzpool iostat -v pool 5\fR
1905 .fi
1906 .in -2
1907 .sp
1908 
1909 .LP
1910 \fBExample 14 \fRRemoving a Mirrored Log Device
1911 .sp
1912 .LP
1913 The following command removes the mirrored log device \fBmirror-2\fR.
1914 
1915 .sp
1916 .LP
1917 Given this configuration:
1918 
1919 .sp
1920 .in +2
1921 .nf
1922    pool: tank
1923   state: ONLINE
1924   scrub: none requested
1925 config:
1926 
1927          NAME        STATE     READ WRITE CKSUM
1928          tank        ONLINE       0     0     0
1929            mirror-0  ONLINE       0     0     0
1930              c6t0d0  ONLINE       0     0     0
1931              c6t1d0  ONLINE       0     0     0
1932            mirror-1  ONLINE       0     0     0
1933              c6t2d0  ONLINE       0     0     0
1934              c6t3d0  ONLINE       0     0     0
1935          logs
1936            mirror-2  ONLINE       0     0     0
1937              c4t0d0  ONLINE       0     0     0
1938              c4t1d0  ONLINE       0     0     0
1939 .fi
1940 .in -2
1941 .sp
1942 
1943 .sp
1944 .LP
1945 The command to remove the mirrored log \fBmirror-2\fR is:
1946 
1947 .sp
1948 .in +2
1949 .nf
1950 # \fBzpool remove tank mirror-2\fR
1951 .fi
1952 .in -2
1953 .sp
1954 
1955 .SH EXIT STATUS
1956 .sp
1957 .LP
1958 The following exit values are returned:
1959 .sp
1960 .ne 2
1961 .na
1962 \fB\fB0\fR\fR
1963 .ad
1964 .RS 5n
1965 Successful completion.
1966 .RE
1967 
1968 .sp
1969 .ne 2
1970 .na
1971 \fB\fB1\fR\fR
1972 .ad
1973 .RS 5n
1974 An error occurred.
1975 .RE
1976 
1977 .sp
1978 .ne 2
1979 .na
1980 \fB\fB2\fR\fR
1981 .ad
1982 .RS 5n
1983 Invalid command line options were specified.
1984 .RE
1985 
1986 .SH ATTRIBUTES
1987 .sp
1988 .LP
1989 See \fBattributes\fR(5) for descriptions of the following attributes:
1990 .sp
1991 
1992 .sp
1993 .TS
1994 box;
1995 c | c
1996 l | l .
1997 ATTRIBUTE TYPE  ATTRIBUTE VALUE
1998 _
1999 Interface Stability     Evolving
2000 .TE
2001 
2002 .SH SEE ALSO
2003 .sp
2004 .LP
2005 \fBzfs\fR(1M), \fBattributes\fR(5)