1 '\" te
   2 .\" Copyright (c) 2007, Sun Microsystems, Inc. All Rights Reserved.
   3 .\" Copyright 2011, Nexenta Systems, Inc. All Rights Reserved.
   4 .\" The contents of this file are subject to the terms of the Common Development and Distribution License (the "License"). You may not use this file except in compliance with the License. You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE or http://www.opensolaris.org/os/licensing.
   5 .\" See the License for the specific language governing permissions and limitations under the License. When distributing Covered Code, include this CDDL HEADER in each file and include the License file at usr/src/OPENSOLARIS.LICENSE. If applicable, add the following below this CDDL HEADER, with the
   6 .\" fields enclosed by brackets "[]" replaced with your own identifying information: Portions Copyright [yyyy] [name of copyright owner]
   7 .TH ZPOOL 1M "Nov 14, 2011"
   8 .SH NAME
   9 zpool \- configures ZFS storage pools
  10 .SH SYNOPSIS
  11 .LP
  12 .nf
  13 \fBzpool\fR [\fB-?\fR]
  14 .fi
  15 
  16 .LP
  17 .nf
  18 \fBzpool add\fR [\fB-fn\fR] \fIpool\fR \fIvdev\fR ...
  19 .fi
  20 
  21 .LP
  22 .nf
  23 \fBzpool attach\fR [\fB-f\fR] \fIpool\fR \fIdevice\fR \fInew_device\fR
  24 .fi
  25 
  26 .LP
  27 .nf
  28 \fBzpool clear\fR \fIpool\fR [\fIdevice\fR]
  29 .fi
  30 
  31 .LP
  32 .nf
  33 \fBzpool create\fR [\fB-fn\fR] [\fB-o\fR \fIproperty=value\fR] ... [\fB-O\fR \fIfile-system-property=value\fR]
  34      ... [\fB-m\fR \fImountpoint\fR] [\fB-R\fR \fIroot\fR] \fIpool\fR \fIvdev\fR ...
  35 .fi
  36 
  37 .LP
  38 .nf
  39 \fBzpool destroy\fR [\fB-f\fR] \fIpool\fR
  40 .fi
  41 
  42 .LP
  43 .nf
  44 \fBzpool detach\fR \fIpool\fR \fIdevice\fR
  45 .fi
  46 
  47 .LP
  48 .nf
  49 \fBzpool export\fR [\fB-f\fR] \fIpool\fR ...
  50 .fi
  51 
  52 .LP
  53 .nf
  54 \fBzpool get\fR "\fIall\fR" | \fIproperty\fR[,...] \fIpool\fR ...
  55 .fi
  56 
  57 .LP
  58 .nf
  59 \fBzpool history\fR [\fB-il\fR] [\fIpool\fR] ...
  60 .fi
  61 
  62 .LP
  63 .nf
  64 \fBzpool import\fR [\fB-d\fR \fIdir\fR] [\fB-D\fR]
  65 .fi
  66 
  67 .LP
  68 .nf
  69 \fBzpool import\fR [\fB-o \fImntopts\fR\fR] [\fB-o\fR \fIproperty=value\fR] ... [\fB-d\fR \fIdir\fR | \fB-c\fR \fIcachefile\fR]
  70      [\fB-D\fR] [\fB-f\fR] [\fB-R\fR \fIroot\fR] \fB-a\fR
  71 .fi
  72 
  73 .LP
  74 .nf
  75 \fBzpool import\fR [\fB-o \fImntopts\fR\fR] [\fB-o\fR \fIproperty=value\fR] ... [\fB-d\fR \fIdir\fR | \fB-c\fR \fIcachefile\fR]
     [\fB-D\fR] [\fB-f\fR] [\fB-R\fR \fIroot\fR] \fIpool\fR | \fIid\fR [\fInewpool\fR]
  77 .fi
  78 
  79 .LP
  80 .nf
  81 \fBzpool iostat\fR [\fB-T\fR u | d ] [\fB-v\fR] [\fIpool\fR] ... [\fIinterval\fR[\fIcount\fR]]
  82 .fi
  83 
  84 .LP
  85 .nf
  86 \fBzpool list\fR [\fB-H\fR] [\fB-o\fR \fIproperty\fR[,...]] [\fIpool\fR] ...
  87 .fi
  88 
  89 .LP
  90 .nf
  91 \fBzpool offline\fR [\fB-t\fR] \fIpool\fR \fIdevice\fR ...
  92 .fi
  93 
  94 .LP
  95 .nf
  96 \fBzpool online\fR \fIpool\fR \fIdevice\fR ...
  97 .fi
  98 
  99 .LP
 100 .nf
 101 \fBzpool reguid\fR \fIpool\fR
 102 .fi
 103 
 104 .LP
 105 .nf
 106 \fBzpool remove\fR \fIpool\fR \fIdevice\fR ...
 107 .fi
 108 
 109 .LP
 110 .nf
 111 \fBzpool replace\fR [\fB-f\fR] \fIpool\fR \fIdevice\fR [\fInew_device\fR]
 112 .fi
 113 
 114 .LP
 115 .nf
 116 \fBzpool scrub\fR [\fB-s\fR] \fIpool\fR ...
 117 .fi
 118 
 119 .LP
 120 .nf
 121 \fBzpool set\fR \fIproperty\fR=\fIvalue\fR \fIpool\fR
 122 .fi
 123 
 124 .LP
 125 .nf
 126 \fBzpool status\fR [\fB-xv\fR] [\fIpool\fR] ...
 127 .fi
 128 
 129 .LP
 130 .nf
 131 \fBzpool upgrade\fR
 132 .fi
 133 
 134 .LP
 135 .nf
 136 \fBzpool upgrade\fR \fB-v\fR
 137 .fi
 138 
 139 .LP
 140 .nf
 141 \fBzpool upgrade\fR [\fB-V\fR \fIversion\fR] \fB-a\fR | \fIpool\fR ...
 142 .fi
 143 
 144 .SH DESCRIPTION
 145 .sp
 146 .LP
 147 The \fBzpool\fR command configures \fBZFS\fR storage pools. A storage pool is a
 148 collection of devices that provides physical storage and data replication for
 149 \fBZFS\fR datasets.
 150 .sp
 151 .LP
 152 All datasets within a storage pool share the same space. See \fBzfs\fR(1M) for
 153 information on managing datasets.
 154 .SS "Virtual Devices (\fBvdev\fRs)"
 155 .sp
 156 .LP
 157 A "virtual device" describes a single device or a collection of devices
 158 organized according to certain performance and fault characteristics. The
 159 following virtual devices are supported:
 160 .sp
 161 .ne 2
 162 .na
 163 \fB\fBdisk\fR\fR
 164 .ad
 165 .RS 10n
 166 A block device, typically located under \fB/dev/dsk\fR. \fBZFS\fR can use
 167 individual slices or partitions, though the recommended mode of operation is to
 168 use whole disks. A disk can be specified by a full path, or it can be a
 169 shorthand name (the relative portion of the path under "/dev/dsk"). A whole
 170 disk can be specified by omitting the slice or partition designation. For
 171 example, "c0t0d0" is equivalent to "/dev/dsk/c0t0d0s2". When given a whole
 172 disk, \fBZFS\fR automatically labels the disk, if necessary.
 173 .RE
 174 
 175 .sp
 176 .ne 2
 177 .na
 178 \fB\fBfile\fR\fR
 179 .ad
 180 .RS 10n
 181 A regular file. The use of files as a backing store is strongly discouraged. It
 182 is designed primarily for experimental purposes, as the fault tolerance of a
 183 file is only as good as the file system of which it is a part. A file must be
 184 specified by a full path.
 185 .RE
 186 
 187 .sp
 188 .ne 2
 189 .na
 190 \fB\fBmirror\fR\fR
 191 .ad
 192 .RS 10n
 193 A mirror of two or more devices. Data is replicated in an identical fashion
 194 across all components of a mirror. A mirror with \fIN\fR disks of size \fIX\fR
 195 can hold \fIX\fR bytes and can withstand (\fIN-1\fR) devices failing before
 196 data integrity is compromised.
 197 .RE
 198 
 199 .sp
 200 .ne 2
 201 .na
 202 \fB\fBraidz\fR\fR
 203 .ad
 204 .br
 205 .na
 206 \fB\fBraidz1\fR\fR
 207 .ad
 208 .br
 209 .na
 210 \fB\fBraidz2\fR\fR
 211 .ad
 212 .br
 213 .na
 214 \fB\fBraidz3\fR\fR
 215 .ad
 216 .RS 10n
 217 A variation on \fBRAID-5\fR that allows for better distribution of parity and
 218 eliminates the "\fBRAID-5\fR write hole" (in which data and parity become
 219 inconsistent after a power loss). Data and parity is striped across all disks
 220 within a \fBraidz\fR group.
 221 .sp
A \fBraidz\fR group can have single-, double-, or triple-parity, meaning that
 223 the \fBraidz\fR group can sustain one, two, or three failures, respectively,
 224 without losing any data. The \fBraidz1\fR \fBvdev\fR type specifies a
 225 single-parity \fBraidz\fR group; the \fBraidz2\fR \fBvdev\fR type specifies a
 226 double-parity \fBraidz\fR group; and the \fBraidz3\fR \fBvdev\fR type specifies
 227 a triple-parity \fBraidz\fR group. The \fBraidz\fR \fBvdev\fR type is an alias
 228 for \fBraidz1\fR.
 229 .sp
 230 A \fBraidz\fR group with \fIN\fR disks of size \fIX\fR with \fIP\fR parity
 231 disks can hold approximately (\fIN-P\fR)*\fIX\fR bytes and can withstand
 232 \fIP\fR device(s) failing before data integrity is compromised. The minimum
 233 number of devices in a \fBraidz\fR group is one more than the number of parity
 234 disks. The recommended number is between 3 and 9 to help increase performance.
 235 .RE
 236 
 237 .sp
 238 .ne 2
 239 .na
 240 \fB\fBspare\fR\fR
 241 .ad
 242 .RS 10n
 243 A special pseudo-\fBvdev\fR which keeps track of available hot spares for a
 244 pool. For more information, see the "Hot Spares" section.
 245 .RE
 246 
 247 .sp
 248 .ne 2
 249 .na
 250 \fB\fBlog\fR\fR
 251 .ad
 252 .RS 10n
 253 A separate-intent log device. If more than one log device is specified, then
 254 writes are load-balanced between devices. Log devices can be mirrored. However,
 255 \fBraidz\fR \fBvdev\fR types are not supported for the intent log. For more
 256 information, see the "Intent Log" section.
 257 .RE
 258 
 259 .sp
 260 .ne 2
 261 .na
 262 \fB\fBcache\fR\fR
 263 .ad
 264 .RS 10n
A device used to cache storage pool data. A cache device cannot be
configured as a mirror or \fBraidz\fR group. For more information, see the
"Cache Devices" section.
 268 .RE
 269 
 270 .sp
 271 .LP
 272 Virtual devices cannot be nested, so a mirror or \fBraidz\fR virtual device can
 273 only contain files or disks. Mirrors of mirrors (or other combinations) are not
 274 allowed.
 275 .sp
 276 .LP
 277 A pool can have any number of virtual devices at the top of the configuration
 278 (known as "root vdevs"). Data is dynamically distributed across all top-level
 279 devices to balance data among devices. As new virtual devices are added,
 280 \fBZFS\fR automatically places data on the newly available devices.
 281 .sp
 282 .LP
 283 Virtual devices are specified one at a time on the command line, separated by
 284 whitespace. The keywords "mirror" and "raidz" are used to distinguish where a
 285 group ends and another begins. For example, the following creates two root
 286 vdevs, each a mirror of two disks:
 287 .sp
 288 .in +2
 289 .nf
 290 # \fBzpool create mypool mirror c0t0d0 c0t1d0 mirror c1t0d0 c1t1d0\fR
 291 .fi
 292 .in -2
 293 .sp
 294 
 295 .SS "Device Failure and Recovery"
 296 .sp
 297 .LP
 298 \fBZFS\fR supports a rich set of mechanisms for handling device failure and
 299 data corruption. All metadata and data is checksummed, and \fBZFS\fR
 300 automatically repairs bad data from a good copy when corruption is detected.
 301 .sp
 302 .LP
 303 In order to take advantage of these features, a pool must make use of some form
 304 of redundancy, using either mirrored or \fBraidz\fR groups. While \fBZFS\fR
 305 supports running in a non-redundant configuration, where each root vdev is
 306 simply a disk or file, this is strongly discouraged. A single case of bit
 307 corruption can render some or all of your data unavailable.
 308 .sp
 309 .LP
 310 A pool's health status is described by one of three states: online, degraded,
 311 or faulted. An online pool has all devices operating normally. A degraded pool
 312 is one in which one or more devices have failed, but the data is still
 313 available due to a redundant configuration. A faulted pool has corrupted
 314 metadata, or one or more faulted devices, and insufficient replicas to continue
 315 functioning.
 316 .sp
 317 .LP
 318 The health of the top-level vdev, such as mirror or \fBraidz\fR device, is
 319 potentially impacted by the state of its associated vdevs, or component
 320 devices. A top-level vdev or component device is in one of the following
 321 states:
 322 .sp
 323 .ne 2
 324 .na
 325 \fB\fBDEGRADED\fR\fR
 326 .ad
 327 .RS 12n
 328 One or more top-level vdevs is in the degraded state because one or more
 329 component devices are offline. Sufficient replicas exist to continue
 330 functioning.
 331 .sp
 332 One or more component devices is in the degraded or faulted state, but
 333 sufficient replicas exist to continue functioning. The underlying conditions
 334 are as follows:
 335 .RS +4
 336 .TP
 337 .ie t \(bu
 338 .el o
 339 The number of checksum errors exceeds acceptable levels and the device is
 340 degraded as an indication that something may be wrong. \fBZFS\fR continues to
 341 use the device as necessary.
 342 .RE
 343 .RS +4
 344 .TP
 345 .ie t \(bu
 346 .el o
 347 The number of I/O errors exceeds acceptable levels. The device could not be
 348 marked as faulted because there are insufficient replicas to continue
 349 functioning.
 350 .RE
 351 .RE
 352 
 353 .sp
 354 .ne 2
 355 .na
 356 \fB\fBFAULTED\fR\fR
 357 .ad
 358 .RS 12n
 359 One or more top-level vdevs is in the faulted state because one or more
 360 component devices are offline. Insufficient replicas exist to continue
 361 functioning.
 362 .sp
 363 One or more component devices is in the faulted state, and insufficient
 364 replicas exist to continue functioning. The underlying conditions are as
 365 follows:
 366 .RS +4
 367 .TP
 368 .ie t \(bu
 369 .el o
 370 The device could be opened, but the contents did not match expected values.
 371 .RE
 372 .RS +4
 373 .TP
 374 .ie t \(bu
 375 .el o
 376 The number of I/O errors exceeds acceptable levels and the device is faulted to
 377 prevent further use of the device.
 378 .RE
 379 .RE
 380 
 381 .sp
 382 .ne 2
 383 .na
 384 \fB\fBOFFLINE\fR\fR
 385 .ad
 386 .RS 12n
 387 The device was explicitly taken offline by the "\fBzpool offline\fR" command.
 388 .RE
 389 
 390 .sp
 391 .ne 2
 392 .na
 393 \fB\fBONLINE\fR\fR
 394 .ad
 395 .RS 12n
 396 The device is online and functioning.
 397 .RE
 398 
 399 .sp
 400 .ne 2
 401 .na
 402 \fB\fBREMOVED\fR\fR
 403 .ad
 404 .RS 12n
 405 The device was physically removed while the system was running. Device removal
 406 detection is hardware-dependent and may not be supported on all platforms.
 407 .RE
 408 
 409 .sp
 410 .ne 2
 411 .na
 412 \fB\fBUNAVAIL\fR\fR
 413 .ad
 414 .RS 12n
 415 The device could not be opened. If a pool is imported when a device was
 416 unavailable, then the device will be identified by a unique identifier instead
 417 of its path since the path was never correct in the first place.
 418 .RE
 419 
 420 .sp
 421 .LP
 422 If a device is removed and later re-attached to the system, \fBZFS\fR attempts
 423 to put the device online automatically. Device attach detection is
 424 hardware-dependent and might not be supported on all platforms.
 425 .SS "Hot Spares"
 426 .sp
 427 .LP
 428 \fBZFS\fR allows devices to be associated with pools as "hot spares". These
 429 devices are not actively used in the pool, but when an active device fails, it
 430 is automatically replaced by a hot spare. To create a pool with hot spares,
 431 specify a "spare" \fBvdev\fR with any number of devices. For example,
 432 .sp
 433 .in +2
 434 .nf
 435 # zpool create pool mirror c0d0 c1d0 spare c2d0 c3d0
 436 .fi
 437 .in -2
 438 .sp
 439 
 440 .sp
 441 .LP
 442 Spares can be shared across multiple pools, and can be added with the "\fBzpool
 443 add\fR" command and removed with the "\fBzpool remove\fR" command. Once a spare
 444 replacement is initiated, a new "spare" \fBvdev\fR is created within the
 445 configuration that will remain there until the original device is replaced. At
 446 this point, the hot spare becomes available again if another device fails.
 447 .sp
 448 .LP
If a pool has a shared spare that is currently being used, the pool cannot be
 450 exported since other pools may use this shared spare, which may lead to
 451 potential data corruption.
 452 .sp
 453 .LP
 454 An in-progress spare replacement can be cancelled by detaching the hot spare.
 455 If the original faulted device is detached, then the hot spare assumes its
 456 place in the configuration, and is removed from the spare list of all active
 457 pools.
 458 .sp
 459 .LP
 460 Spares cannot replace log devices.
 461 .SS "Intent Log"
 462 .sp
 463 .LP
 464 The \fBZFS\fR Intent Log (\fBZIL\fR) satisfies \fBPOSIX\fR requirements for
 465 synchronous transactions. For instance, databases often require their
 466 transactions to be on stable storage devices when returning from a system call.
 467 \fBNFS\fR and other applications can also use \fBfsync\fR() to ensure data
 468 stability. By default, the intent log is allocated from blocks within the main
 469 pool. However, it might be possible to get better performance using separate
 470 intent log devices such as \fBNVRAM\fR or a dedicated disk. For example:
 471 .sp
 472 .in +2
 473 .nf
 474 \fB# zpool create pool c0d0 c1d0 log c2d0\fR
 475 .fi
 476 .in -2
 477 .sp
 478 
 479 .sp
 480 .LP
 481 Multiple log devices can also be specified, and they can be mirrored. See the
 482 EXAMPLES section for an example of mirroring multiple log devices.
 483 .sp
 484 .LP
 485 Log devices can be added, replaced, attached, detached, and imported and
 486 exported as part of the larger pool. Mirrored log devices can be removed by
 487 specifying the top-level mirror for the log.
 488 .SS "Cache Devices"
 489 .sp
 490 .LP
 491 Devices can be added to a storage pool as "cache devices." These devices
 492 provide an additional layer of caching between main memory and disk. For
 493 read-heavy workloads, where the working set size is much larger than what can
be cached in main memory, using cache devices allows much more of this working
 495 set to be served from low latency media. Using cache devices provides the
 496 greatest performance improvement for random read-workloads of mostly static
 497 content.
 498 .sp
 499 .LP
 500 To create a pool with cache devices, specify a "cache" \fBvdev\fR with any
 501 number of devices. For example:
 502 .sp
 503 .in +2
 504 .nf
 505 \fB# zpool create pool c0d0 c1d0 cache c2d0 c3d0\fR
 506 .fi
 507 .in -2
 508 .sp
 509 
 510 .sp
 511 .LP
 512 Cache devices cannot be mirrored or part of a \fBraidz\fR configuration. If a
 513 read error is encountered on a cache device, that read \fBI/O\fR is reissued to
 514 the original storage pool device, which might be part of a mirrored or
 515 \fBraidz\fR configuration.
 516 .sp
 517 .LP
 518 The content of the cache devices is considered volatile, as is the case with
 519 other system caches.
 520 .SS "Properties"
 521 .sp
 522 .LP
 523 Each pool has several properties associated with it. Some properties are
 524 read-only statistics while others are configurable and change the behavior of
 525 the pool. The following are read-only properties:
 526 .sp
 527 .ne 2
 528 .na
 529 \fB\fBavailable\fR\fR
 530 .ad
 531 .RS 20n
 532 Amount of storage available within the pool. This property can also be referred
 533 to by its shortened column name, "avail".
 534 .RE
 535 
 536 .sp
 537 .ne 2
 538 .na
 539 \fB\fBcapacity\fR\fR
 540 .ad
 541 .RS 20n
 542 Percentage of pool space used. This property can also be referred to by its
 543 shortened column name, "cap".
 544 .RE
 545 
 546 .sp
 547 .ne 2
 548 .na
 549 \fB\fBcomment\fR\fR
 550 .ad
 551 .RS 20n
A text string consisting of printable ASCII characters that will be stored
 553 such that it is available even if the pool becomes faulted.  An administrator
 554 can provide additional information about a pool using this property.
 555 .RE
 556 
 557 .sp
 558 .ne 2
 559 .na
 560 \fB\fBhealth\fR\fR
 561 .ad
 562 .RS 20n
 563 The current health of the pool. Health can be "\fBONLINE\fR", "\fBDEGRADED\fR",
 564 "\fBFAULTED\fR", " \fBOFFLINE\fR", "\fBREMOVED\fR", or "\fBUNAVAIL\fR".
 565 .RE
 566 
 567 .sp
 568 .ne 2
 569 .na
 570 \fB\fBguid\fR\fR
 571 .ad
 572 .RS 20n
 573 A unique identifier for the pool.
 574 .RE
 575 
 576 .sp
 577 .ne 2
 578 .na
 579 \fB\fBsize\fR\fR
 580 .ad
 581 .RS 20n
 582 Total size of the storage pool.
 583 .RE
 584 
 585 .sp
 586 .ne 2
 587 .na
 588 \fB\fBused\fR\fR
 589 .ad
 590 .RS 20n
 591 Amount of storage space used within the pool.
 592 .RE
 593 
 594 .sp
 595 .LP
 596 These space usage properties report actual physical space available to the
 597 storage pool. The physical space can be different from the total amount of
 598 space that any contained datasets can actually use. The amount of space used in
 599 a \fBraidz\fR configuration depends on the characteristics of the data being
 600 written. In addition, \fBZFS\fR reserves some space for internal accounting
 601 that the \fBzfs\fR(1M) command takes into account, but the \fBzpool\fR command
 602 does not. For non-full pools of a reasonable size, these effects should be
 603 invisible. For small pools, or pools that are close to being completely full,
 604 these discrepancies may become more noticeable.
 605 .sp
 606 .LP
 607 The following property can be set at creation time and import time:
 608 .sp
 609 .ne 2
 610 .na
 611 \fB\fBaltroot\fR\fR
 612 .ad
 613 .sp .6
 614 .RS 4n
 615 Alternate root directory. If set, this directory is prepended to any mount
 616 points within the pool. This can be used when examining an unknown pool where
 617 the mount points cannot be trusted, or in an alternate boot environment, where
 618 the typical paths are not valid. \fBaltroot\fR is not a persistent property. It
 619 is valid only while the system is up. Setting \fBaltroot\fR defaults to using
 620 \fBcachefile\fR=none, though this may be overridden using an explicit setting.
 621 .RE
 622 
 623 .sp
 624 .LP
 625 The following properties can be set at creation time and import time, and later
 626 changed with the \fBzpool set\fR command:
 627 .sp
 628 .ne 2
 629 .na
 630 \fB\fBautoexpand\fR=\fBon\fR | \fBoff\fR\fR
 631 .ad
 632 .sp .6
 633 .RS 4n
 634 Controls automatic pool expansion when the underlying LUN is grown. If set to
 635 \fBon\fR, the pool will be resized according to the size of the expanded
 636 device. If the device is part of a mirror or \fBraidz\fR then all devices
 637 within that mirror/\fBraidz\fR group must be expanded before the new space is
 638 made available to the pool. The default behavior is \fBoff\fR. This property
 639 can also be referred to by its shortened column name, \fBexpand\fR.
 640 .RE
 641 
 642 .sp
 643 .ne 2
 644 .na
 645 \fB\fBautoreplace\fR=\fBon\fR | \fBoff\fR\fR
 646 .ad
 647 .sp .6
 648 .RS 4n
 649 Controls automatic device replacement. If set to "\fBoff\fR", device
 650 replacement must be initiated by the administrator by using the "\fBzpool
 651 replace\fR" command. If set to "\fBon\fR", any new device, found in the same
 652 physical location as a device that previously belonged to the pool, is
 653 automatically formatted and replaced. The default behavior is "\fBoff\fR". This
 654 property can also be referred to by its shortened column name, "replace".
 655 .RE
 656 
 657 .sp
 658 .ne 2
 659 .na
 660 \fB\fBbootfs\fR=\fIpool\fR/\fIdataset\fR\fR
 661 .ad
 662 .sp .6
 663 .RS 4n
 664 Identifies the default bootable dataset for the root pool. This property is
 665 expected to be set mainly by the installation and upgrade programs.
 666 .RE
 667 
 668 .sp
 669 .ne 2
 670 .na
 671 \fB\fBcachefile\fR=\fIpath\fR | \fBnone\fR\fR
 672 .ad
 673 .sp .6
 674 .RS 4n
 675 Controls the location of where the pool configuration is cached. Discovering
 676 all pools on system startup requires a cached copy of the configuration data
 677 that is stored on the root file system. All pools in this cache are
 678 automatically imported when the system boots. Some environments, such as
 679 install and clustering, need to cache this information in a different location
 680 so that pools are not automatically imported. Setting this property caches the
 681 pool configuration in a different location that can later be imported with
 682 "\fBzpool import -c\fR". Setting it to the special value "\fBnone\fR" creates a
 683 temporary pool that is never cached, and the special value \fB\&''\fR (empty
 684 string) uses the default location.
 685 .sp
 686 Multiple pools can share the same cache file. Because the kernel destroys and
 687 recreates this file when pools are added and removed, care should be taken when
 688 attempting to access this file. When the last pool using a \fBcachefile\fR is
 689 exported or destroyed, the file is removed.
 690 .RE
 691 
 692 .sp
 693 .ne 2
 694 .na
 695 \fB\fBdelegation\fR=\fBon\fR | \fBoff\fR\fR
 696 .ad
 697 .sp .6
 698 .RS 4n
 699 Controls whether a non-privileged user is granted access based on the dataset
 700 permissions defined on the dataset. See \fBzfs\fR(1M) for more information on
 701 \fBZFS\fR delegated administration.
 702 .RE
 703 
 704 .sp
 705 .ne 2
 706 .na
 707 \fB\fBfailmode\fR=\fBwait\fR | \fBcontinue\fR | \fBpanic\fR\fR
 708 .ad
 709 .sp .6
 710 .RS 4n
 711 Controls the system behavior in the event of catastrophic pool failure. This
 712 condition is typically a result of a loss of connectivity to the underlying
 713 storage device(s) or a failure of all devices within the pool. The behavior of
 714 such an event is determined as follows:
 715 .sp
 716 .ne 2
 717 .na
 718 \fB\fBwait\fR\fR
 719 .ad
 720 .RS 12n
 721 Blocks all \fBI/O\fR access until the device connectivity is recovered and the
 722 errors are cleared. This is the default behavior.
 723 .RE
 724 
 725 .sp
 726 .ne 2
 727 .na
 728 \fB\fBcontinue\fR\fR
 729 .ad
 730 .RS 12n
 731 Returns \fBEIO\fR to any new write \fBI/O\fR requests but allows reads to any
 732 of the remaining healthy devices. Any write requests that have yet to be
 733 committed to disk would be blocked.
 734 .RE
 735 
 736 .sp
 737 .ne 2
 738 .na
 739 \fB\fBpanic\fR\fR
 740 .ad
 741 .RS 12n
 742 Prints out a message to the console and generates a system crash dump.
 743 .RE
 744 
 745 .RE
 746 
 747 .sp
 748 .ne 2
 749 .na
 750 \fB\fBlistsnaps\fR=on | off\fR
 751 .ad
 752 .sp .6
 753 .RS 4n
 754 Controls whether information about snapshots associated with this pool is
 755 output when "\fBzfs list\fR" is run without the \fB-t\fR option. The default
 756 value is "off".
 757 .RE
 758 
 759 .sp
 760 .ne 2
 761 .na
 762 \fB\fBversion\fR=\fIversion\fR\fR
 763 .ad
 764 .sp .6
 765 .RS 4n
 766 The current on-disk version of the pool. This can be increased, but never
 767 decreased. The preferred method of updating pools is with the "\fBzpool
 768 upgrade\fR" command, though this property can be used when a specific version
 769 is needed for backwards compatibility. This property can be any number between
 770 1 and the current version reported by "\fBzpool upgrade -v\fR".
 771 .RE
 772 
 773 .SS "Subcommands"
 774 .sp
 775 .LP
 776 All subcommands that modify state are logged persistently to the pool in their
 777 original form.
 778 .sp
 779 .LP
 780 The \fBzpool\fR command provides subcommands to create and destroy storage
 781 pools, add capacity to storage pools, and provide information about the storage
 782 pools. The following subcommands are supported:
 783 .sp
 784 .ne 2
 785 .na
 786 \fB\fBzpool\fR \fB-?\fR\fR
 787 .ad
 788 .sp .6
 789 .RS 4n
 790 Displays a help message.
 791 .RE
 792 
 793 .sp
 794 .ne 2
 795 .na
 796 \fB\fBzpool add\fR [\fB-fn\fR] \fIpool\fR \fIvdev\fR ...\fR
 797 .ad
 798 .sp .6
 799 .RS 4n
 800 Adds the specified virtual devices to the given pool. The \fIvdev\fR
 801 specification is described in the "Virtual Devices" section. The behavior of
 802 the \fB-f\fR option, and the device checks performed are described in the
 803 "zpool create" subcommand.
 804 .sp
 805 .ne 2
 806 .na
 807 \fB\fB-f\fR\fR
 808 .ad
 809 .RS 6n
 810 Forces use of \fBvdev\fRs, even if they appear in use or specify a conflicting
 811 replication level. Not all devices can be overridden in this manner.
 812 .RE
 813 
 814 .sp
 815 .ne 2
 816 .na
 817 \fB\fB-n\fR\fR
 818 .ad
 819 .RS 6n
 820 Displays the configuration that would be used without actually adding the
 821 \fBvdev\fRs. The actual pool creation can still fail due to insufficient
 822 privileges or device sharing.
 823 .RE
 824 
 825 Do not add a disk that is currently configured as a quorum device to a zpool.
 826 After a disk is in the pool, that disk can then be configured as a quorum
 827 device.
 828 .RE
 829 
 830 .sp
 831 .ne 2
 832 .na
 833 \fB\fBzpool attach\fR [\fB-f\fR] \fIpool\fR \fIdevice\fR \fInew_device\fR\fR
 834 .ad
 835 .sp .6
 836 .RS 4n
 837 Attaches \fInew_device\fR to an existing \fBzpool\fR device. The existing
 838 device cannot be part of a \fBraidz\fR configuration. If \fIdevice\fR is not
 839 currently part of a mirrored configuration, \fIdevice\fR automatically
 840 transforms into a two-way mirror of \fIdevice\fR and \fInew_device\fR. If
 841 \fIdevice\fR is part of a two-way mirror, attaching \fInew_device\fR creates a
 842 three-way mirror, and so on. In either case, \fInew_device\fR begins to
 843 resilver immediately.
 844 .sp
 845 .ne 2
 846 .na
 847 \fB\fB-f\fR\fR
 848 .ad
 849 .RS 6n
Forces use of \fInew_device\fR, even if it appears to be in use. Not all
 851 devices can be overridden in this manner.
 852 .RE
 853 
 854 .RE
 855 
 856 .sp
 857 .ne 2
 858 .na
 859 \fB\fBzpool clear\fR \fIpool\fR [\fIdevice\fR] ...\fR
 860 .ad
 861 .sp .6
 862 .RS 4n
 863 Clears device errors in a pool. If no arguments are specified, all device
 864 errors within the pool are cleared. If one or more devices is specified, only
 865 those errors associated with the specified device or devices are cleared.
 866 .RE
 867 
 868 .sp
 869 .ne 2
 870 .na
 871 \fB\fBzpool create\fR [\fB-fn\fR] [\fB-o\fR \fIproperty=value\fR] ... [\fB-O\fR
 872 \fIfile-system-property=value\fR] ... [\fB-m\fR \fImountpoint\fR] [\fB-R\fR
 873 \fIroot\fR] \fIpool\fR \fIvdev\fR ...\fR
 874 .ad
 875 .sp .6
 876 .RS 4n
 877 Creates a new storage pool containing the virtual devices specified on the
 878 command line. The pool name must begin with a letter, and can only contain
 879 alphanumeric characters as well as underscore ("_"), dash ("-"), and period
 880 ("."). The pool names "mirror", "raidz", "spare" and "log" are reserved, as are
 881 names beginning with the pattern "c[0-9]". The \fBvdev\fR specification is
 882 described in the "Virtual Devices" section.
 883 .sp
 884 The command verifies that each device specified is accessible and not currently
 885 in use by another subsystem. There are some uses, such as being currently
mounted, or specified as the dedicated dump device, that prevent a device from
 887 ever being used by \fBZFS\fR. Other uses, such as having a preexisting
 888 \fBUFS\fR file system, can be overridden with the \fB-f\fR option.
 889 .sp
 890 The command also checks that the replication strategy for the pool is
 891 consistent. An attempt to combine redundant and non-redundant storage in a
 892 single pool, or to mix disks and files, results in an error unless \fB-f\fR is
 893 specified. The use of differently sized devices within a single \fBraidz\fR or
 894 mirror group is also flagged as an error unless \fB-f\fR is specified.
 895 .sp
 896 Unless the \fB-R\fR option is specified, the default mount point is
 897 "/\fIpool\fR". The mount point must not exist or must be empty, or else the
 898 root dataset cannot be mounted. This can be overridden with the \fB-m\fR
 899 option.
 900 .sp
 901 .ne 2
 902 .na
 903 \fB\fB-f\fR\fR
 904 .ad
 905 .sp .6
 906 .RS 4n
 907 Forces use of \fBvdev\fRs, even if they appear in use or specify a conflicting
 908 replication level. Not all devices can be overridden in this manner.
 909 .RE
 910 
 911 .sp
 912 .ne 2
 913 .na
 914 \fB\fB-n\fR\fR
 915 .ad
 916 .sp .6
 917 .RS 4n
 918 Displays the configuration that would be used without actually creating the
 919 pool. The actual pool creation can still fail due to insufficient privileges or
 920 device sharing.
 921 .RE
 922 
 923 .sp
 924 .ne 2
 925 .na
 926 \fB\fB-o\fR \fIproperty=value\fR [\fB-o\fR \fIproperty=value\fR] ...\fR
 927 .ad
 928 .sp .6
 929 .RS 4n
 930 Sets the given pool properties. See the "Properties" section for a list of
 931 valid properties that can be set.
 932 .RE
 933 
 934 .sp
 935 .ne 2
 936 .na
 937 \fB\fB-O\fR \fIfile-system-property=value\fR\fR
 938 .ad
 939 .br
 940 .na
 941 \fB[\fB-O\fR \fIfile-system-property=value\fR] ...\fR
 942 .ad
 943 .sp .6
 944 .RS 4n
 945 Sets the given file system properties in the root file system of the pool. See
 946 the "Properties" section of \fBzfs\fR(1M) for a list of valid properties that
 947 can be set.
 948 .RE
 949 
 950 .sp
 951 .ne 2
 952 .na
 953 \fB\fB-R\fR \fIroot\fR\fR
 954 .ad
 955 .sp .6
 956 .RS 4n
 957 Equivalent to "-o cachefile=none,altroot=\fIroot\fR"
 958 .RE
 959 
 960 .sp
 961 .ne 2
 962 .na
 963 \fB\fB-m\fR \fImountpoint\fR\fR
 964 .ad
 965 .sp .6
 966 .RS 4n
 967 Sets the mount point for the root dataset. The default mount point is
 968 "/\fIpool\fR" or "\fBaltroot\fR/\fIpool\fR" if \fBaltroot\fR is specified. The
 969 mount point must be an absolute path, "\fBlegacy\fR", or "\fBnone\fR". For more
 970 information on dataset mount points, see \fBzfs\fR(1M).
 971 .RE
 972 
 973 .RE
 974 
 975 .sp
 976 .ne 2
 977 .na
 978 \fB\fBzpool destroy\fR [\fB-f\fR] \fIpool\fR\fR
 979 .ad
 980 .sp .6
 981 .RS 4n
 982 Destroys the given pool, freeing up any devices for other use. This command
 983 tries to unmount any active datasets before destroying the pool.
 984 .sp
 985 .ne 2
 986 .na
 987 \fB\fB-f\fR\fR
 988 .ad
 989 .RS 6n
 990 Forces any active datasets contained within the pool to be unmounted.
 991 .RE
 992 
 993 .RE
 994 
 995 .sp
 996 .ne 2
 997 .na
 998 \fB\fBzpool detach\fR \fIpool\fR \fIdevice\fR\fR
 999 .ad
1000 .sp .6
1001 .RS 4n
1002 Detaches \fIdevice\fR from a mirror. The operation is refused if there are no
1003 other valid replicas of the data.
1004 .RE
1005 
1006 .sp
1007 .ne 2
1008 .na
1009 \fB\fBzpool export\fR [\fB-f\fR] \fIpool\fR ...\fR
1010 .ad
1011 .sp .6
1012 .RS 4n
1013 Exports the given pools from the system. All devices are marked as exported,
1014 but are still considered in use by other subsystems. The devices can be moved
1015 between systems (even those of different endianness) and imported as long as a
1016 sufficient number of devices are present.
1017 .sp
1018 Before exporting the pool, all datasets within the pool are unmounted. A pool
1019 cannot be exported if it has a shared spare that is currently being used.
1020 .sp
1021 For pools to be portable, you must give the \fBzpool\fR command whole disks,
1022 not just slices, so that \fBZFS\fR can label the disks with portable \fBEFI\fR
1023 labels. Otherwise, disk drivers on platforms of different endianness will not
1024 recognize the disks.
1025 .sp
1026 .ne 2
1027 .na
1028 \fB\fB-f\fR\fR
1029 .ad
1030 .RS 6n
1031 Forcefully unmount all datasets, using the "\fBunmount -f\fR" command.
1032 .sp
1033 This command will forcefully export the pool even if it has a shared spare that
1034 is currently being used. This may lead to potential data corruption.
1035 .RE
1036 
1037 .RE
1038 
1039 .sp
1040 .ne 2
1041 .na
1042 \fB\fBzpool get\fR "\fIall\fR" | \fIproperty\fR[,...] \fIpool\fR ...\fR
1043 .ad
1044 .sp .6
1045 .RS 4n
1046 Retrieves the given list of properties (or all properties if "\fBall\fR" is
1047 used) for the specified storage pool(s). These properties are displayed with
1048 the following fields:
1049 .sp
1050 .in +2
1051 .nf
1052         name          Name of storage pool
1053         property      Property name
1054         value         Property value
1055         source        Property source, either 'default' or 'local'.
1056 .fi
1057 .in -2
1058 .sp
1059 
1060 See the "Properties" section for more information on the available pool
1061 properties.
1062 .RE
1063 
1064 .sp
1065 .ne 2
1066 .na
1067 \fB\fBzpool history\fR [\fB-il\fR] [\fIpool\fR] ...\fR
1068 .ad
1069 .sp .6
1070 .RS 4n
1071 Displays the command history of the specified pools or all pools if no pool is
1072 specified.
1073 .sp
1074 .ne 2
1075 .na
1076 \fB\fB-i\fR\fR
1077 .ad
1078 .RS 6n
1079 Displays internally logged \fBZFS\fR events in addition to user initiated
1080 events.
1081 .RE
1082 
1083 .sp
1084 .ne 2
1085 .na
1086 \fB\fB-l\fR\fR
1087 .ad
1088 .RS 6n
1089 Displays log records in long format, which in addition to standard format
1090 includes the user name, the hostname, and the zone in which the operation was
1091 performed.
1092 .RE
1093 
1094 .RE
1095 
1096 .sp
1097 .ne 2
1098 .na
1099 \fB\fBzpool import\fR [\fB-d\fR \fIdir\fR | \fB-c\fR \fIcachefile\fR]
1100 [\fB-D\fR]\fR
1101 .ad
1102 .sp .6
1103 .RS 4n
1104 Lists pools available to import. If the \fB-d\fR option is not specified, this
1105 command searches for devices in "/dev/dsk". The \fB-d\fR option can be
1106 specified multiple times, and all directories are searched. If the device
1107 appears to be part of an exported pool, this command displays a summary of the
1108 pool with the name of the pool, a numeric identifier, as well as the \fIvdev\fR
1109 layout and current health of the device for each device or file. Destroyed
1110 pools, pools that were previously destroyed with the "\fBzpool destroy\fR"
1111 command, are not listed unless the \fB-D\fR option is specified.
1112 .sp
1113 The numeric identifier is unique, and can be used instead of the pool name when
1114 multiple exported pools of the same name are available.
1115 .sp
1116 .ne 2
1117 .na
1118 \fB\fB-c\fR \fIcachefile\fR\fR
1119 .ad
1120 .RS 16n
1121 Reads configuration from the given \fBcachefile\fR that was created with the
1122 "\fBcachefile\fR" pool property. This \fBcachefile\fR is used instead of
1123 searching for devices.
1124 .RE
1125 
1126 .sp
1127 .ne 2
1128 .na
1129 \fB\fB-d\fR \fIdir\fR\fR
1130 .ad
1131 .RS 16n
1132 Searches for devices or files in \fIdir\fR. The \fB-d\fR option can be
1133 specified multiple times.
1134 .RE
1135 
1136 .sp
1137 .ne 2
1138 .na
1139 \fB\fB-D\fR\fR
1140 .ad
1141 .RS 16n
1142 Lists destroyed pools only.
1143 .RE
1144 
1145 .RE
1146 
1147 .sp
1148 .ne 2
1149 .na
1150 \fB\fBzpool import\fR [\fB-o\fR \fImntopts\fR] [ \fB-o\fR
1151 \fIproperty\fR=\fIvalue\fR] ... [\fB-d\fR \fIdir\fR | \fB-c\fR \fIcachefile\fR]
1152 [\fB-D\fR] [\fB-f\fR] [\fB-R\fR \fIroot\fR] \fB-a\fR\fR
1153 .ad
1154 .sp .6
1155 .RS 4n
1156 Imports all pools found in the search directories. Identical to the previous
1157 command, except that all pools with a sufficient number of devices available
1158 are imported. Destroyed pools, pools that were previously destroyed with the
1159 "\fBzpool destroy\fR" command, will not be imported unless the \fB-D\fR option
1160 is specified.
1161 .sp
1162 .ne 2
1163 .na
1164 \fB\fB-o\fR \fImntopts\fR\fR
1165 .ad
1166 .RS 21n
1167 Comma-separated list of mount options to use when mounting datasets within the
1168 pool. See \fBzfs\fR(1M) for a description of dataset properties and mount
1169 options.
1170 .RE
1171 
1172 .sp
1173 .ne 2
1174 .na
1175 \fB\fB-o\fR \fIproperty=value\fR\fR
1176 .ad
1177 .RS 21n
1178 Sets the specified property on the imported pool. See the "Properties" section
1179 for more information on the available pool properties.
1180 .RE
1181 
1182 .sp
1183 .ne 2
1184 .na
1185 \fB\fB-c\fR \fIcachefile\fR\fR
1186 .ad
1187 .RS 21n
1188 Reads configuration from the given \fBcachefile\fR that was created with the
1189 "\fBcachefile\fR" pool property. This \fBcachefile\fR is used instead of
1190 searching for devices.
1191 .RE
1192 
1193 .sp
1194 .ne 2
1195 .na
1196 \fB\fB-d\fR \fIdir\fR\fR
1197 .ad
1198 .RS 21n
1199 Searches for devices or files in \fIdir\fR. The \fB-d\fR option can be
1200 specified multiple times. This option is incompatible with the \fB-c\fR option.
1201 .RE
1202 
1203 .sp
1204 .ne 2
1205 .na
1206 \fB\fB-D\fR\fR
1207 .ad
1208 .RS 21n
1209 Imports destroyed pools only. The \fB-f\fR option is also required.
1210 .RE
1211 
1212 .sp
1213 .ne 2
1214 .na
1215 \fB\fB-f\fR\fR
1216 .ad
1217 .RS 21n
1218 Forces import, even if the pool appears to be potentially active.
1219 .RE
1220 
1221 .sp
1222 .ne 2
1223 .na
1224 \fB\fB-a\fR\fR
1225 .ad
1226 .RS 21n
1227 Searches for and imports all pools found.
1228 .RE
1229 
1230 .sp
1231 .ne 2
1232 .na
1233 \fB\fB-R\fR \fIroot\fR\fR
1234 .ad
1235 .RS 21n
1236 Sets the "\fBcachefile\fR" property to "\fBnone\fR" and the "\fIaltroot\fR"
1237 property to "\fIroot\fR".
1238 .RE
1239 
1240 .RE
1241 
1242 .sp
1243 .ne 2
1244 .na
1245 \fB\fBzpool import\fR [\fB-o\fR \fImntopts\fR] [ \fB-o\fR
1246 \fIproperty\fR=\fIvalue\fR] ... [\fB-d\fR \fIdir\fR | \fB-c\fR \fIcachefile\fR]
1247 [\fB-D\fR] [\fB-f\fR] [\fB-R\fR \fIroot\fR] \fIpool\fR | \fIid\fR
1248 [\fInewpool\fR]\fR
1249 .ad
1250 .sp .6
1251 .RS 4n
1252 Imports a specific pool. A pool can be identified by its name or the numeric
1253 identifier. If \fInewpool\fR is specified, the pool is imported using the name
1254 \fInewpool\fR. Otherwise, it is imported with the same name as its exported
1255 name.
1256 .sp
1257 If a device is removed from a system without running "\fBzpool export\fR"
1258 first, the device appears as potentially active. It cannot be determined if
1259 this was a failed export, or whether the device is really in use from another
1260 host. To import a pool in this state, the \fB-f\fR option is required.
1261 .sp
1262 .ne 2
1263 .na
1264 \fB\fB-o\fR \fImntopts\fR\fR
1265 .ad
1266 .sp .6
1267 .RS 4n
1268 Comma-separated list of mount options to use when mounting datasets within the
1269 pool. See \fBzfs\fR(1M) for a description of dataset properties and mount
1270 options.
1271 .RE
1272 
1273 .sp
1274 .ne 2
1275 .na
1276 \fB\fB-o\fR \fIproperty=value\fR\fR
1277 .ad
1278 .sp .6
1279 .RS 4n
1280 Sets the specified property on the imported pool. See the "Properties" section
1281 for more information on the available pool properties.
1282 .RE
1283 
1284 .sp
1285 .ne 2
1286 .na
1287 \fB\fB-c\fR \fIcachefile\fR\fR
1288 .ad
1289 .sp .6
1290 .RS 4n
1291 Reads configuration from the given \fBcachefile\fR that was created with the
1292 "\fBcachefile\fR" pool property. This \fBcachefile\fR is used instead of
1293 searching for devices.
1294 .RE
1295 
1296 .sp
1297 .ne 2
1298 .na
1299 \fB\fB-d\fR \fIdir\fR\fR
1300 .ad
1301 .sp .6
1302 .RS 4n
1303 Searches for devices or files in \fIdir\fR. The \fB-d\fR option can be
1304 specified multiple times. This option is incompatible with the \fB-c\fR option.
1305 .RE
1306 
1307 .sp
1308 .ne 2
1309 .na
1310 \fB\fB-D\fR\fR
1311 .ad
1312 .sp .6
1313 .RS 4n
1314 Imports a destroyed pool. The \fB-f\fR option is also required.
1315 .RE
1316 
1317 .sp
1318 .ne 2
1319 .na
1320 \fB\fB-f\fR\fR
1321 .ad
1322 .sp .6
1323 .RS 4n
1324 Forces import, even if the pool appears to be potentially active.
1325 .RE
1326 
1327 .sp
1328 .ne 2
1329 .na
1330 \fB\fB-R\fR \fIroot\fR\fR
1331 .ad
1332 .sp .6
1333 .RS 4n
1334 Sets the "\fBcachefile\fR" property to "\fBnone\fR" and the "\fIaltroot\fR"
1335 property to "\fIroot\fR".
1336 .RE
1337 
1338 .RE
1339 
1340 .sp
1341 .ne 2
1342 .na
1343 \fB\fBzpool iostat\fR [\fB-T\fR \fBu\fR | \fBd\fR] [\fB-v\fR] [\fIpool\fR] ...
1344 [\fIinterval\fR[\fIcount\fR]]\fR
1345 .ad
1346 .sp .6
1347 .RS 4n
1348 Displays \fBI/O\fR statistics for the given pools. When given an interval, the
1349 statistics are printed every \fIinterval\fR seconds until \fBCtrl-C\fR is
1350 pressed. If no \fIpools\fR are specified, statistics for every pool in the
1351 system are shown. If \fIcount\fR is specified, the command exits after
1352 \fIcount\fR reports are printed.
1353 .sp
1354 .ne 2
1355 .na
1356 \fB\fB-T\fR \fBu\fR | \fBd\fR\fR
1357 .ad
1358 .RS 12n
1359 Display a time stamp.
1360 .sp
1361 Specify \fBu\fR for a printed representation of the internal representation of
1362 time. See \fBtime\fR(2). Specify \fBd\fR for standard date format. See
1363 \fBdate\fR(1).
1364 .RE
1365 
1366 .sp
1367 .ne 2
1368 .na
1369 \fB\fB-v\fR\fR
1370 .ad
1371 .RS 12n
1372 Verbose statistics. Reports usage statistics for individual \fIvdevs\fR within
1373 the pool, in addition to the pool-wide statistics.
1374 .RE
1375 
1376 .RE
1377 
1378 .sp
1379 .ne 2
1380 .na
1381 \fB\fBzpool list\fR [\fB-H\fR] [\fB-o\fR \fIprops\fR[,...]] [\fIpool\fR] ...\fR
1382 .ad
1383 .sp .6
1384 .RS 4n
1385 Lists the given pools along with a health status and space usage. When given no
1386 arguments, all pools in the system are listed.
1387 .sp
1388 .ne 2
1389 .na
1390 \fB\fB-H\fR\fR
1391 .ad
1392 .RS 12n
1393 Scripted mode. Do not display headers, and separate fields by a single tab
1394 instead of arbitrary space.
1395 .RE
1396 
1397 .sp
1398 .ne 2
1399 .na
1400 \fB\fB-o\fR \fIprops\fR\fR
1401 .ad
1402 .RS 12n
1403 Comma-separated list of properties to display. See the "Properties" section for
1404 a list of valid properties. The default list is "name, size, used, available,
1405 capacity, health, altroot"
1406 .RE
1407 
1408 .RE
1409 
1410 .sp
1411 .ne 2
1412 .na
1413 \fB\fBzpool offline\fR [\fB-t\fR] \fIpool\fR \fIdevice\fR ...\fR
1414 .ad
1415 .sp .6
1416 .RS 4n
1417 Takes the specified physical device offline. While the \fIdevice\fR is offline,
1418 no attempt is made to read or write to the device.
1419 .sp
1420 This command is not applicable to spares or cache devices.
1421 .sp
1422 .ne 2
1423 .na
1424 \fB\fB-t\fR\fR
1425 .ad
1426 .RS 6n
1427 Temporary. Upon reboot, the specified physical device reverts to its previous
1428 state.
1429 .RE
1430 
1431 .RE
1432 
1433 .sp
1434 .ne 2
1435 .na
1436 \fB\fBzpool online\fR [\fB-e\fR] \fIpool\fR \fIdevice\fR...\fR
1437 .ad
1438 .sp .6
1439 .RS 4n
1440 Brings the specified physical device online.
1441 .sp
1442 This command is not applicable to spares or cache devices.
1443 .sp
1444 .ne 2
1445 .na
1446 \fB\fB-e\fR\fR
1447 .ad
1448 .RS 6n
1449 Expand the device to use all available space. If the device is part of a mirror
1450 or \fBraidz\fR then all devices must be expanded before the new space will
1451 become available to the pool.
1452 .RE
1453 
1454 .RE
1455 
1456 .sp
1457 .ne 2
1458 .na
1459 \fB\fBzpool reguid\fR \fIpool\fR\fR
1460 .ad
1461 .sp .6
1462 .RS 4n
1463 Generates a new unique identifier for the pool. You must ensure that all devices in this pool are online and
1464 healthy before performing this action.
1465 .RE
1466 
1467 .sp
1468 .ne 2
1469 .na
1470 \fB\fBzpool remove\fR \fIpool\fR \fIdevice\fR ...\fR
1471 .ad
1472 .sp .6
1473 .RS 4n
1474 Removes the specified device from the pool. This command currently only
1475 supports removing hot spares, cache, and log devices. A mirrored log device can
1476 be removed by specifying the top-level mirror for the log. Non-log devices that
1477 are part of a mirrored configuration can be removed using the \fBzpool
1478 detach\fR command. Non-redundant and \fBraidz\fR devices cannot be removed from
1479 a pool.
1480 .RE
1481 
1482 .sp
1483 .ne 2
1484 .na
1485 \fB\fBzpool replace\fR [\fB-f\fR] \fIpool\fR \fIold_device\fR
1486 [\fInew_device\fR]\fR
1487 .ad
1488 .sp .6
1489 .RS 4n
1490 Replaces \fIold_device\fR with \fInew_device\fR. This is equivalent to
1491 attaching \fInew_device\fR, waiting for it to resilver, and then detaching
1492 \fIold_device\fR.
1493 .sp
1494 The size of \fInew_device\fR must be greater than or equal to the minimum size
1495 of all the devices in a mirror or \fBraidz\fR configuration.
1496 .sp
1497 \fInew_device\fR is required if the pool is not redundant. If \fInew_device\fR
1498 is not specified, it defaults to \fIold_device\fR. This form of replacement is
1499 useful after an existing disk has failed and has been physically replaced. In
1500 this case, the new disk may have the same \fB/dev/dsk\fR path as the old
1501 device, even though it is actually a different disk. \fBZFS\fR recognizes this.
1502 .sp
1503 .ne 2
1504 .na
1505 \fB\fB-f\fR\fR
1506 .ad
1507 .RS 6n
1508 Forces use of \fInew_device\fR, even if it appears to be in use. Not all
1509 devices can be overridden in this manner.
1510 .RE
1511 
1512 .RE
1513 
1514 .sp
1515 .ne 2
1516 .na
1517 \fB\fBzpool scrub\fR [\fB-s\fR] \fIpool\fR ...\fR
1518 .ad
1519 .sp .6
1520 .RS 4n
1521 Begins a scrub. The scrub examines all data in the specified pools to verify
1522 that it checksums correctly. For replicated (mirror or \fBraidz\fR) devices,
1523 \fBZFS\fR automatically repairs any damage discovered during the scrub. The
1524 "\fBzpool status\fR" command reports the progress of the scrub and summarizes
1525 the results of the scrub upon completion.
1526 .sp
1527 Scrubbing and resilvering are very similar operations. The difference is that
1528 resilvering only examines data that \fBZFS\fR knows to be out of date (for
1529 example, when attaching a new device to a mirror or replacing an existing
1530 device), whereas scrubbing examines all data to discover silent errors due to
1531 hardware faults or disk failure.
1532 .sp
1533 Because scrubbing and resilvering are \fBI/O\fR-intensive operations, \fBZFS\fR
1534 only allows one at a time. If a scrub is already in progress, the "\fBzpool
1535 scrub\fR" command terminates it and starts a new scrub. If a resilver is in
1536 progress, \fBZFS\fR does not allow a scrub to be started until the resilver
1537 completes.
1538 .sp
1539 .ne 2
1540 .na
1541 \fB\fB-s\fR\fR
1542 .ad
1543 .RS 6n
1544 Stop scrubbing.
1545 .RE
1546 
1547 .RE
1548 
1549 .sp
1550 .ne 2
1551 .na
1552 \fB\fBzpool set\fR \fIproperty\fR=\fIvalue\fR \fIpool\fR\fR
1553 .ad
1554 .sp .6
1555 .RS 4n
1556 Sets the given property on the specified pool. See the "Properties" section for
1557 more information on what properties can be set and acceptable values.
1558 .RE
1559 
1560 .sp
1561 .ne 2
1562 .na
1563 \fB\fBzpool status\fR [\fB-xv\fR] [\fIpool\fR] ...\fR
1564 .ad
1565 .sp .6
1566 .RS 4n
1567 Displays the detailed health status for the given pools. If no \fIpool\fR is
1568 specified, then the status of each pool in the system is displayed. For more
1569 information on pool and device health, see the "Device Failure and Recovery"
1570 section.
1571 .sp
1572 If a scrub or resilver is in progress, this command reports the percentage done
1573 and the estimated time to completion. Both of these are only approximate,
1574 because the amount of data in the pool and the other workloads on the system
1575 can change.
1576 .sp
1577 .ne 2
1578 .na
1579 \fB\fB-x\fR\fR
1580 .ad
1581 .RS 6n
1582 Only display status for pools that are exhibiting errors or are otherwise
1583 unavailable.
1584 .RE
1585 
1586 .sp
1587 .ne 2
1588 .na
1589 \fB\fB-v\fR\fR
1590 .ad
1591 .RS 6n
1592 Displays verbose data error information, printing out a complete list of all
1593 data errors since the last complete pool scrub.
1594 .RE
1595 
1596 .RE
1597 
1598 .sp
1599 .ne 2
1600 .na
1601 \fB\fBzpool upgrade\fR\fR
1602 .ad
1603 .sp .6
1604 .RS 4n
1605 Displays all pools formatted using a different \fBZFS\fR on-disk version. Older
1606 versions can continue to be used, but some features may not be available. These
1607 pools can be upgraded using "\fBzpool upgrade -a\fR". Pools that are formatted
1608 with a more recent version are also displayed, although these pools will be
1609 inaccessible on the system.
1610 .RE
1611 
1612 .sp
1613 .ne 2
1614 .na
1615 \fB\fBzpool upgrade\fR \fB-v\fR\fR
1616 .ad
1617 .sp .6
1618 .RS 4n
1619 Displays \fBZFS\fR versions supported by the current software. The current
1620 \fBZFS\fR versions and all previous supported versions are displayed, along
1621 with an explanation of the features provided with each version.
1622 .RE
1623 
1624 .sp
1625 .ne 2
1626 .na
1627 \fB\fBzpool upgrade\fR [\fB-V\fR \fIversion\fR] \fB-a\fR | \fIpool\fR ...\fR
1628 .ad
1629 .sp .6
1630 .RS 4n
1631 Upgrades the given pool to the latest on-disk version. Once this is done, the
1632 pool will no longer be accessible on systems running older versions of the
1633 software.
1634 .sp
1635 .ne 2
1636 .na
1637 \fB\fB-a\fR\fR
1638 .ad
1639 .RS 14n
1640 Upgrades all pools.
1641 .RE
1642 
1643 .sp
1644 .ne 2
1645 .na
1646 \fB\fB-V\fR \fIversion\fR\fR
1647 .ad
1648 .RS 14n
1649 Upgrade to the specified version. If the \fB-V\fR flag is not specified, the
1650 pool is upgraded to the most recent version. This option can only be used to
1651 increase the version number, and only up to the most recent version supported
1652 by this software.
1653 .RE
1654 
1655 .RE
1656 
1657 .SH EXAMPLES
1658 .LP
1659 \fBExample 1 \fRCreating a RAID-Z Storage Pool
1660 .sp
1661 .LP
1662 The following command creates a pool with a single \fBraidz\fR root \fIvdev\fR
1663 that consists of six disks.
1664 
1665 .sp
1666 .in +2
1667 .nf
1668 # \fBzpool create tank raidz c0t0d0 c0t1d0 c0t2d0 c0t3d0 c0t4d0 c0t5d0\fR
1669 .fi
1670 .in -2
1671 .sp
1672 
1673 .LP
1674 \fBExample 2 \fRCreating a Mirrored Storage Pool
1675 .sp
1676 .LP
1677 The following command creates a pool with two mirrors, where each mirror
1678 contains two disks.
1679 
1680 .sp
1681 .in +2
1682 .nf
1683 # \fBzpool create tank mirror c0t0d0 c0t1d0 mirror c0t2d0 c0t3d0\fR
1684 .fi
1685 .in -2
1686 .sp
1687 
1688 .LP
1689 \fBExample 3 \fRCreating a ZFS Storage Pool by Using Slices
1690 .sp
1691 .LP
1692 The following command creates an unmirrored pool using two disk slices.
1693 
1694 .sp
1695 .in +2
1696 .nf
1697 # \fBzpool create tank /dev/dsk/c0t0d0s1 c0t1d0s4\fR
1698 .fi
1699 .in -2
1700 .sp
1701 
1702 .LP
1703 \fBExample 4 \fRCreating a ZFS Storage Pool by Using Files
1704 .sp
1705 .LP
1706 The following command creates an unmirrored pool using files. While not
1707 recommended, a pool based on files can be useful for experimental purposes.
1708 
1709 .sp
1710 .in +2
1711 .nf
1712 # \fBzpool create tank /path/to/file/a /path/to/file/b\fR
1713 .fi
1714 .in -2
1715 .sp
1716 
1717 .LP
1718 \fBExample 5 \fRAdding a Mirror to a ZFS Storage Pool
1719 .sp
1720 .LP
1721 The following command adds two mirrored disks to the pool "\fItank\fR",
1722 assuming the pool is already made up of two-way mirrors. The additional space
1723 is immediately available to any datasets within the pool.
1724 
1725 .sp
1726 .in +2
1727 .nf
1728 # \fBzpool add tank mirror c1t0d0 c1t1d0\fR
1729 .fi
1730 .in -2
1731 .sp
1732 
1733 .LP
1734 \fBExample 6 \fRListing Available ZFS Storage Pools
1735 .sp
1736 .LP
1737 The following command lists all available pools on the system. In this case,
1738 the pool \fIzion\fR is faulted due to a missing device.
1739 
1740 .sp
1741 .LP
1742 The results from this command are similar to the following:
1743 
1744 .sp
1745 .in +2
1746 .nf
1747 # \fBzpool list\fR
1748      NAME              SIZE    USED   AVAIL    CAP  HEALTH     ALTROOT
1749      pool             67.5G   2.92M   67.5G     0%  ONLINE     -
1750      tank             67.5G   2.92M   67.5G     0%  ONLINE     -
1751      zion                 -       -       -     0%  FAULTED    -
1752 .fi
1753 .in -2
1754 .sp
1755 
1756 .LP
1757 \fBExample 7 \fRDestroying a ZFS Storage Pool
1758 .sp
1759 .LP
1760 The following command destroys the pool "\fItank\fR" and any datasets contained
1761 within.
1762 
1763 .sp
1764 .in +2
1765 .nf
1766 # \fBzpool destroy -f tank\fR
1767 .fi
1768 .in -2
1769 .sp
1770 
1771 .LP
1772 \fBExample 8 \fRExporting a ZFS Storage Pool
1773 .sp
1774 .LP
1775 The following command exports the devices in pool \fItank\fR so that they can
1776 be relocated or later imported.
1777 
1778 .sp
1779 .in +2
1780 .nf
1781 # \fBzpool export tank\fR
1782 .fi
1783 .in -2
1784 .sp
1785 
1786 .LP
1787 \fBExample 9 \fRImporting a ZFS Storage Pool
1788 .sp
1789 .LP
1790 The following command displays available pools, and then imports the pool
1791 "tank" for use on the system.
1792 
1793 .sp
1794 .LP
1795 The results from this command are similar to the following:
1796 
1797 .sp
1798 .in +2
1799 .nf
1800 # \fBzpool import\fR
1801   pool: tank
1802     id: 15451357997522795478
1803  state: ONLINE
1804 action: The pool can be imported using its name or numeric identifier.
1805 config:
1806 
1807         tank        ONLINE
1808           mirror    ONLINE
1809             c1t2d0  ONLINE
1810             c1t3d0  ONLINE
1811 
1812 # \fBzpool import tank\fR
1813 .fi
1814 .in -2
1815 .sp
1816 
1817 .LP
1818 \fBExample 10 \fRUpgrading All ZFS Storage Pools to the Current Version
1819 .sp
1820 .LP
1821 The following command upgrades all ZFS Storage pools to the current version of
1822 the software.
1823 
1824 .sp
1825 .in +2
1826 .nf
1827 # \fBzpool upgrade -a\fR
1828 This system is currently running ZFS version 2.
1829 .fi
1830 .in -2
1831 .sp
1832 
1833 .LP
1834 \fBExample 11 \fRManaging Hot Spares
1835 .sp
1836 .LP
1837 The following command creates a new pool with an available hot spare:
1838 
1839 .sp
1840 .in +2
1841 .nf
1842 # \fBzpool create tank mirror c0t0d0 c0t1d0 spare c0t2d0\fR
1843 .fi
1844 .in -2
1845 .sp
1846 
1847 .sp
1848 .LP
1849 If one of the disks were to fail, the pool would be reduced to the degraded
1850 state. The failed device can be replaced using the following command:
1851 
1852 .sp
1853 .in +2
1854 .nf
1855 # \fBzpool replace tank c0t0d0 c0t3d0\fR
1856 .fi
1857 .in -2
1858 .sp
1859 
1860 .sp
1861 .LP
1862 Once the data has been resilvered, the spare is automatically removed and is
1863 made available should another device fail. The hot spare can be permanently
1864 removed from the pool using the following command:
1865 
1866 .sp
1867 .in +2
1868 .nf
1869 # \fBzpool remove tank c0t2d0\fR
1870 .fi
1871 .in -2
1872 .sp
1873 
1874 .LP
1875 \fBExample 12 \fRCreating a ZFS Pool with Mirrored Separate Intent Logs
1876 .sp
1877 .LP
1878 The following command creates a ZFS storage pool consisting of two, two-way
1879 mirrors and mirrored log devices:
1880 
1881 .sp
1882 .in +2
1883 .nf
1884 # \fBzpool create pool mirror c0d0 c1d0 mirror c2d0 c3d0 log mirror \e
1885    c4d0 c5d0\fR
1886 .fi
1887 .in -2
1888 .sp
1889 
1890 .LP
1891 \fBExample 13 \fRAdding Cache Devices to a ZFS Pool
1892 .sp
1893 .LP
1894 The following command adds two disks for use as cache devices to a ZFS storage
1895 pool:
1896 
1897 .sp
1898 .in +2
1899 .nf
1900 # \fBzpool add pool cache c2d0 c3d0\fR
1901 .fi
1902 .in -2
1903 .sp
1904 
1905 .sp
1906 .LP
1907 Once added, the cache devices gradually fill with content from main memory.
1908 Depending on the size of your cache devices, it could take over an hour for
1909 them to fill. Capacity and reads can be monitored using the \fBiostat\fR option
1910 as follows:
1911 
1912 .sp
1913 .in +2
1914 .nf
1915 # \fBzpool iostat -v pool 5\fR
1916 .fi
1917 .in -2
1918 .sp
1919 
1920 .LP
1921 \fBExample 14 \fRRemoving a Mirrored Log Device
1922 .sp
1923 .LP
1924 The following command removes the mirrored log device \fBmirror-2\fR.
1925 
1926 .sp
1927 .LP
1928 Given this configuration:
1929 
1930 .sp
1931 .in +2
1932 .nf
1933    pool: tank
1934   state: ONLINE
1935   scrub: none requested
1936 config:
1937 
1938          NAME        STATE     READ WRITE CKSUM
1939          tank        ONLINE       0     0     0
1940            mirror-0  ONLINE       0     0     0
1941              c6t0d0  ONLINE       0     0     0
1942              c6t1d0  ONLINE       0     0     0
1943            mirror-1  ONLINE       0     0     0
1944              c6t2d0  ONLINE       0     0     0
1945              c6t3d0  ONLINE       0     0     0
1946          logs
1947            mirror-2  ONLINE       0     0     0
1948              c4t0d0  ONLINE       0     0     0
1949              c4t1d0  ONLINE       0     0     0
1950 .fi
1951 .in -2
1952 .sp
1953 
1954 .sp
1955 .LP
1956 The command to remove the mirrored log \fBmirror-2\fR is:
1957 
1958 .sp
1959 .in +2
1960 .nf
1961 # \fBzpool remove tank mirror-2\fR
1962 .fi
1963 .in -2
1964 .sp
1965 
1966 .SH EXIT STATUS
1967 .sp
1968 .LP
1969 The following exit values are returned:
1970 .sp
1971 .ne 2
1972 .na
1973 \fB\fB0\fR\fR
1974 .ad
1975 .RS 5n
1976 Successful completion.
1977 .RE
1978 
1979 .sp
1980 .ne 2
1981 .na
1982 \fB\fB1\fR\fR
1983 .ad
1984 .RS 5n
1985 An error occurred.
1986 .RE
1987 
1988 .sp
1989 .ne 2
1990 .na
1991 \fB\fB2\fR\fR
1992 .ad
1993 .RS 5n
1994 Invalid command line options were specified.
1995 .RE
1996 
1997 .SH ATTRIBUTES
1998 .sp
1999 .LP
2000 See \fBattributes\fR(5) for descriptions of the following attributes:
2001 .sp
2002 
2003 .sp
2004 .TS
2005 box;
2006 c | c
2007 l | l .
2008 ATTRIBUTE TYPE  ATTRIBUTE VALUE
2009 _
2010 Interface Stability     Evolving
2011 .TE
2012 
2013 .SH SEE ALSO
2014 .sp
2015 .LP
2016 \fBzfs\fR(1M), \fBattributes\fR(5)