Print this page
NEX-9752 backport illumos 6950 ARC should cache compressed data
Reviewed by: Saso Kiselkov <saso.kiselkov@nexenta.com>
Reviewed by: Yuri Pankov <yuri.pankov@nexenta.com>
6950 ARC should cache compressed data
Reviewed by: Prakash Surya <prakash.surya@delphix.com>
Reviewed by: Dan Kimmel <dan.kimmel@delphix.com>
Reviewed by: Matt Ahrens <mahrens@delphix.com>
Reviewed by: Paul Dagnelie <pcd@delphix.com>
Reviewed by: Don Brady <don.brady@intel.com>
Reviewed by: Richard Elling <Richard.Elling@RichardElling.com>
Approved by: Richard Lowe <richlowe@richlowe.net>
6369 remove SVM tests from ZFS test suite
Reviewed by: John Kennedy <john.kennedy@delphix.com>
Reviewed by: Prakash Surya <prakash.surya@delphix.com>
Reviewed by: Albert Lee <trisk@omniti.com>
Approved by: Dan McDonald <danmcd@omniti.com>
6248 zpool_create_008_pos and zpool_create_009_neg can fail intermittently
Reviewed by: Matthew Ahrens <mahrens@delphix.com>
Reviewed by: George Wilson <george.wilson@delphix.com>
Approved by: Richard Lowe <richlowe@richlowe.net>
4185 add new cryptographic checksums to ZFS: SHA-512, Skein, Edon-R (fix studio build)
4185 add new cryptographic checksums to ZFS: SHA-512, Skein, Edon-R
Reviewed by: George Wilson <george.wilson@delphix.com>
Reviewed by: Prakash Surya <prakash.surya@delphix.com>
Reviewed by: Saso Kiselkov <saso.kiselkov@nexenta.com>
Reviewed by: Richard Lowe <richlowe@richlowe.net>
Approved by: Garrett D'Amore <garrett@damore.org>
5767 fix several problems with zfs test suite
Reviewed by: Matthew Ahrens <mahrens@delphix.com>
Reviewed by: Christopher Siden <christopher.siden@delphix.com>
Approved by: Gordon Ross <gwr@nexenta.com>
NEX-3974 Remove timing issues so the robot can run consistently
Reviewed by: Steve Peng <steve.peng@nexenta.com>
Reviewed by: Josef Sipek <josef.sipek@nexenta.com>
Reviewed by: Alek Pinchuk <alek.pinchuk@nexenta.com>
NEX-3363 Test suites don't cleanup after themselves properly.
Reviewed by: Josef 'Jeff' Sipek <josef.sipek@nexenta.com>
NEX-3740 The zfs tests use psrinfo instead of /usr/sbin/psrinfo
    Reviewed by: Alek Pinchuk <alek.pinchuk@nexenta.com>
    Reviewed by: Steve Peng <steve.peng@nexenta.com>
NEX-3258 Remove dependency upon SVM from zfs-tests
Reviewed by: Josef Sipek <josef.sipek@nexenta.com>
Reviewed by: Steve Peng <steve.peng@nexenta.com>
Reviewed by: Alek Pinchuk <alek.pinchuk@nexenta.com>
NEX-2744 zfs-tests suite fails rootpool_002_neg
NEX-2739 zfs-tests suite fails link_count_001
NEX-2741 zfs-tests suite fails mmap_write_001_pos
4206 history_003_pos relies on exact size of history log and entries
4207 history_008_pos depends on obsolete internal history log message
4208 Typo in zfs_main.c: "posxiuser"
4209 Populate zfstest with the remainder of the STF tests
Reviewed by: Sonu Pillai <sonu.pillai@delphix.com>
Reviewed by: Will Guyette <will.guyette@delphix.com>
Reviewed by: Eric Diven <eric.diven@delphix.com>
Reviewed by: Christopher Siden <christopher.siden@delphix.com>
Approved by: Richard Lowe <richlowe@richlowe.net>


 247 {
 248         typeset fs_vol=${1:-$TESTFS}
 249         typeset snap=${2:-$TESTSNAP}
 250         typeset bkmark=${3:-$TESTBKMARK}
 251 
 252         [[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined."
 253         [[ -z $snap ]] && log_fail "Snapshot's name is undefined."
 254         [[ -z $bkmark ]] && log_fail "Bookmark's name is undefined."
 255 
 256         if bkmarkexists $fs_vol#$bkmark; then
 257                 log_fail "$fs_vol#$bkmark already exists."
 258         fi
 259         datasetexists $fs_vol || \
 260                 log_fail "$fs_vol must exist."
 261         snapexists $fs_vol@$snap || \
 262                 log_fail "$fs_vol@$snap must exist."
 263 
 264         log_must zfs bookmark $fs_vol@$snap $fs_vol#$bkmark
 265 }
 266 
 267 #
 268 # Create a temporary clone result of an interrupted resumable 'zfs receive'
 269 # $1 Destination filesystem name. Must not exist, will be created as the result
 270 #    of this function along with its %recv temporary clone
 271 # $2 Source filesystem name. Must not exist, will be created and destroyed
 272 #
 273 function create_recv_clone
 274 {
 275         typeset recvfs="$1"
 276         typeset sendfs="${2:-$TESTPOOL/create_recv_clone}"
 277         typeset snap="$sendfs@snap1"
 278         typeset incr="$sendfs@snap2"
 279         typeset mountpoint="$TESTDIR/create_recv_clone"
 280         typeset sendfile="$TESTDIR/create_recv_clone.zsnap"
 281 
 282         [[ -z $recvfs ]] && log_fail "Recv filesystem's name is undefined."
 283 
 284         datasetexists $recvfs && log_fail "Recv filesystem must not exist."
 285         datasetexists $sendfs && log_fail "Send filesystem must not exist."
 286 
 287         log_must zfs create -o mountpoint="$mountpoint" $sendfs
 288         log_must zfs snapshot $snap
 289         log_must eval "zfs send $snap | zfs recv -u $recvfs"
 290         log_must mkfile 1m "$mountpoint/data"
 291         log_must zfs snapshot $incr
 292         log_must eval "zfs send -i $snap $incr | dd bs=10k count=1 > $sendfile"
 293         log_mustnot eval "zfs recv -su $recvfs < $sendfile"
 294         log_must zfs destroy -r $sendfs
 295         log_must rm -f "$sendfile"
 296 
 297         if [[ $(get_prop 'inconsistent' "$recvfs/%recv") -ne 1 ]]; then
 298                 log_fail "Error creating temporary $recvfs/%recv clone"
 299         fi
 300 }
 301 
 302 function default_mirror_setup
 303 {
 304         default_mirror_setup_noexit $1 $2 $3
 305 
 306         log_pass
 307 }
 308 
 309 #
 310 # Given a pair of disks, set up a storage pool and dataset for the mirror
 311 # @parameters: $1 the primary side of the mirror
 312 #   $2 the secondary side of the mirror
 313 # @uses: ZPOOL ZFS TESTPOOL TESTFS
 314 function default_mirror_setup_noexit
 315 {
 316         readonly func="default_mirror_setup_noexit"
 317         typeset primary=$1
 318         typeset secondary=$2
 319 
 320         [[ -z $primary ]] && \
 321                 log_fail "$func: No parameters passed"


 474                                 enc=$(get_prop encryption $fs)
 475                                 if [[ $? -ne 0 ]] || [[ -z "$enc" ]] || \
 476                                         [[ "$enc" == "off" ]]; then
 477                                         log_must zfs set checksum=on $fs
 478                                 fi
 479                                 log_must zfs set compression=off $fs
 480                                 log_must zfs set atime=on $fs
 481                                 log_must zfs set devices=off $fs
 482                                 log_must zfs set exec=on $fs
 483                                 log_must zfs set setuid=on $fs
 484                                 log_must zfs set readonly=off $fs
 485                                 log_must zfs set snapdir=hidden $fs
 486                                 log_must zfs set aclmode=groupmask $fs
 487                                 log_must zfs set aclinherit=secure $fs
 488                         fi
 489                 done
 490         fi
 491 
 492         [[ -d $TESTDIR ]] && \
 493                 log_must rm -rf $TESTDIR


 494 }
 495 
 496 
 497 #
 498 # Common function used to cleanup storage pools, file systems
 499 # and containers.
 500 #
 501 function default_container_cleanup
 502 {
 503         if ! is_global_zone; then
 504                 reexport_pool
 505         fi
 506 
 507         ismounted $TESTPOOL/$TESTCTR/$TESTFS1
 508         [[ $? -eq 0 ]] && \
 509             log_must zfs unmount $TESTPOOL/$TESTCTR/$TESTFS1
 510 
 511         datasetexists $TESTPOOL/$TESTCTR/$TESTFS1 && \
 512             log_must zfs destroy -R $TESTPOOL/$TESTCTR/$TESTFS1
 513 


1167                 log_note "Missing pool name."
1168                 return 1
1169         fi
1170 
1171         if poolexists $pool ; then
1172                 destroy_pool $pool
1173         fi
1174 
1175         if is_global_zone ; then
1176                 [[ -d /$pool ]] && rm -rf /$pool
1177                 log_must zpool create -f $pool $@
1178         fi
1179 
1180         return 0
1181 }
1182 
1183 # Return 0 if destroy successfully or the pool exists; $? otherwise
1184 # Note: In local zones, this function should return 0 silently.
1185 #
1186 # $1 - pool name

1187 # Destroy pool with the given parameters.
1188 
function destroy_pool #pool
{
	typeset pool=${1%%/*}
	typeset mtpt

	if [[ -z $pool ]]; then
		log_note "No pool name given."
		return 1
	fi

	# Pool teardown only applies in the global zone.
	if is_global_zone ; then
		if ! poolexists "$pool" ; then
			log_note "Pool does not exist. ($pool)"
			return 1
		fi

		mtpt=$(get_prop mountpoint "$pool")

		# At times, syseventd activity can cause attempts to
		# destroy a pool to fail with EBUSY. We retry a few
		# times allowing failures before requiring the destroy
		# to succeed (after enough failures, run it under
		# log_must so a persistent failure aborts the test).
		typeset -i wait_time=10 ret=1 count=0
		must=""
		while (( ret != 0 )); do
			$must zpool destroy -f $pool
			ret=$?
			(( ret == 0 )) && break
			log_note "zpool destroy failed with $ret"
			(( count++ >= 7 )) && must=log_must
			sleep $wait_time
		done

		[[ -d $mtpt ]] && log_must rm -rf $mtpt
	fi

	return 0
}
1228 


1229 #


































1230 # Firstly, create a pool with 5 datasets. Then, create a single zone and
1231 # export the 5 datasets to it. In addition, we also add a ZFS filesystem
1232 # and a zvol device to the zone.
1233 #
1234 # $1 zone name
1235 # $2 zone root directory prefix
1236 # $3 zone ip
1237 #
1238 function zfs_zones_setup #zone_name zone_root zone_ip
1239 {
1240         typeset zone_name=${1:-$(hostname)-z}
1241         typeset zone_root=${2:-"/zone_root"}
1242         typeset zone_ip=${3:-"10.1.1.10"}
1243         typeset prefix_ctr=$ZONE_CTR
1244         typeset pool_name=$ZONE_POOL
1245         typeset -i cntctr=5
1246         typeset -i i=0
1247 
1248         # Create pool and 5 containers within it
1249         #


#
# Verify a given hotspare disk is inuse or avail
#
# Return 0 if pool/disk matches the expected state, 1 otherwise
#
function check_hotspare_state # pool disk state{inuse,avail}
{
	typeset pool=$1
	typeset disk=${2#/dev/dsk/}
	typeset state=$3

	cur_state=$(get_device_state $pool $disk "spares")

	# Function status is the comparison result itself.
	[[ $state == ${cur_state} ]]
}
1507 
#
# Wait until a hotspare transitions to a given state or times out.
#
# $1 pool name
# $2 disk (bare name or /dev/dsk/ path)
# $3 expected state
# $4 timeout in seconds (default 60)
#
# Return 0 when pool/disk matches expected state, 1 on timeout.
#
function wait_hotspare_state # pool disk state timeout
{
	typeset pool=$1
	# BUG FIX: was '${2#$/DEV_DSKDIR/}' -- the misplaced '$' made the
	# pattern the literal string '$/DEV_DSKDIR/', so a full device path
	# was never stripped.  Strip /dev/dsk/ as check_hotspare_state() does.
	typeset disk=${2#/dev/dsk/}
	typeset state=$3
	typeset timeout=${4:-60}
	typeset -i i=0

	# Poll once per second until the state matches or we time out.
	while [[ $i -lt $timeout ]]; do
		if check_hotspare_state $pool $disk $state; then
			return 0
		fi

		i=$((i+1))
		sleep 1
	done

	return 1
}
1532 
#
# Verify a given slog disk is inuse or avail
#
# Return 0 if pool/disk matches the expected state, 1 otherwise
#
function check_slog_state # pool disk state{online,offline,unavail}
{
	typeset pool=$1
	typeset disk=${2#/dev/dsk/}
	typeset state=$3

	cur_state=$(get_device_state $pool $disk "logs")

	# Function status is the comparison result itself.
	[[ $state == ${cur_state} ]]
}
1551 
#
# Verify a given vdev disk is inuse or avail
#
# Return 0 if pool/disk matches the expected state, 1 otherwise
#
function check_vdev_state # pool disk state{online,offline,unavail}
{
	typeset pool=$1
	typeset disk=${2#/dev/dsk/}
	typeset state=$3

	cur_state=$(get_device_state $pool $disk)

	# Function status is the comparison result itself.
	[[ $state == ${cur_state} ]]
}
1570 
#
# Wait until a vdev transitions to a given state or times out.
#
# $1 pool name
# $2 disk (bare name or /dev/dsk/ path)
# $3 expected state
# $4 timeout in seconds (default 60)
#
# Return 0 when pool/disk matches expected state, 1 on timeout.
#
function wait_vdev_state # pool disk state timeout
{
	typeset pool=$1
	# BUG FIX: was '${2#$/DEV_DSKDIR/}' -- the misplaced '$' made the
	# pattern the literal string '$/DEV_DSKDIR/', so a full device path
	# was never stripped.  Strip /dev/dsk/ as check_vdev_state() does.
	typeset disk=${2#/dev/dsk/}
	typeset state=$3
	typeset timeout=${4:-60}
	typeset -i i=0

	# Poll once per second until the state matches or we time out.
	while [[ $i -lt $timeout ]]; do
		if check_vdev_state $pool $disk $state; then
			return 0
		fi

		i=$((i+1))
		sleep 1
	done

	return 1
}
1595 
#
# Check the output of 'zpool status -v <pool>' to see whether the section
# named by <token> contains the <keyword> specified.
#
# Return 0 if it is contained, 1 otherwise
#
function check_pool_status # pool token keyword <verbose>
{
	typeset pool=$1
	typeset token=$2
	typeset keyword=$3
	typeset verbose=${4:-false}

	# Pull out only the status line whose first field is "<token>:".
	scan=$(zpool status -v "$pool" 2>/dev/null | nawk -v token="$token:" '
		($1==token) {print $0}')
	[[ $verbose == true ]] && log_note $scan

	# Case-insensitive match of the keyword within that line.
	echo $scan | grep -i "$keyword" > /dev/null 2>&1
}
1618 
#
# The following functions are instances of check_pool_status():
#	is_pool_resilvering - to check if the pool is resilver in progress
#	is_pool_resilvered - to check if the pool is resilver completed
#	is_pool_scrubbing - to check if the pool is scrub in progress
#	is_pool_scrubbed - to check if the pool is scrub completed
#	is_pool_scrub_stopped - to check if the pool is scrub stopped
#	is_pool_scrub_paused - to check if the pool has scrub paused
#	is_pool_removing - to check if the pool is removing a vdev
#	is_pool_removed - to check if the pool is remove completed
#
function is_pool_resilvering #pool <verbose>
{
	check_pool_status "$1" "scan" "resilver in progress since " $2
}

function is_pool_resilvered #pool <verbose>
{
	check_pool_status "$1" "scan" "resilvered " $2
}

function is_pool_scrubbing #pool <verbose>
{
	check_pool_status "$1" "scan" "scrub in progress since " $2
}

function is_pool_scrubbed #pool <verbose>
{
	check_pool_status "$1" "scan" "scrub repaired" $2
}

function is_pool_scrub_stopped #pool <verbose>
{
	check_pool_status "$1" "scan" "scrub canceled" $2
}

function is_pool_scrub_paused #pool <verbose>
{
	check_pool_status "$1" "scan" "scrub paused since " $2
}

function is_pool_removing #pool
{
	check_pool_status "$1" "remove" "in progress since "
}

function is_pool_removed #pool
{
	check_pool_status "$1" "remove" "completed on"
}
1677 
#
# Use create_pool()/destroy_pool() to clean up the information on the
# given disks to avoid slice overlapping.
#
function cleanup_devices #vdevs
{
	typeset pool="foopool$$"

	# Clear any leftover pool of the same name first.
	poolexists $pool && destroy_pool $pool

	# Creating and destroying a throwaway pool relabels the disks.
	create_pool $pool $@
	destroy_pool $pool

	return 0
}
1695 
1696 #/**
1697 # A function to find and locate free disks on a system or from given


# Return 0 (true) when the system reports more than one processor.
function is_mp
{
	(($(psrinfo | wc -l) > 1))
}
2436 
# Print the operating frequency of CPU 0, parsed from psrinfo -v output.
function get_cpu_freq
{
	psrinfo -v 0 | awk '/processor operates at/ {print $6}'
}
2441 
# Run the given command as the user provided.
# $1 - user name; remaining args - the command line to run.
function user_run
{
	typeset user=$1
	shift

	# stdout/stderr are captured to fixed files for later inspection.
	eval su \$user -c \"$@\" > /tmp/out 2>/tmp/err
	return $?
}
2451 

2452 #









































2453 # Check if the pool contains the specified vdevs
2454 #
2455 # $1 pool
2456 # $2..n <vdev> ...
2457 #
2458 # Return 0 if the vdevs are contained in the pool, 1 if any of the specified
2459 # vdevs is not in the pool, and 2 if pool name is missing.
2460 #
2461 function vdevs_in_pool
2462 {
2463         typeset pool=$1
2464         typeset vdev
2465 
2466         if [[ -z $pool ]]; then
2467                 log_note "Missing pool name."
2468                 return 2
2469         fi
2470 
2471         shift
2472 


2532             --minimal \
2533             --randrepeat=0 \
2534             --buffer_compress_percentage=66 \
2535             --buffer_compress_chunk=4096 \
2536             --directory=$dir \
2537             --numjobs=$nfiles \
2538             --rw=write \
2539             --bs=$bs \
2540             --filesize=$megs \
2541             --filename_format='$fname.\$jobnum' >/dev/null"
2542 }
2543 
# Print the ZFS object number of the given path (its inode number).
# $1 - path that must exist; the test is failed otherwise.
function get_objnum
{
	typeset pathname=$1
	typeset objnum

	[[ -e $pathname ]] || log_fail "No such file or directory: $pathname"
	# The inode number reported by stat doubles as the object number.
	objnum=$(stat -c %i $pathname)
	echo $objnum
}
2553 
#
# Prints the current time in seconds since UNIX Epoch.
#
function current_epoch
{
	# ksh93 printf supports strftime-style %(fmt)T conversions.
	printf '%(%s)T'
}
2561 
#
# Get decimal value of global uint32_t variable using mdb.
#
# $1 - kernel variable name.  Prints the value on success.
#
function mdb_get_uint32
{
	typeset variable=$1
	typeset value

	# '/X' reads a 32-bit value; '::eval .=U' re-prints it in decimal.
	value=$(mdb -k -e "$variable/X | ::eval .=U")
	if [[ $? -ne 0 ]]; then
		log_fail "Failed to get value of '$variable' from mdb."
		return 1
	fi

	echo $value
	return 0
}
2579 
#
# Set global uint32_t variable to a decimal value using mdb.
#
# $1 - kernel variable name
# $2 - decimal value to store
#
function mdb_set_uint32
{
	typeset variable=$1
	typeset value=$2

	# '/W' writes a 32-bit value; the '0t' prefix marks it as decimal.
	mdb -kw -e "$variable/W 0t$value" > /dev/null
	if [[ $? -ne 0 ]]; then
		echo "Failed to set '$variable' to '$value' in mdb."
		return 1
	fi

	return 0
}


 247 {
 248         typeset fs_vol=${1:-$TESTFS}
 249         typeset snap=${2:-$TESTSNAP}
 250         typeset bkmark=${3:-$TESTBKMARK}
 251 
 252         [[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined."
 253         [[ -z $snap ]] && log_fail "Snapshot's name is undefined."
 254         [[ -z $bkmark ]] && log_fail "Bookmark's name is undefined."
 255 
 256         if bkmarkexists $fs_vol#$bkmark; then
 257                 log_fail "$fs_vol#$bkmark already exists."
 258         fi
 259         datasetexists $fs_vol || \
 260                 log_fail "$fs_vol must exist."
 261         snapexists $fs_vol@$snap || \
 262                 log_fail "$fs_vol@$snap must exist."
 263 
 264         log_must zfs bookmark $fs_vol@$snap $fs_vol#$bkmark
 265 }
 266 



































 267 function default_mirror_setup
 268 {
 269         default_mirror_setup_noexit $1 $2 $3
 270 
 271         log_pass
 272 }
 273 
 274 #
 275 # Given a pair of disks, set up a storage pool and dataset for the mirror
 276 # @parameters: $1 the primary side of the mirror
 277 #   $2 the secondary side of the mirror
 278 # @uses: ZPOOL ZFS TESTPOOL TESTFS
 279 function default_mirror_setup_noexit
 280 {
 281         readonly func="default_mirror_setup_noexit"
 282         typeset primary=$1
 283         typeset secondary=$2
 284 
 285         [[ -z $primary ]] && \
 286                 log_fail "$func: No parameters passed"


 439                                 enc=$(get_prop encryption $fs)
 440                                 if [[ $? -ne 0 ]] || [[ -z "$enc" ]] || \
 441                                         [[ "$enc" == "off" ]]; then
 442                                         log_must zfs set checksum=on $fs
 443                                 fi
 444                                 log_must zfs set compression=off $fs
 445                                 log_must zfs set atime=on $fs
 446                                 log_must zfs set devices=off $fs
 447                                 log_must zfs set exec=on $fs
 448                                 log_must zfs set setuid=on $fs
 449                                 log_must zfs set readonly=off $fs
 450                                 log_must zfs set snapdir=hidden $fs
 451                                 log_must zfs set aclmode=groupmask $fs
 452                                 log_must zfs set aclinherit=secure $fs
 453                         fi
 454                 done
 455         fi
 456 
 457         [[ -d $TESTDIR ]] && \
 458                 log_must rm -rf $TESTDIR
 459         [[ -d $TESTDIR1 ]] && \
 460                 log_must rm -rf $TESTDIR1
 461 }
 462 
 463 
 464 #
 465 # Common function used to cleanup storage pools, file systems
 466 # and containers.
 467 #
 468 function default_container_cleanup
 469 {
 470         if ! is_global_zone; then
 471                 reexport_pool
 472         fi
 473 
 474         ismounted $TESTPOOL/$TESTCTR/$TESTFS1
 475         [[ $? -eq 0 ]] && \
 476             log_must zfs unmount $TESTPOOL/$TESTCTR/$TESTFS1
 477 
 478         datasetexists $TESTPOOL/$TESTCTR/$TESTFS1 && \
 479             log_must zfs destroy -R $TESTPOOL/$TESTCTR/$TESTFS1
 480 


1134                 log_note "Missing pool name."
1135                 return 1
1136         fi
1137 
1138         if poolexists $pool ; then
1139                 destroy_pool $pool
1140         fi
1141 
1142         if is_global_zone ; then
1143                 [[ -d /$pool ]] && rm -rf /$pool
1144                 log_must zpool create -f $pool $@
1145         fi
1146 
1147         return 0
1148 }
1149 
1150 # Return 0 if destroy successfully or the pool exists; $? otherwise
1151 # Note: In local zones, this function should return 0 silently.
1152 #
1153 # $1 - pool name
1154 # $2 - optional force flag
1155 # Destroy pool with the given parameters.
1156 
1157 function do_destroy_pool #pool <-f>
1158 {
1159         typeset pool=${1%%/*}
1160         typeset force=$2
1161         typeset mtpt
1162 
1163         if [[ -z $pool ]]; then
1164                 log_note "No pool name given."
1165                 return 1
1166         fi
1167 
1168         if is_global_zone ; then
1169                 if poolexists "$pool" ; then
1170                         mtpt=$(get_prop mountpoint "$pool")
1171 
1172                         # At times, syseventd activity can cause attempts to
1173                         # destroy a pool to fail with EBUSY. We retry a few
1174                         # times allowing failures before requiring the destroy
1175                         # to succeed.
1176                         typeset -i wait_time=$DESTROY_SLEEP_TIME ret=1 count=0
1177                         must=""
1178                         while [[ $ret -ne 0 ]]; do
1179                                 $must zpool destroy $force $pool
1180                                 ret=$?
1181                                 [[ $ret -eq 0 ]] && break
1182                                 log_note "zpool destroy failed with $ret"
1183                                 [[ count++ -ge $NUM_RETRIES ]] && must=log_must
1184                                 sleep $wait_time
1185                         done
1186 
1187                         [[ -d $mtpt ]] && \
1188                                 log_must rm -rf $mtpt
1189                 else
1190                         log_note "Pool does not exist. ($pool)"
1191                         return 1
1192                 fi
1193         fi
1194 
1195         return 0
1196 }
1197 
# Return 0 if the pool is destroyed successfully; 1 otherwise.
# Note: In local zones, this function should return 0 silently.
#
# $1 - pool name
# Destroy pool with the given parameters (without forcing).

function destroy_pool_no_force #pool
{
	typeset pool=${1%%/*}

	# Normalize the helper's exit status to exactly 0 or 1.
	do_destroy_pool $pool || return 1
	return 0
}
1215 
# Return 0 if the pool is destroyed successfully; 1 otherwise.
# Note: In local zones, this function should return 0 silently.
#
# $1 - pool name
# Force a destroy of the pool with the given parameters.

function destroy_pool #pool
{
	typeset pool=${1%%/*}

	# Force the destroy via the shared helper; normalize status to 0/1.
	do_destroy_pool $pool -f || return 1
	return 0
}
1233 
1234 #
1235 # Firstly, create a pool with 5 datasets. Then, create a single zone and
1236 # export the 5 datasets to it. In addition, we also add a ZFS filesystem
1237 # and a zvol device to the zone.
1238 #
1239 # $1 zone name
1240 # $2 zone root directory prefix
1241 # $3 zone ip
1242 #
1243 function zfs_zones_setup #zone_name zone_root zone_ip
1244 {
1245         typeset zone_name=${1:-$(hostname)-z}
1246         typeset zone_root=${2:-"/zone_root"}
1247         typeset zone_ip=${3:-"10.1.1.10"}
1248         typeset prefix_ctr=$ZONE_CTR
1249         typeset pool_name=$ZONE_POOL
1250         typeset -i cntctr=5
1251         typeset -i i=0
1252 
1253         # Create pool and 5 containers within it
1254         #


1494 #
1495 # Verify a given hotspare disk is inuse or avail
1496 #
1497 # Return 0 if pool/disk matches expected state, 1 otherwise
1498 #
1499 function check_hotspare_state # pool disk state{inuse,avail}
1500 {
1501         typeset pool=$1
1502         typeset disk=${2#/dev/dsk/}
1503         typeset state=$3
1504 
1505         cur_state=$(get_device_state $pool $disk "spares")
1506 
1507         if [[ $state != ${cur_state} ]]; then
1508                 return 1
1509         fi
1510         return 0
1511 }
1512 
1513 #

























1514 # Verify a given slog disk is inuse or avail
1515 #
1516 # Return 0 if pool/disk matches expected state, 1 otherwise
1517 #
1518 function check_slog_state # pool disk state{online,offline,unavail}
1519 {
1520         typeset pool=$1
1521         typeset disk=${2#/dev/dsk/}
1522         typeset state=$3
1523 
1524         cur_state=$(get_device_state $pool $disk "logs")
1525 
1526         if [[ $state != ${cur_state} ]]; then
1527                 return 1
1528         fi
1529         return 0
1530 }
1531 
1532 #
1533 # Verify a given vdev disk is inuse or avail
1534 #
1535 # Return 0 if pool/disk matches expected state, 1 otherwise
1536 #
1537 function check_vdev_state # pool disk state{online,offline,unavail}
1538 {
1539         typeset pool=$1
1540         typeset disk=${2#/dev/dsk/}
1541         typeset state=$3
1542 
1543         cur_state=$(get_device_state $pool $disk)
1544 
1545         if [[ $state != ${cur_state} ]]; then
1546                 return 1
1547         fi
1548         return 0
1549 }
1550 
1551 #

























1552 # Check the output of 'zpool status -v <pool>',
1553 # and to see if the content of <token> contain the <keyword> specified.
1554 #
1555 # Return 0 if contained, 1 otherwise
1556 #
1557 function check_pool_status # pool token keyword <verbose>
1558 {
1559         typeset pool=$1
1560         typeset token=$2
1561         typeset keyword=$3
1562         typeset verbose=${4:-false}
1563 
1564         scan=$(zpool status -v "$pool" 2>/dev/null | nawk -v token="$token:" '
1565                 ($1==token) {print $0}')
1566         if [[ $verbose == true ]]; then
1567                 log_note $scan
1568         fi
1569         echo $scan | grep -i "$keyword" > /dev/null 2>&1
1570 
1571         return $?
1572 }
1573 
1574 #
1575 # These 6 following functions are instance of check_pool_status()
1576 #       is_pool_resilvering - to check if the pool is resilver in progress
1577 #       is_pool_resilvered - to check if the pool is resilver completed
1578 #       is_pool_scrubbing - to check if the pool is scrub in progress
1579 #       is_pool_scrubbed - to check if the pool is scrub completed
1580 #       is_pool_scrub_stopped - to check if the pool is scrub stopped
1581 #       is_pool_scrub_paused - to check if the pool has scrub paused


1582 #
1583 function is_pool_resilvering #pool <verbose>
1584 {
1585         check_pool_status "$1" "scan" "resilver in progress since " $2
1586         return $?
1587 }
1588 
1589 function is_pool_resilvered #pool <verbose>
1590 {
1591         check_pool_status "$1" "scan" "resilvered " $2
1592         return $?
1593 }
1594 
1595 function is_pool_scrubbing #pool <verbose>
1596 {
1597         check_pool_status "$1" "scan" "scrub in progress since " $2
1598         return $?
1599 }
1600 
1601 function is_pool_scrubbed #pool <verbose>
1602 {
1603         check_pool_status "$1" "scan" "scrub repaired" $2
1604         return $?
1605 }
1606 
1607 function is_pool_scrub_stopped #pool <verbose>
1608 {
1609         check_pool_status "$1" "scan" "scrub canceled" $2
1610         return $?
1611 }
1612 
1613 function is_pool_scrub_paused #pool <verbose>
1614 {
1615         check_pool_status "$1" "scan" "scrub paused since " $2
1616         return $?
1617 }
1618 












1619 #
1620 # Use create_pool()/destroy_pool() to clean up the information
1621 # in the given disk to avoid slice overlapping.
1622 #
1623 function cleanup_devices #vdevs
1624 {
1625         typeset pool="foopool$$"
1626 
1627         if poolexists $pool ; then
1628                 destroy_pool $pool
1629         fi
1630 
1631         create_pool $pool $@
1632         destroy_pool $pool
1633 
1634         return 0
1635 }
1636 
1637 #/**
1638 # A function to find and locate free disks on a system or from given


2373 function is_mp
2374 {
2375         (($(psrinfo | wc -l) > 1))
2376 }
2377 
2378 function get_cpu_freq
2379 {
2380         psrinfo -v 0 | awk '/processor operates at/ {print $6}'
2381 }
2382 
2383 # Run the given command as the user provided.
2384 function user_run
2385 {
2386         typeset user=$1
2387         shift
2388 
2389         eval su \$user -c \"$@\" > /tmp/out 2>/tmp/err
2390         return $?
2391 }
2392 
# Return 0 if the pool is successfully exported; $? otherwise
#
# $1 - pool name
# Export pool.
function export_pool #pool
{
	typeset pool=${1%%/*}
	# Checking to see if the device is busy. If so, we'll
	# retry the export a few times with a sleep between tries.
	errmsg='device is busy'
	retry_num=$NUM_RETRIES
	TMPFILE=`mktemp`
	if [ -z "$TMPFILE" ] ; then
		log_note "Unable to create temporary file $TMPFILE"
		return 1
	fi
	until [ $retry_num == 0 ] ; do
		log_note "zpool export $pool"
		zpool export $pool 2>$TMPFILE
		# If the export failed, see if it's due to a
		# device is busy issue and retry if it is.
		if (( $? != 0 )); then
			# if this is busy then we want to retry
			if [ "`grep "$errmsg" $TMPFILE`" != "" ]; then
				# BUG FIX: 'retry_num-=1' is not a valid
				# command; without an arithmetic context the
				# counter never decremented, so a persistently
				# busy device looped forever.
				(( retry_num -= 1 ))
				log_note "Device is busy, retry zpool export"
				sleep $EXPORT_SLEEP_TIME
			else # return here if error is something else
				# BUG FIX: was 'rn $TMPFILE' (typo for rm),
				# which both failed and leaked the temp file.
				rm $TMPFILE
				return 1
			fi
		else
			# export succeeded.
			rm $TMPFILE
			return 0
		fi
	done
	# We've reached our max retries; clean up the temp file (previously
	# leaked on this path), then try to export one more time and
	# require it to succeed.
	rm $TMPFILE
	log_must zpool export $pool
}
2434 
2435 #
2436 # Check if the pool contains the specified vdevs
2437 #
2438 # $1 pool
2439 # $2..n <vdev> ...
2440 #
2441 # Return 0 if the vdevs are contained in the pool, 1 if any of the specified
2442 # vdevs is not in the pool, and 2 if pool name is missing.
2443 #
2444 function vdevs_in_pool
2445 {
2446         typeset pool=$1
2447         typeset vdev
2448 
2449         if [[ -z $pool ]]; then
2450                 log_note "Missing pool name."
2451                 return 2
2452         fi
2453 
2454         shift
2455 


2515             --minimal \
2516             --randrepeat=0 \
2517             --buffer_compress_percentage=66 \
2518             --buffer_compress_chunk=4096 \
2519             --directory=$dir \
2520             --numjobs=$nfiles \
2521             --rw=write \
2522             --bs=$bs \
2523             --filesize=$megs \
2524             --filename_format='$fname.\$jobnum' >/dev/null"
2525 }
2526 
2527 function get_objnum
2528 {
2529         typeset pathname=$1
2530         typeset objnum
2531 
2532         [[ -e $pathname ]] || log_fail "No such file or directory: $pathname"
2533         objnum=$(stat -c %i $pathname)
2534         echo $objnum











































2535 }