Print this page
7290 ZFS test suite needs to control what utilities it can run
Reviewed by: Dan Kimmel <dan.kimmel@delphix.com>
Reviewed by: Matthew Ahrens <mahrens@delphix.com>

*** 20,30 **** # # # Copyright 2009 Sun Microsystems, Inc. All rights reserved. # Use is subject to license terms. ! # Copyright (c) 2012, 2015 by Delphix. All rights reserved. # Copyright 2016 Nexenta Systems, Inc. # . ${STF_TOOLS}/contrib/include/logapi.shlib --- 20,30 ---- # # # Copyright 2009 Sun Microsystems, Inc. All rights reserved. # Use is subject to license terms. ! # Copyright (c) 2012, 2016 by Delphix. All rights reserved. # Copyright 2016 Nexenta Systems, Inc. # . ${STF_TOOLS}/contrib/include/logapi.shlib
*** 45,65 **** typeset out dir name ret case $fstype in zfs) if [[ "$1" == "/"* ]] ; then ! for out in $($ZFS mount | $AWK '{print $2}'); do [[ $1 == $out ]] && return 0 done else ! for out in $($ZFS mount | $AWK '{print $1}'); do [[ $1 == $out ]] && return 0 done fi ;; ufs|nfs) ! out=$($DF -F $fstype $1 2>/dev/null) ret=$? (($ret != 0)) && return $ret dir=${out%%\(*} dir=${dir%% *} --- 45,65 ---- typeset out dir name ret case $fstype in zfs) if [[ "$1" == "/"* ]] ; then ! for out in $(zfs mount | awk '{print $2}'); do [[ $1 == $out ]] && return 0 done else ! for out in $(zfs mount | awk '{print $1}'); do [[ $1 == $out ]] && return 0 done fi ;; ufs|nfs) ! out=$(df -F $fstype $1 2>/dev/null) ret=$? (($ret != 0)) && return $ret dir=${out%%\(*} dir=${dir%% *}
*** 102,112 **** # # $1 - line to split function splitline { ! $ECHO $1 | $SED "s/,/ /g" } function default_setup { default_setup_noexit "$@" --- 102,112 ---- # # $1 - line to split function splitline { ! echo $1 | sed "s/,/ /g" } function default_setup { default_setup_noexit "$@"
*** 125,164 **** if is_global_zone; then if poolexists $TESTPOOL ; then destroy_pool $TESTPOOL fi ! [[ -d /$TESTPOOL ]] && $RM -rf /$TESTPOOL ! log_must $ZPOOL create -f $TESTPOOL $disklist else reexport_pool fi ! $RM -rf $TESTDIR || log_unresolved Could not remove $TESTDIR ! $MKDIR -p $TESTDIR || log_unresolved Could not create $TESTDIR ! log_must $ZFS create $TESTPOOL/$TESTFS ! log_must $ZFS set mountpoint=$TESTDIR $TESTPOOL/$TESTFS if [[ -n $container ]]; then ! $RM -rf $TESTDIR1 || \ log_unresolved Could not remove $TESTDIR1 ! $MKDIR -p $TESTDIR1 || \ log_unresolved Could not create $TESTDIR1 ! log_must $ZFS create $TESTPOOL/$TESTCTR ! log_must $ZFS set canmount=off $TESTPOOL/$TESTCTR ! log_must $ZFS create $TESTPOOL/$TESTCTR/$TESTFS1 ! log_must $ZFS set mountpoint=$TESTDIR1 \ $TESTPOOL/$TESTCTR/$TESTFS1 fi if [[ -n $volume ]]; then if is_global_zone ; then ! log_must $ZFS create -V $VOLSIZE $TESTPOOL/$TESTVOL else ! log_must $ZFS create $TESTPOOL/$TESTVOL fi fi } # --- 125,164 ---- if is_global_zone; then if poolexists $TESTPOOL ; then destroy_pool $TESTPOOL fi ! [[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL ! log_must zpool create -f $TESTPOOL $disklist else reexport_pool fi ! rm -rf $TESTDIR || log_unresolved Could not remove $TESTDIR ! mkdir -p $TESTDIR || log_unresolved Could not create $TESTDIR ! log_must zfs create $TESTPOOL/$TESTFS ! log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS if [[ -n $container ]]; then ! rm -rf $TESTDIR1 || \ log_unresolved Could not remove $TESTDIR1 ! mkdir -p $TESTDIR1 || \ log_unresolved Could not create $TESTDIR1 ! log_must zfs create $TESTPOOL/$TESTCTR ! log_must zfs set canmount=off $TESTPOOL/$TESTCTR ! log_must zfs create $TESTPOOL/$TESTCTR/$TESTFS1 ! log_must zfs set mountpoint=$TESTDIR1 \ $TESTPOOL/$TESTCTR/$TESTFS1 fi if [[ -n $volume ]]; then if is_global_zone ; then ! log_must zfs create -V $VOLSIZE $TESTPOOL/$TESTVOL else ! log_must zfs create $TESTPOOL/$TESTVOL fi fi } #
*** 213,223 **** log_fail "$fs_vol@$snap already exists." fi datasetexists $fs_vol || \ log_fail "$fs_vol must exist." ! log_must $ZFS snapshot $fs_vol@$snap } # # Create a clone from a snapshot, default clone name is $TESTCLONE. # --- 213,223 ---- log_fail "$fs_vol@$snap already exists." fi datasetexists $fs_vol || \ log_fail "$fs_vol must exist." ! log_must zfs snapshot $fs_vol@$snap } # # Create a clone from a snapshot, default clone name is $TESTCLONE. #
*** 232,242 **** [[ -z $snap ]] && \ log_fail "Snapshot name is undefined." [[ -z $clone ]] && \ log_fail "Clone name is undefined." ! log_must $ZFS clone $snap $clone } function default_mirror_setup { default_mirror_setup_noexit $1 $2 $3 --- 232,242 ---- [[ -z $snap ]] && \ log_fail "Snapshot name is undefined." [[ -z $clone ]] && \ log_fail "Clone name is undefined." ! log_must zfs clone $snap $clone } function default_mirror_setup { default_mirror_setup_noexit $1 $2 $3
*** 257,270 **** [[ -z $primary ]] && \ log_fail "$func: No parameters passed" [[ -z $secondary ]] && \ log_fail "$func: No secondary partition passed" ! [[ -d /$TESTPOOL ]] && $RM -rf /$TESTPOOL ! log_must $ZPOOL create -f $TESTPOOL mirror $@ ! log_must $ZFS create $TESTPOOL/$TESTFS ! log_must $ZFS set mountpoint=$TESTDIR $TESTPOOL/$TESTFS } # # create a number of mirrors. # We create a number($1) of 2 way mirrors using the pairs of disks named --- 257,270 ---- [[ -z $primary ]] && \ log_fail "$func: No parameters passed" [[ -z $secondary ]] && \ log_fail "$func: No secondary partition passed" ! [[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL ! log_must zpool create -f $TESTPOOL mirror $@ ! log_must zfs create $TESTPOOL/$TESTFS ! log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS } # # create a number of mirrors. # We create a number($1) of 2 way mirrors using the pairs of disks named
*** 277,288 **** typeset -i nmirrors=$1 shift while ((nmirrors > 0)); do log_must test -n "$1" -a -n "$2" ! [[ -d /$TESTPOOL$nmirrors ]] && $RM -rf /$TESTPOOL$nmirrors ! log_must $ZPOOL create -f $TESTPOOL$nmirrors mirror $1 $2 shift 2 ((nmirrors = nmirrors - 1)) done } --- 277,288 ---- typeset -i nmirrors=$1 shift while ((nmirrors > 0)); do log_must test -n "$1" -a -n "$2" ! [[ -d /$TESTPOOL$nmirrors ]] && rm -rf /$TESTPOOL$nmirrors ! log_must zpool create -f $TESTPOOL$nmirrors mirror $1 $2 shift 2 ((nmirrors = nmirrors - 1)) done }
*** 298,309 **** typeset -i nraidzs=$1 shift while ((nraidzs > 0)); do log_must test -n "$1" -a -n "$2" ! [[ -d /$TESTPOOL$nraidzs ]] && $RM -rf /$TESTPOOL$nraidzs ! log_must $ZPOOL create -f $TESTPOOL$nraidzs raidz $1 $2 shift 2 ((nraidzs = nraidzs - 1)) done } --- 298,309 ---- typeset -i nraidzs=$1 shift while ((nraidzs > 0)); do log_must test -n "$1" -a -n "$2" ! [[ -d /$TESTPOOL$nraidzs ]] && rm -rf /$TESTPOOL$nraidzs ! log_must zpool create -f $TESTPOOL$nraidzs raidz $1 $2 shift 2 ((nraidzs = nraidzs - 1)) done }
*** 329,342 **** if [[ ${#disks[*]} -lt 2 ]]; then log_fail "A raid-z requires a minimum of two disks." fi ! [[ -d /$TESTPOOL ]] && $RM -rf /$TESTPOOL ! log_must $ZPOOL create -f $TESTPOOL raidz $1 $2 $3 ! log_must $ZFS create $TESTPOOL/$TESTFS ! log_must $ZFS set mountpoint=$TESTDIR $TESTPOOL/$TESTFS log_pass } # --- 329,342 ---- if [[ ${#disks[*]} -lt 2 ]]; then log_fail "A raid-z requires a minimum of two disks." fi ! [[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL ! log_must zpool create -f $TESTPOOL raidz $1 $2 $3 ! log_must zfs create $TESTPOOL/$TESTFS ! log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS log_pass } #
*** 361,374 **** # # Destroying the pool will also destroy any # filesystems it contains. # if is_global_zone; then ! $ZFS unmount -a > /dev/null 2>&1 ! exclude=`eval $ECHO \"'(${KEEP})'\"` ! ALL_POOLS=$($ZPOOL list -H -o name \ ! | $GREP -v "$NO_POOLS" | $EGREP -v "$exclude") # Here, we loop through the pools we're allowed to # destroy, only destroying them if it's safe to do # so. while [ ! -z ${ALL_POOLS} ] do --- 361,374 ---- # # Destroying the pool will also destroy any # filesystems it contains. # if is_global_zone; then ! zfs unmount -a > /dev/null 2>&1 ! exclude=`eval echo \"'(${KEEP})'\"` ! ALL_POOLS=$(zpool list -H -o name \ ! | grep -v "$NO_POOLS" | egrep -v "$exclude") # Here, we loop through the pools we're allowed to # destroy, only destroying them if it's safe to do # so. while [ ! -z ${ALL_POOLS} ] do
*** 376,436 **** do if safe_to_destroy_pool $pool ; then destroy_pool $pool fi ! ALL_POOLS=$($ZPOOL list -H -o name \ ! | $GREP -v "$NO_POOLS" \ ! | $EGREP -v "$exclude") done done ! $ZFS mount -a else typeset fs="" ! for fs in $($ZFS list -H -o name \ ! | $GREP "^$ZONE_POOL/$ZONE_CTR[01234]/"); do datasetexists $fs && \ ! log_must $ZFS destroy -Rf $fs done # Need cleanup here to avoid garbage dir left. ! for fs in $($ZFS list -H -o name); do [[ $fs == /$ZONE_POOL ]] && continue ! [[ -d $fs ]] && log_must $RM -rf $fs/* done # # Reset the $ZONE_POOL/$ZONE_CTR[01234] file systems property to # the default value # ! for fs in $($ZFS list -H -o name); do if [[ $fs == $ZONE_POOL/$ZONE_CTR[01234] ]]; then ! log_must $ZFS set reservation=none $fs ! log_must $ZFS set recordsize=128K $fs ! log_must $ZFS set mountpoint=/$fs $fs typeset enc="" enc=$(get_prop encryption $fs) if [[ $? -ne 0 ]] || [[ -z "$enc" ]] || \ [[ "$enc" == "off" ]]; then ! log_must $ZFS set checksum=on $fs fi ! log_must $ZFS set compression=off $fs ! log_must $ZFS set atime=on $fs ! log_must $ZFS set devices=off $fs ! log_must $ZFS set exec=on $fs ! log_must $ZFS set setuid=on $fs ! log_must $ZFS set readonly=off $fs ! log_must $ZFS set snapdir=hidden $fs ! log_must $ZFS set aclmode=groupmask $fs ! log_must $ZFS set aclinherit=secure $fs fi done fi [[ -d $TESTDIR ]] && \ ! log_must $RM -rf $TESTDIR } # # Common function used to cleanup storage pools, file systems --- 376,436 ---- do if safe_to_destroy_pool $pool ; then destroy_pool $pool fi ! ALL_POOLS=$(zpool list -H -o name \ ! | grep -v "$NO_POOLS" \ ! | egrep -v "$exclude") done done ! zfs mount -a else typeset fs="" ! for fs in $(zfs list -H -o name \ ! | grep "^$ZONE_POOL/$ZONE_CTR[01234]/"); do datasetexists $fs && \ ! log_must zfs destroy -Rf $fs done # Need cleanup here to avoid garbage dir left. ! for fs in $(zfs list -H -o name); do [[ $fs == /$ZONE_POOL ]] && continue ! 
[[ -d $fs ]] && log_must rm -rf $fs/* done # # Reset the $ZONE_POOL/$ZONE_CTR[01234] file systems property to # the default value # ! for fs in $(zfs list -H -o name); do if [[ $fs == $ZONE_POOL/$ZONE_CTR[01234] ]]; then ! log_must zfs set reservation=none $fs ! log_must zfs set recordsize=128K $fs ! log_must zfs set mountpoint=/$fs $fs typeset enc="" enc=$(get_prop encryption $fs) if [[ $? -ne 0 ]] || [[ -z "$enc" ]] || \ [[ "$enc" == "off" ]]; then ! log_must zfs set checksum=on $fs fi ! log_must zfs set compression=off $fs ! log_must zfs set atime=on $fs ! log_must zfs set devices=off $fs ! log_must zfs set exec=on $fs ! log_must zfs set setuid=on $fs ! log_must zfs set readonly=off $fs ! log_must zfs set snapdir=hidden $fs ! log_must zfs set aclmode=groupmask $fs ! log_must zfs set aclinherit=secure $fs fi done fi [[ -d $TESTDIR ]] && \ ! log_must rm -rf $TESTDIR } # # Common function used to cleanup storage pools, file systems
*** 442,461 **** reexport_pool fi ismounted $TESTPOOL/$TESTCTR/$TESTFS1 [[ $? -eq 0 ]] && \ ! log_must $ZFS unmount $TESTPOOL/$TESTCTR/$TESTFS1 datasetexists $TESTPOOL/$TESTCTR/$TESTFS1 && \ ! log_must $ZFS destroy -R $TESTPOOL/$TESTCTR/$TESTFS1 datasetexists $TESTPOOL/$TESTCTR && \ ! log_must $ZFS destroy -Rf $TESTPOOL/$TESTCTR [[ -e $TESTDIR1 ]] && \ ! log_must $RM -rf $TESTDIR1 > /dev/null 2>&1 default_cleanup } # --- 442,461 ---- reexport_pool fi ismounted $TESTPOOL/$TESTCTR/$TESTFS1 [[ $? -eq 0 ]] && \ ! log_must zfs unmount $TESTPOOL/$TESTCTR/$TESTFS1 datasetexists $TESTPOOL/$TESTCTR/$TESTFS1 && \ ! log_must zfs destroy -R $TESTPOOL/$TESTCTR/$TESTFS1 datasetexists $TESTPOOL/$TESTCTR && \ ! log_must zfs destroy -Rf $TESTPOOL/$TESTCTR [[ -e $TESTDIR1 ]] && \ ! log_must rm -rf $TESTDIR1 > /dev/null 2>&1 default_cleanup } #
*** 482,494 **** mtpt=$(get_prop mountpoint $snap) (($? != 0)) && \ log_fail "get_prop mountpoint $snap failed." fi ! log_must $ZFS destroy $snap [[ $mtpt != "" && -d $mtpt ]] && \ ! log_must $RM -rf $mtpt } # # Common function used to cleanup clone. # --- 482,494 ---- mtpt=$(get_prop mountpoint $snap) (($? != 0)) && \ log_fail "get_prop mountpoint $snap failed." fi ! log_must zfs destroy $snap [[ $mtpt != "" && -d $mtpt ]] && \ ! log_must rm -rf $mtpt } # # Common function used to cleanup clone. #
*** 508,529 **** mtpt=$(get_prop mountpoint $clone) (($? != 0)) && \ log_fail "get_prop mountpoint $clone failed." fi ! log_must $ZFS destroy $clone [[ $mtpt != "" && -d $mtpt ]] && \ ! log_must $RM -rf $mtpt } # Return 0 if a snapshot exists; $? otherwise # # $1 - snapshot name function snapexists { ! $ZFS list -H -t snapshot "$1" > /dev/null 2>&1 return $? } # # Set a property to a certain value on a dataset. --- 508,529 ---- mtpt=$(get_prop mountpoint $clone) (($? != 0)) && \ log_fail "get_prop mountpoint $clone failed." fi ! log_must zfs destroy $clone [[ $mtpt != "" && -d $mtpt ]] && \ ! log_must rm -rf $mtpt } # Return 0 if a snapshot exists; $? otherwise # # $1 - snapshot name function snapexists { ! zfs list -H -t snapshot "$1" > /dev/null 2>&1 return $? } # # Set a property to a certain value on a dataset.
*** 544,554 **** if (($# < 3)); then log_note "$fn: Insufficient parameters (need 3, had $#)" return 1 fi typeset output= ! output=$($ZFS set $2=$3 $1 2>&1) typeset rv=$? if ((rv != 0)); then log_note "Setting property on $1 failed." log_note "property $2=$3" log_note "Return Code: $rv" --- 544,554 ---- if (($# < 3)); then log_note "$fn: Insufficient parameters (need 3, had $#)" return 1 fi typeset output= ! output=$(zfs set $2=$3 $1 2>&1) typeset rv=$? if ((rv != 0)); then log_note "Setting property on $1 failed." log_note "property $2=$3" log_note "Return Code: $rv"
*** 576,586 **** [[ -z $dataset ]] && return 1 typeset confset= typeset -i found=0 ! for confset in $($ZFS list); do if [[ $dataset = $confset ]]; then found=1 break fi done --- 576,586 ---- [[ -z $dataset ]] && return 1 typeset confset= typeset -i found=0 ! for confset in $(zfs list); do if [[ $dataset = $confset ]]; then found=1 break fi done
*** 639,662 **** typeset disk=$4 [[ -z $slicenum || -z $size || -z $disk ]] && \ log_fail "The slice, size or disk name is unspecified." typeset format_file=/var/tmp/format_in.$$ ! $ECHO "partition" >$format_file ! $ECHO "$slicenum" >> $format_file ! $ECHO "" >> $format_file ! $ECHO "" >> $format_file ! $ECHO "$start" >> $format_file ! $ECHO "$size" >> $format_file ! $ECHO "label" >> $format_file ! $ECHO "" >> $format_file ! $ECHO "q" >> $format_file ! $ECHO "q" >> $format_file ! $FORMAT -e -s -d $disk -f $format_file typeset ret_val=$? ! $RM -f $format_file [[ $ret_val -ne 0 ]] && \ log_fail "Unable to format $disk slice $slicenum to $size" return 0 } --- 639,662 ---- typeset disk=$4 [[ -z $slicenum || -z $size || -z $disk ]] && \ log_fail "The slice, size or disk name is unspecified." typeset format_file=/var/tmp/format_in.$$ ! echo "partition" >$format_file ! echo "$slicenum" >> $format_file ! echo "" >> $format_file ! echo "" >> $format_file ! echo "$start" >> $format_file ! echo "$size" >> $format_file ! echo "label" >> $format_file ! echo "" >> $format_file ! echo "q" >> $format_file ! echo "q" >> $format_file ! format -e -s -d $disk -f $format_file typeset ret_val=$? ! rm -f $format_file [[ $ret_val -ne 0 ]] && \ log_fail "Unable to format $disk slice $slicenum to $size" return 0 }
*** 674,693 **** disk=${disk#/dev/dsk/} disk=${disk#/dev/rdsk/} disk=${disk%s*} typeset -i ratio=0 ! ratio=$($PRTVTOC /dev/rdsk/${disk}s2 | \ ! $GREP "sectors\/cylinder" | \ ! $AWK '{print $2}') if ((ratio == 0)); then return fi ! typeset -i endcyl=$($PRTVTOC -h /dev/rdsk/${disk}s2 | ! $NAWK -v token="$slice" '{if ($1==token) print $6}') ((endcyl = (endcyl + 1) / ratio)) echo $endcyl } --- 674,693 ---- disk=${disk#/dev/dsk/} disk=${disk#/dev/rdsk/} disk=${disk%s*} typeset -i ratio=0 ! ratio=$(prtvtoc /dev/rdsk/${disk}s2 | \ ! grep "sectors\/cylinder" | \ ! awk '{print $2}') if ((ratio == 0)); then return fi ! typeset -i endcyl=$(prtvtoc -h /dev/rdsk/${disk}s2 | ! nawk -v token="$slice" '{if ($1==token) print $6}') ((endcyl = (endcyl + 1) / ratio)) echo $endcyl }
*** 717,727 **** done } # # This function continues to write to a filenum number of files into dirnum ! # number of directories until either $FILE_WRITE returns an error or the # maximum number of files per directory have been written. # # Usage: # fill_fs [destdir] [dirnum] [filenum] [bytes] [num_writes] [data] # --- 717,727 ---- done } # # This function continues to write to a filenum number of files into dirnum ! # number of directories until either file_write returns an error or the # maximum number of files per directory have been written. # # Usage: # fill_fs [destdir] [dirnum] [filenum] [bytes] [num_writes] [data] #
*** 753,779 **** typeset -i odirnum=1 typeset -i idirnum=0 typeset -i fn=0 typeset -i retval=0 ! log_must $MKDIR -p $destdir/$idirnum while (($odirnum > 0)); do if ((dirnum >= 0 && idirnum >= dirnum)); then odirnum=0 break fi ! $FILE_WRITE -o create -f $destdir/$idirnum/$TESTFILE.$fn \ -b $bytes -c $num_writes -d $data retval=$? if (($retval != 0)); then odirnum=0 break fi if (($fn >= $filenum)); then fn=0 ((idirnum = idirnum + 1)) ! log_must $MKDIR -p $destdir/$idirnum else ((fn = fn + 1)) fi done return $retval --- 753,779 ---- typeset -i odirnum=1 typeset -i idirnum=0 typeset -i fn=0 typeset -i retval=0 ! log_must mkdir -p $destdir/$idirnum while (($odirnum > 0)); do if ((dirnum >= 0 && idirnum >= dirnum)); then odirnum=0 break fi ! file_write -o create -f $destdir/$idirnum/$TESTFILE.$fn \ -b $bytes -c $num_writes -d $data retval=$? if (($retval != 0)); then odirnum=0 break fi if (($fn >= $filenum)); then fn=0 ((idirnum = idirnum + 1)) ! log_must mkdir -p $destdir/$idirnum else ((fn = fn + 1)) fi done return $retval
*** 789,806 **** { typeset prop_val typeset prop=$1 typeset dataset=$2 ! prop_val=$($ZFS get -pH -o value $prop $dataset 2>/dev/null) if [[ $? -ne 0 ]]; then log_note "Unable to get $prop property for dataset " \ "$dataset" return 1 fi ! $ECHO $prop_val return 0 } # # Simple function to get the specified property of pool. If unable to --- 789,806 ---- { typeset prop_val typeset prop=$1 typeset dataset=$2 ! prop_val=$(zfs get -pH -o value $prop $dataset 2>/dev/null) if [[ $? -ne 0 ]]; then log_note "Unable to get $prop property for dataset " \ "$dataset" return 1 fi ! echo $prop_val return 0 } # # Simple function to get the specified property of pool. If unable to
*** 811,822 **** typeset prop_val typeset prop=$1 typeset pool=$2 if poolexists $pool ; then ! prop_val=$($ZPOOL get $prop $pool 2>/dev/null | $TAIL -1 | \ ! $AWK '{print $3}') if [[ $? -ne 0 ]]; then log_note "Unable to get $prop property for pool " \ "$pool" return 1 fi --- 811,822 ---- typeset prop_val typeset prop=$1 typeset pool=$2 if poolexists $pool ; then ! prop_val=$(zpool get $prop $pool 2>/dev/null | tail -1 | \ ! awk '{print $3}') if [[ $? -ne 0 ]]; then log_note "Unable to get $prop property for pool " \ "$pool" return 1 fi
*** 823,833 **** else log_note "Pool $pool not exists." return 1 fi ! $ECHO $prop_val return 0 } # Return 0 if a pool exists; $? otherwise # --- 823,833 ---- else log_note "Pool $pool not exists." return 1 fi ! echo $prop_val return 0 } # Return 0 if a pool exists; $? otherwise #
*** 840,850 **** if [[ -z $pool ]]; then log_note "No pool name given." return 1 fi ! $ZPOOL get name "$pool" > /dev/null 2>&1 return $? } # Return 0 if all the specified datasets exist; $? otherwise # --- 840,850 ---- if [[ -z $pool ]]; then log_note "No pool name given." return 1 fi ! zpool get name "$pool" > /dev/null 2>&1 return $? } # Return 0 if all the specified datasets exist; $? otherwise #
*** 855,865 **** log_note "No dataset name given." return 1 fi while (($# > 0)); do ! $ZFS get name $1 > /dev/null 2>&1 || \ return $? shift done return 0 --- 855,865 ---- log_note "No dataset name given." return 1 fi while (($# > 0)); do ! zfs get name $1 > /dev/null 2>&1 || \ return $? shift done return 0
*** 874,884 **** log_note "No dataset name given." return 1 fi while (($# > 0)); do ! $ZFS list -H -t filesystem,snapshot,volume $1 > /dev/null 2>&1 \ && return 1 shift done return 0 --- 874,884 ---- log_note "No dataset name given." return 1 fi while (($# > 0)); do ! zfs list -H -t filesystem,snapshot,volume $1 > /dev/null 2>&1 \ && return 1 shift done return 0
*** 906,922 **** ;; esac fi fi ! for mtpt in `$SHARE | $AWK '{print $2}'` ; do if [[ $mtpt == $fs ]] ; then return 0 fi done ! typeset stat=$($SVCS -H -o STA nfs/server:default) if [[ $stat != "ON" ]]; then log_note "Current nfs/server status: $stat" fi return 1 --- 906,922 ---- ;; esac fi fi ! for mtpt in `share | awk '{print $2}'` ; do if [[ $mtpt == $fs ]] ; then return 0 fi done ! typeset stat=$(svcs -H -o STA nfs/server:default) if [[ $stat != "ON" ]]; then log_note "Current nfs/server status: $stat" fi return 1
*** 946,956 **** { typeset fs=$1 is_shared $fs if (($? == 0)); then ! log_must $ZFS unshare $fs fi return 0 } --- 946,956 ---- { typeset fs=$1 is_shared $fs if (($? == 0)); then ! log_must zfs unshare $fs fi return 0 }
*** 965,1019 **** log_note "Cannot trigger NFS server by sharing in LZ." return fi typeset nfs_fmri="svc:/network/nfs/server:default" ! if [[ $($SVCS -Ho STA $nfs_fmri) != "ON" ]]; then # # Only really sharing operation can enable NFS server # to online permanently. # typeset dummy=/tmp/dummy if [[ -d $dummy ]]; then ! log_must $RM -rf $dummy fi ! log_must $MKDIR $dummy ! log_must $SHARE $dummy # # Waiting for fmri's status to be the final status. # Otherwise, in transition, an asterisk (*) is appended for # instances, unshare will reverse status to 'DIS' again. # # Waiting for 1's at least. # ! log_must $SLEEP 1 timeout=10 ! while [[ timeout -ne 0 && $($SVCS -Ho STA $nfs_fmri) == *'*' ]] do ! log_must $SLEEP 1 ((timeout -= 1)) done ! log_must $UNSHARE $dummy ! log_must $RM -rf $dummy fi ! log_note "Current NFS status: '$($SVCS -Ho STA,FMRI $nfs_fmri)'" } # # To verify whether calling process is in global zone # # Return 0 if in global zone, 1 in non-global zone # function is_global_zone { ! typeset cur_zone=$($ZONENAME 2>/dev/null) if [[ $cur_zone != "global" ]]; then return 1 fi return 0 } --- 965,1019 ---- log_note "Cannot trigger NFS server by sharing in LZ." return fi typeset nfs_fmri="svc:/network/nfs/server:default" ! if [[ $(svcs -Ho STA $nfs_fmri) != "ON" ]]; then # # Only really sharing operation can enable NFS server # to online permanently. # typeset dummy=/tmp/dummy if [[ -d $dummy ]]; then ! log_must rm -rf $dummy fi ! log_must mkdir $dummy ! log_must share $dummy # # Waiting for fmri's status to be the final status. # Otherwise, in transition, an asterisk (*) is appended for # instances, unshare will reverse status to 'DIS' again. # # Waiting for 1's at least. # ! log_must sleep 1 timeout=10 ! while [[ timeout -ne 0 && $(svcs -Ho STA $nfs_fmri) == *'*' ]] do ! log_must sleep 1 ((timeout -= 1)) done ! log_must unshare $dummy ! log_must rm -rf $dummy fi ! 
log_note "Current NFS status: '$(svcs -Ho STA,FMRI $nfs_fmri)'" } # # To verify whether calling process is in global zone # # Return 0 if in global zone, 1 in non-global zone # function is_global_zone { ! typeset cur_zone=$(zonename 2>/dev/null) if [[ $cur_zone != "global" ]]; then return 1 fi return 0 }
*** 1081,1092 **** if poolexists $pool ; then destroy_pool $pool fi if is_global_zone ; then ! [[ -d /$pool ]] && $RM -rf /$pool ! log_must $ZPOOL create -f $pool $@ fi return 0 } --- 1081,1092 ---- if poolexists $pool ; then destroy_pool $pool fi if is_global_zone ; then ! [[ -d /$pool ]] && rm -rf /$pool ! log_must zpool create -f $pool $@ fi return 0 }
*** 1115,1134 **** # times allowing failures before requiring the destroy # to succeed. typeset -i wait_time=10 ret=1 count=0 must="" while [[ $ret -ne 0 ]]; do ! $must $ZPOOL destroy -f $pool ret=$? [[ $ret -eq 0 ]] && break log_note "zpool destroy failed with $ret" [[ count++ -ge 7 ]] && must=log_must ! $SLEEP $wait_time done [[ -d $mtpt ]] && \ ! log_must $RM -rf $mtpt else log_note "Pool does not exist. ($pool)" return 1 fi fi --- 1115,1134 ---- # times allowing failures before requiring the destroy # to succeed. typeset -i wait_time=10 ret=1 count=0 must="" while [[ $ret -ne 0 ]]; do ! $must zpool destroy -f $pool ret=$? [[ $ret -eq 0 ]] && break log_note "zpool destroy failed with $ret" [[ count++ -ge 7 ]] && must=log_must ! sleep $wait_time done [[ -d $mtpt ]] && \ ! log_must rm -rf $mtpt else log_note "Pool does not exist. ($pool)" return 1 fi fi
*** 1155,1254 **** typeset -i cntctr=5 typeset -i i=0 # Create pool and 5 container within it # ! [[ -d /$pool_name ]] && $RM -rf /$pool_name ! log_must $ZPOOL create -f $pool_name $DISKS while ((i < cntctr)); do ! log_must $ZFS create $pool_name/$prefix_ctr$i ((i += 1)) done # create a zvol ! log_must $ZFS create -V 1g $pool_name/zone_zvol # # If current system support slog, add slog device for pool # if verify_slog_support ; then typeset sdevs="/var/tmp/sdev1 /var/tmp/sdev2" ! log_must $MKFILE $MINVDEVSIZE $sdevs ! log_must $ZPOOL add $pool_name log mirror $sdevs fi # this isn't supported just yet. # Create a filesystem. In order to add this to # the zone, it must have it's mountpoint set to 'legacy' ! # log_must $ZFS create $pool_name/zfs_filesystem ! # log_must $ZFS set mountpoint=legacy $pool_name/zfs_filesystem [[ -d $zone_root ]] && \ ! log_must $RM -rf $zone_root/$zone_name [[ ! -d $zone_root ]] && \ ! log_must $MKDIR -p -m 0700 $zone_root/$zone_name # Create zone configure file and configure the zone # typeset zone_conf=/tmp/zone_conf.$$ ! $ECHO "create" > $zone_conf ! $ECHO "set zonepath=$zone_root/$zone_name" >> $zone_conf ! $ECHO "set autoboot=true" >> $zone_conf i=0 while ((i < cntctr)); do ! $ECHO "add dataset" >> $zone_conf ! $ECHO "set name=$pool_name/$prefix_ctr$i" >> \ $zone_conf ! $ECHO "end" >> $zone_conf ((i += 1)) done # add our zvol to the zone ! $ECHO "add device" >> $zone_conf ! $ECHO "set match=/dev/zvol/dsk/$pool_name/zone_zvol" >> $zone_conf ! $ECHO "end" >> $zone_conf # add a corresponding zvol rdsk to the zone ! $ECHO "add device" >> $zone_conf ! $ECHO "set match=/dev/zvol/rdsk/$pool_name/zone_zvol" >> $zone_conf ! $ECHO "end" >> $zone_conf # once it's supported, we'll add our filesystem to the zone ! # $ECHO "add fs" >> $zone_conf ! # $ECHO "set type=zfs" >> $zone_conf ! # $ECHO "set special=$pool_name/zfs_filesystem" >> $zone_conf ! # $ECHO "set dir=/export/zfs_filesystem" >> $zone_conf ! # $ECHO "end" >> $zone_conf ! 
$ECHO "verify" >> $zone_conf ! $ECHO "commit" >> $zone_conf ! log_must $ZONECFG -z $zone_name -f $zone_conf ! log_must $RM -f $zone_conf # Install the zone ! $ZONEADM -z $zone_name install if (($? == 0)); then ! log_note "SUCCESS: $ZONEADM -z $zone_name install" else ! log_fail "FAIL: $ZONEADM -z $zone_name install" fi # Install sysidcfg file # typeset sysidcfg=$zone_root/$zone_name/root/etc/sysidcfg ! $ECHO "system_locale=C" > $sysidcfg ! $ECHO "terminal=dtterm" >> $sysidcfg ! $ECHO "network_interface=primary {" >> $sysidcfg ! $ECHO "hostname=$zone_name" >> $sysidcfg ! $ECHO "}" >> $sysidcfg ! $ECHO "name_service=NONE" >> $sysidcfg ! $ECHO "root_password=mo791xfZ/SFiw" >> $sysidcfg ! $ECHO "security_policy=NONE" >> $sysidcfg ! $ECHO "timezone=US/Eastern" >> $sysidcfg # Boot this zone ! log_must $ZONEADM -z $zone_name boot } # # Reexport TESTPOOL & TESTPOOL(1-4) # --- 1155,1254 ---- typeset -i cntctr=5 typeset -i i=0 # Create pool and 5 container within it # ! [[ -d /$pool_name ]] && rm -rf /$pool_name ! log_must zpool create -f $pool_name $DISKS while ((i < cntctr)); do ! log_must zfs create $pool_name/$prefix_ctr$i ((i += 1)) done # create a zvol ! log_must zfs create -V 1g $pool_name/zone_zvol # # If current system support slog, add slog device for pool # if verify_slog_support ; then typeset sdevs="/var/tmp/sdev1 /var/tmp/sdev2" ! log_must mkfile $MINVDEVSIZE $sdevs ! log_must zpool add $pool_name log mirror $sdevs fi # this isn't supported just yet. # Create a filesystem. In order to add this to # the zone, it must have it's mountpoint set to 'legacy' ! # log_must zfs create $pool_name/zfs_filesystem ! # log_must zfs set mountpoint=legacy $pool_name/zfs_filesystem [[ -d $zone_root ]] && \ ! log_must rm -rf $zone_root/$zone_name [[ ! -d $zone_root ]] && \ ! log_must mkdir -p -m 0700 $zone_root/$zone_name # Create zone configure file and configure the zone # typeset zone_conf=/tmp/zone_conf.$$ ! echo "create" > $zone_conf ! 
echo "set zonepath=$zone_root/$zone_name" >> $zone_conf ! echo "set autoboot=true" >> $zone_conf i=0 while ((i < cntctr)); do ! echo "add dataset" >> $zone_conf ! echo "set name=$pool_name/$prefix_ctr$i" >> \ $zone_conf ! echo "end" >> $zone_conf ((i += 1)) done # add our zvol to the zone ! echo "add device" >> $zone_conf ! echo "set match=/dev/zvol/dsk/$pool_name/zone_zvol" >> $zone_conf ! echo "end" >> $zone_conf # add a corresponding zvol rdsk to the zone ! echo "add device" >> $zone_conf ! echo "set match=/dev/zvol/rdsk/$pool_name/zone_zvol" >> $zone_conf ! echo "end" >> $zone_conf # once it's supported, we'll add our filesystem to the zone ! # echo "add fs" >> $zone_conf ! # echo "set type=zfs" >> $zone_conf ! # echo "set special=$pool_name/zfs_filesystem" >> $zone_conf ! # echo "set dir=/export/zfs_filesystem" >> $zone_conf ! # echo "end" >> $zone_conf ! echo "verify" >> $zone_conf ! echo "commit" >> $zone_conf ! log_must zonecfg -z $zone_name -f $zone_conf ! log_must rm -f $zone_conf # Install the zone ! zoneadm -z $zone_name install if (($? == 0)); then ! log_note "SUCCESS: zoneadm -z $zone_name install" else ! log_fail "FAIL: zoneadm -z $zone_name install" fi # Install sysidcfg file # typeset sysidcfg=$zone_root/$zone_name/root/etc/sysidcfg ! echo "system_locale=C" > $sysidcfg ! echo "terminal=dtterm" >> $sysidcfg ! echo "network_interface=primary {" >> $sysidcfg ! echo "hostname=$zone_name" >> $sysidcfg ! echo "}" >> $sysidcfg ! echo "name_service=NONE" >> $sysidcfg ! echo "root_password=mo791xfZ/SFiw" >> $sysidcfg ! echo "security_policy=NONE" >> $sysidcfg ! echo "timezone=US/Eastern" >> $sysidcfg # Boot this zone ! log_must zoneadm -z $zone_name boot } # # Reexport TESTPOOL & TESTPOOL(1-4) #
*** 1259,1274 **** while ((i < cntctr)); do if ((i == 0)); then TESTPOOL=$ZONE_POOL/$ZONE_CTR$i if ! ismounted $TESTPOOL; then ! log_must $ZFS mount $TESTPOOL fi else eval TESTPOOL$i=$ZONE_POOL/$ZONE_CTR$i if eval ! ismounted \$TESTPOOL$i; then ! log_must eval $ZFS mount \$TESTPOOL$i fi fi ((i += 1)) done } --- 1259,1274 ---- while ((i < cntctr)); do if ((i == 0)); then TESTPOOL=$ZONE_POOL/$ZONE_CTR$i if ! ismounted $TESTPOOL; then ! log_must zfs mount $TESTPOOL fi else eval TESTPOOL$i=$ZONE_POOL/$ZONE_CTR$i if eval ! ismounted \$TESTPOOL$i; then ! log_must eval zfs mount \$TESTPOOL$i fi fi ((i += 1)) done }
*** 1282,1292 **** { typeset pool=$1 typeset disk=${2#/dev/dsk/} typeset state=$3 ! $ZPOOL status -v $pool | grep "$disk" \ | grep -i "$state" > /dev/null 2>&1 return $? } --- 1282,1292 ---- { typeset pool=$1 typeset disk=${2#/dev/dsk/} typeset state=$3 ! zpool status -v $pool | grep "$disk" \ | grep -i "$state" > /dev/null 2>&1 return $? }
*** 1308,1318 **** if [[ -z $fs || -z $snap ]]; then log_fail "Error name of snapshot '$dataset'." fi ! $ECHO $(get_prop mountpoint $fs)/.zfs/snapshot/$snap } # # Given a pool and file system, this function will verify the file system # using the zdb internal tool. Note that the pool is exported and imported --- 1308,1318 ---- if [[ -z $fs || -z $snap ]]; then log_fail "Error name of snapshot '$dataset'." fi ! echo $(get_prop mountpoint $fs)/.zfs/snapshot/$snap } # # Given a pool and file system, this function will verify the file system # using the zdb internal tool. Note that the pool is exported and imported
*** 1327,1371 **** shift shift typeset dirs=$@ typeset search_path="" ! log_note "Calling $ZDB to verify filesystem '$filesys'" ! $ZFS unmount -a > /dev/null 2>&1 ! log_must $ZPOOL export $pool if [[ -n $dirs ]] ; then for dir in $dirs ; do search_path="$search_path -d $dir" done fi ! log_must $ZPOOL import $search_path $pool ! $ZDB -cudi $filesys > $zdbout 2>&1 if [[ $? != 0 ]]; then ! log_note "Output: $ZDB -cudi $filesys" ! $CAT $zdbout ! log_fail "$ZDB detected errors with: '$filesys'" fi ! log_must $ZFS mount -a ! log_must $RM -rf $zdbout } # # Given a pool, and this function list all disks in the pool # function get_disklist # pool { typeset disklist="" ! disklist=$($ZPOOL iostat -v $1 | $NAWK '(NR >4) {print $1}' | \ ! $GREP -v "\-\-\-\-\-" | \ ! $EGREP -v -e "^(mirror|raidz1|raidz2|spare|log|cache)$") ! $ECHO $disklist } # /** # This function kills a given list of processes after a time period. We use # this in the stress tests instead of STF_TIMEOUT so that we can have processes --- 1327,1371 ---- shift shift typeset dirs=$@ typeset search_path="" ! log_note "Calling zdb to verify filesystem '$filesys'" ! zfs unmount -a > /dev/null 2>&1 ! log_must zpool export $pool if [[ -n $dirs ]] ; then for dir in $dirs ; do search_path="$search_path -d $dir" done fi ! log_must zpool import $search_path $pool ! zdb -cudi $filesys > $zdbout 2>&1 if [[ $? != 0 ]]; then ! log_note "Output: zdb -cudi $filesys" ! cat $zdbout ! log_fail "zdb detected errors with: '$filesys'" fi ! log_must zfs mount -a ! log_must rm -rf $zdbout } # # Given a pool, and this function list all disks in the pool # function get_disklist # pool { typeset disklist="" ! disklist=$(zpool iostat -v $1 | nawk '(NR >4) {print $1}' | \ ! grep -v "\-\-\-\-\-" | \ ! egrep -v -e "^(mirror|raidz1|raidz2|spare|log|cache)$") ! echo $disklist } # /** # This function kills a given list of processes after a time period. We use # this in the stress tests instead of STF_TIMEOUT so that we can have processes
*** 1382,1399 **** shift typeset cpids="$@" log_note "Waiting for child processes($cpids). " \ "It could last dozens of minutes, please be patient ..." ! log_must $SLEEP $TIMEOUT log_note "Killing child processes after ${TIMEOUT} stress timeout." typeset pid for pid in $cpids; do ! $PS -p $pid > /dev/null 2>&1 if (($? == 0)); then ! log_must $KILL -USR1 $pid fi done } # --- 1382,1399 ---- shift typeset cpids="$@" log_note "Waiting for child processes($cpids). " \ "It could last dozens of minutes, please be patient ..." ! log_must sleep $TIMEOUT log_note "Killing child processes after ${TIMEOUT} stress timeout." typeset pid for pid in $cpids; do ! ps -p $pid > /dev/null 2>&1 if (($? == 0)); then ! log_must kill -USR1 $pid fi done } #
*** 1463,1475 **** { typeset pool=$1 typeset token=$2 typeset keyword=$3 ! $ZPOOL status -v "$pool" 2>/dev/null | $NAWK -v token="$token:" ' ($1==token) {print $0}' \ ! | $GREP -i "$keyword" > /dev/null 2>&1 return $? } # --- 1463,1475 ---- { typeset pool=$1 typeset token=$2 typeset keyword=$3 ! zpool status -v "$pool" 2>/dev/null | nawk -v token="$token:" ' ($1==token) {print $0}' \ ! | grep -i "$keyword" > /dev/null 2>&1 return $? } #
*** 1537,1571 **** # function verify_rsh_connect #rhost, username { typeset rhost=$1 typeset username=$2 ! typeset rsh_cmd="$RSH -n" typeset cur_user= ! $GETENT hosts $rhost >/dev/null 2>&1 if (($? != 0)); then log_note "$rhost cannot be found from" \ "administrative database." return 1 fi ! $PING $rhost 3 >/dev/null 2>&1 if (($? != 0)); then log_note "$rhost is not reachable." return 1 fi if ((${#username} != 0)); then rsh_cmd="$rsh_cmd -l $username" cur_user="given user \"$username\"" else ! cur_user="current user \"`$LOGNAME`\"" fi ! if ! $rsh_cmd $rhost $TRUE; then ! log_note "$RSH to $rhost is not accessible" \ "with $cur_user." return 1 fi return 0 --- 1537,1571 ---- # function verify_rsh_connect #rhost, username { typeset rhost=$1 typeset username=$2 ! typeset rsh_cmd="rsh -n" typeset cur_user= ! getent hosts $rhost >/dev/null 2>&1 if (($? != 0)); then log_note "$rhost cannot be found from" \ "administrative database." return 1 fi ! ping $rhost 3 >/dev/null 2>&1 if (($? != 0)); then log_note "$rhost is not reachable." return 1 fi if ((${#username} != 0)); then rsh_cmd="$rsh_cmd -l $username" cur_user="given user \"$username\"" else ! cur_user="current user \"`logname`\"" fi ! if ! $rsh_cmd $rhost true; then ! log_note "rsh to $rhost is not accessible" \ "with $cur_user." return 1 fi return 0
*** 1618,1647 **** shift; shift cmd_str="$@" err_file=/tmp/${rhost}.$$.err if ((${#ruser} == 0)); then ! rsh_str="$RSH -n" else ! rsh_str="$RSH -n -l $ruser" fi $rsh_str $rhost /usr/bin/ksh -c "'$cmd_str; \ print -u 2 \"status=\$?\"'" \ >/dev/null 2>$err_file ret=$? if (($ret != 0)); then ! $CAT $err_file ! $RM -f $std_file $err_file ! log_fail "$RSH itself failed with exit code $ret..." fi ! ret=$($GREP -v 'print -u 2' $err_file | $GREP 'status=' | \ ! $CUT -d= -f2) ! (($ret != 0)) && $CAT $err_file >&2 ! $RM -f $err_file >/dev/null 2>&1 return $ret } # # Get the SUNWstc-fs-zfs package installation path in a remote host --- 1618,1647 ---- shift; shift cmd_str="$@" err_file=/tmp/${rhost}.$$.err if ((${#ruser} == 0)); then ! rsh_str="rsh -n" else ! rsh_str="rsh -n -l $ruser" fi $rsh_str $rhost /usr/bin/ksh -c "'$cmd_str; \ print -u 2 \"status=\$?\"'" \ >/dev/null 2>$err_file ret=$? if (($ret != 0)); then ! cat $err_file ! rm -f $std_file $err_file ! log_fail "rsh itself failed with exit code $ret..." fi ! ret=$(grep -v 'print -u 2' $err_file | grep 'status=' | \ ! cut -d= -f2) ! (($ret != 0)) && cat $err_file >&2 ! rm -f $err_file >/dev/null 2>&1 return $ret } # # Get the SUNWstc-fs-zfs package installation path in a remote host
*** 1650,1663 **** function get_remote_pkgpath { typeset rhost=$1 typeset pkgpath="" ! pkgpath=$($RSH -n $rhost "$PKGINFO -l SUNWstc-fs-zfs | $GREP BASEDIR: |\ ! $CUT -d: -f2") ! $ECHO $pkgpath } #/** # A function to find and locate free disks on a system or from given # disks as the parameter. It works by locating disks that are in use --- 1650,1663 ---- function get_remote_pkgpath { typeset rhost=$1 typeset pkgpath="" ! pkgpath=$(rsh -n $rhost "pkginfo -l SUNWstc-fs-zfs | grep BASEDIR: |\ ! cut -d: -f2") ! echo $pkgpath } #/** # A function to find and locate free disks on a system or from given # disks as the parameter. It works by locating disks that are in use
*** 1672,1690 **** { sfi=/tmp/swaplist.$$ dmpi=/tmp/dumpdev.$$ max_finddisksnum=${MAX_FINDDISKSNUM:-6} ! $SWAP -l > $sfi ! $DUMPADM > $dmpi 2>/dev/null # write an awk script that can process the output of format # to produce a list of disks we know about. Note that we have # to escape "$2" so that the shell doesn't interpret it while # we're creating the awk script. # ------------------- ! $CAT > /tmp/find_disks.awk <<EOF #!/bin/nawk -f BEGIN { FS="."; } /^Specify disk/{ searchdisks=0; --- 1672,1690 ---- { sfi=/tmp/swaplist.$$ dmpi=/tmp/dumpdev.$$ max_finddisksnum=${MAX_FINDDISKSNUM:-6} ! swap -l > $sfi ! dumpadm > $dmpi 2>/dev/null # write an awk script that can process the output of format # to produce a list of disks we know about. Note that we have # to escape "$2" so that the shell doesn't interpret it while # we're creating the awk script. # ------------------- ! cat > /tmp/find_disks.awk <<EOF #!/bin/nawk -f BEGIN { FS="."; } /^Specify disk/{ searchdisks=0;
*** 1701,1733 **** searchdisks=1; } EOF #--------------------- ! $CHMOD 755 /tmp/find_disks.awk ! disks=${@:-$($ECHO "" | $FORMAT -e 2>/dev/null | /tmp/find_disks.awk)} ! $RM /tmp/find_disks.awk unused="" for disk in $disks; do # Check for mounted ! $GREP "${disk}[sp]" /etc/mnttab >/dev/null (($? == 0)) && continue # Check for swap ! $GREP "${disk}[sp]" $sfi >/dev/null (($? == 0)) && continue # check for dump device ! $GREP "${disk}[sp]" $dmpi >/dev/null (($? == 0)) && continue # check to see if this disk hasn't been explicitly excluded # by a user-set environment variable ! $ECHO "${ZFS_HOST_DEVICES_IGNORE}" | $GREP "${disk}" > /dev/null (($? == 0)) && continue unused_candidates="$unused_candidates $disk" done ! $RM $sfi ! $RM $dmpi # now just check to see if those disks do actually exist # by looking for a device pointing to the first slice in # each case. limit the number to max_finddisksnum count=0 --- 1701,1733 ---- searchdisks=1; } EOF #--------------------- ! chmod 755 /tmp/find_disks.awk ! disks=${@:-$(echo "" | format -e 2>/dev/null | /tmp/find_disks.awk)} ! rm /tmp/find_disks.awk unused="" for disk in $disks; do # Check for mounted ! grep "${disk}[sp]" /etc/mnttab >/dev/null (($? == 0)) && continue # Check for swap ! grep "${disk}[sp]" $sfi >/dev/null (($? == 0)) && continue # check for dump device ! grep "${disk}[sp]" $dmpi >/dev/null (($? == 0)) && continue # check to see if this disk hasn't been explicitly excluded # by a user-set environment variable ! echo "${ZFS_HOST_DEVICES_IGNORE}" | grep "${disk}" > /dev/null (($? == 0)) && continue unused_candidates="$unused_candidates $disk" done ! rm $sfi ! rm $dmpi # now just check to see if those disks do actually exist # by looking for a device pointing to the first slice in # each case. limit the number to max_finddisksnum count=0
*** 1740,1750 **** fi fi done # finally, return our disk list ! $ECHO $unused } # # Add specified user to specified group # --- 1740,1750 ---- fi fi done # finally, return our disk list ! echo $unused } # # Add specified user to specified group #
*** 1760,1770 **** if ((${#gname} == 0 || ${#uname} == 0)); then log_fail "group name or user name are not defined." fi ! log_must $USERADD -g $gname -d $basedir/$uname -m $uname return 0 } # --- 1760,1770 ---- if ((${#gname} == 0 || ${#uname} == 0)); then log_fail "group name or user name are not defined." fi ! log_must useradd -g $gname -d $basedir/$uname -m $uname return 0 } #
*** 1780,1794 **** if ((${#user} == 0)); then log_fail "login name is necessary." fi ! if $ID $user > /dev/null 2>&1; then ! log_must $USERDEL $user fi ! [[ -d $basedir/$user ]] && $RM -fr $basedir/$user return 0 } # --- 1780,1794 ---- if ((${#user} == 0)); then log_fail "login name is necessary." fi ! if id $user > /dev/null 2>&1; then ! log_must userdel $user fi ! [[ -d $basedir/$user ]] && rm -fr $basedir/$user return 0 } #
*** 1805,1815 **** fi # Assign 100 as the base gid typeset -i gid=100 while true; do ! $GROUPADD -g $gid $group > /dev/null 2>&1 typeset -i ret=$? case $ret in 0) return 0 ;; # The gid is not unique 4) ((gid += 1)) ;; --- 1805,1815 ---- fi # Assign 100 as the base gid typeset -i gid=100 while true; do ! groupadd -g $gid $group > /dev/null 2>&1 typeset -i ret=$? case $ret in 0) return 0 ;; # The gid is not unique 4) ((gid += 1)) ;;
*** 1828,1844 **** typeset grp=$1 if ((${#grp} == 0)); then log_fail "group name is necessary." fi ! $GROUPMOD -n $grp $grp > /dev/null 2>&1 typeset -i ret=$? case $ret in # Group does not exist. 6) return 0 ;; # Name already exists as a group name ! 9) log_must $GROUPDEL $grp ;; *) return 1 ;; esac return 0 } --- 1828,1844 ---- typeset grp=$1 if ((${#grp} == 0)); then log_fail "group name is necessary." fi ! groupmod -n $grp $grp > /dev/null 2>&1 typeset -i ret=$? case $ret in # Group does not exist. 6) return 0 ;; # Name already exists as a group name ! 9) log_must groupdel $grp ;; *) return 1 ;; esac return 0 }
*** 1856,1888 **** # We check that by deleting the $1 pool, we're not # going to pull the rug out from other pools. Do this # by looking at all other pools, ensuring that they # aren't built from files or zvols contained in this pool. ! for pool in $($ZPOOL list -H -o name) do ALTMOUNTPOOL="" # this is a list of the top-level directories in each of the # files that make up the path to the files the pool is based on ! FILEPOOL=$($ZPOOL status -v $pool | $GREP /$1/ | \ ! $AWK '{print $1}') # this is a list of the zvols that make up the pool ! ZVOLPOOL=$($ZPOOL status -v $pool | $GREP "/dev/zvol/dsk/$1$" \ ! | $AWK '{print $1}') # also want to determine if it's a file-based pool using an # alternate mountpoint... ! POOL_FILE_DIRS=$($ZPOOL status -v $pool | \ ! $GREP / | $AWK '{print $1}' | \ ! $AWK -F/ '{print $2}' | $GREP -v "dev") for pooldir in $POOL_FILE_DIRS do ! OUTPUT=$($ZFS list -H -r -o mountpoint $1 | \ ! $GREP "${pooldir}$" | $AWK '{print $1}') ALTMOUNTPOOL="${ALTMOUNTPOOL}${OUTPUT}" done --- 1856,1888 ---- # We check that by deleting the $1 pool, we're not # going to pull the rug out from other pools. Do this # by looking at all other pools, ensuring that they # aren't built from files or zvols contained in this pool. ! for pool in $(zpool list -H -o name) do ALTMOUNTPOOL="" # this is a list of the top-level directories in each of the # files that make up the path to the files the pool is based on ! FILEPOOL=$(zpool status -v $pool | grep /$1/ | \ ! awk '{print $1}') # this is a list of the zvols that make up the pool ! ZVOLPOOL=$(zpool status -v $pool | grep "/dev/zvol/dsk/$1$" \ ! | awk '{print $1}') # also want to determine if it's a file-based pool using an # alternate mountpoint... ! POOL_FILE_DIRS=$(zpool status -v $pool | \ ! grep / | awk '{print $1}' | \ ! awk -F/ '{print $2}' | grep -v "dev") for pooldir in $POOL_FILE_DIRS do ! OUTPUT=$(zfs list -H -r -o mountpoint $1 | \ ! 
grep "${pooldir}$" | awk '{print $1}') ALTMOUNTPOOL="${ALTMOUNTPOOL}${OUTPUT}" done
*** 1928,1942 **** COMPRESS_OPTS="on lzjb" elif [[ $1 == "zfs_set" ]] ; then COMPRESS_OPTS="on off lzjb" fi typeset valid_opts="$COMPRESS_OPTS" ! $ZFS get 2>&1 | $GREP gzip >/dev/null 2>&1 if [[ $? -eq 0 ]]; then valid_opts="$valid_opts $GZIP_OPTS" fi ! $ECHO "$valid_opts" } # # Verify zfs operation with -p option works as expected # $1 operation, value could be create, clone or rename --- 1928,1942 ---- COMPRESS_OPTS="on lzjb" elif [[ $1 == "zfs_set" ]] ; then COMPRESS_OPTS="on off lzjb" fi typeset valid_opts="$COMPRESS_OPTS" ! zfs get 2>&1 | grep gzip >/dev/null 2>&1 if [[ $? -eq 0 ]]; then valid_opts="$valid_opts $GZIP_OPTS" fi ! echo "$valid_opts" } # # Verify zfs operation with -p option works as expected # $1 operation, value could be create, clone or rename
*** 1985,2010 **** ;; esac # make sure the upper level filesystem does not exist if datasetexists ${newdataset%/*} ; then ! log_must $ZFS destroy -rRf ${newdataset%/*} fi # without -p option, operation will fail ! log_mustnot $ZFS $ops $dataset $newdataset log_mustnot datasetexists $newdataset ${newdataset%/*} # with -p option, operation should succeed ! log_must $ZFS $ops -p $dataset $newdataset if ! datasetexists $newdataset ; then log_fail "-p option does not work for $ops" fi # when $ops is create or clone, redo the operation still return zero if [[ $ops != "rename" ]]; then ! log_must $ZFS $ops -p $dataset $newdataset fi return 0 } --- 1985,2010 ---- ;; esac # make sure the upper level filesystem does not exist if datasetexists ${newdataset%/*} ; then ! log_must zfs destroy -rRf ${newdataset%/*} fi # without -p option, operation will fail ! log_mustnot zfs $ops $dataset $newdataset log_mustnot datasetexists $newdataset ${newdataset%/*} # with -p option, operation should succeed ! log_must zfs $ops -p $dataset $newdataset if ! datasetexists $newdataset ; then log_fail "-p option does not work for $ops" fi # when $ops is create or clone, redo the operation still return zero if [[ $ops != "rename" ]]; then ! log_must zfs $ops -p $dataset $newdataset fi return 0 }
*** 2020,2035 **** typeset alt_root if ! poolexists "$pool" ; then return 1 fi ! alt_root=$($ZPOOL list -H $pool | $AWK '{print $NF}') if [[ $alt_root == "-" ]]; then ! value=$($ZDB -C $pool | $GREP "$config:" | $AWK -F: \ '{print $2}') else ! value=$($ZDB -e $pool | $GREP "$config:" | $AWK -F: \ '{print $2}') fi if [[ -n $value ]] ; then value=${value#'} value=${value%'} --- 2020,2035 ---- typeset alt_root if ! poolexists "$pool" ; then return 1 fi ! alt_root=$(zpool list -H $pool | awk '{print $NF}') if [[ $alt_root == "-" ]]; then ! value=$(zdb -C $pool | grep "$config:" | awk -F: \ '{print $2}') else ! value=$(zdb -e $pool | grep "$config:" | awk -F: \ '{print $2}') fi if [[ -n $value ]] ; then value=${value#'} value=${value%'}
*** 2052,2063 **** typeset str="$@" typeset -i ind ((ind = RANDOM % cnt + 1)) ! typeset ret=$($ECHO "$str" | $CUT -f $ind -d ' ') ! $ECHO $ret } # # Random select one of item from arguments which include NONE string # --- 2052,2063 ---- typeset str="$@" typeset -i ind ((ind = RANDOM % cnt + 1)) ! typeset ret=$(echo "$str" | cut -f $ind -d ' ') ! echo $ret } # # Random select one of item from arguments which include NONE string #
*** 2085,2102 **** typeset dir=/tmp/disk.$$ typeset pool=foo.$$ typeset vdev=$dir/a typeset sdev=$dir/b ! $MKDIR -p $dir ! $MKFILE $MINVDEVSIZE $vdev $sdev typeset -i ret=0 ! if ! $ZPOOL create -n $pool $vdev log $sdev > /dev/null 2>&1; then ret=1 fi ! $RM -r $dir return $ret } # --- 2085,2102 ---- typeset dir=/tmp/disk.$$ typeset pool=foo.$$ typeset vdev=$dir/a typeset sdev=$dir/b ! mkdir -p $dir ! mkfile $MINVDEVSIZE $vdev $sdev typeset -i ret=0 ! if ! zpool create -n $pool $vdev log $sdev > /dev/null 2>&1; then ret=1 fi ! rm -r $dir return $ret } #
*** 2121,2131 **** l_name="${l_name}$basestr" ((iter -= 1)) done ! $ECHO $l_name } # # Get cksum tuple of dataset # $1 dataset name --- 2121,2131 ---- l_name="${l_name}$basestr" ((iter -= 1)) done ! echo $l_name } # # Get cksum tuple of dataset # $1 dataset name
*** 2136,2160 **** # lzjb LE contiguous unique double size=800L/200P birth=2413856L/2413856P # fill=7 cksum=11ce125712:643a9c18ee2:125e25238fca0:254a3f74b59744 function datasetcksum { typeset cksum ! $SYNC ! cksum=$($ZDB -vvv $1 | $GREP "^Dataset $1 \[" | $GREP "cksum" \ ! | $AWK -F= '{print $7}') ! $ECHO $cksum } # # Get cksum of file # $1 file path # function checksum { typeset cksum ! cksum=$($CKSUM $1 | $AWK '{print $1}') ! $ECHO $cksum } # # Get the given disk/slice state from the specific field of the pool # --- 2136,2160 ---- # lzjb LE contiguous unique double size=800L/200P birth=2413856L/2413856P # fill=7 cksum=11ce125712:643a9c18ee2:125e25238fca0:254a3f74b59744 function datasetcksum { typeset cksum ! sync ! cksum=$(zdb -vvv $1 | grep "^Dataset $1 \[" | grep "cksum" \ ! | awk -F= '{print $7}') ! echo $cksum } # # Get cksum of file # $1 file path # function checksum { typeset cksum ! cksum=$(cksum $1 | awk '{print $1}') ! echo $cksum } # # Get the given disk/slice state from the specific field of the pool #
*** 2162,2173 **** { typeset pool=$1 typeset disk=${2#/dev/dsk/} typeset field=${3:-$pool} ! state=$($ZPOOL status -v "$pool" 2>/dev/null | \ ! $NAWK -v device=$disk -v pool=$pool -v field=$field \ 'BEGIN {startconfig=0; startfield=0; } /config:/ {startconfig=1} (startconfig==1) && ($1==field) {startfield=1; next;} (startfield==1) && ($1==device) {print $2; exit;} (startfield==1) && --- 2162,2173 ---- { typeset pool=$1 typeset disk=${2#/dev/dsk/} typeset field=${3:-$pool} ! state=$(zpool status -v "$pool" 2>/dev/null | \ ! nawk -v device=$disk -v pool=$pool -v field=$field \ 'BEGIN {startconfig=0; startfield=0; } /config:/ {startconfig=1} (startconfig==1) && ($1==field) {startfield=1; next;} (startfield==1) && ($1==device) {print $2; exit;} (startfield==1) &&
*** 2191,2201 **** # # $ df -n / # / : ufs # ! $DF -n $dir | $AWK '{print $3}' } # # Given a disk, label it to VTOC regardless of what label was on the disk # $1 disk --- 2191,2201 ---- # # $ df -n / # / : ufs # ! df -n $dir | awk '{print $3}' } # # Given a disk, label it to VTOC regardless of what label was on the disk # $1 disk
*** 2205,2244 **** typeset disk=$1 if [[ -z $disk ]]; then log_fail "The disk name is unspecified." fi typeset label_file=/var/tmp/labelvtoc.$$ ! typeset arch=$($UNAME -p) if [[ $arch == "i386" ]]; then ! $ECHO "label" > $label_file ! $ECHO "0" >> $label_file ! $ECHO "" >> $label_file ! $ECHO "q" >> $label_file ! $ECHO "q" >> $label_file ! $FDISK -B $disk >/dev/null 2>&1 # wait a while for fdisk finishes ! $SLEEP 60 elif [[ $arch == "sparc" ]]; then ! $ECHO "label" > $label_file ! $ECHO "0" >> $label_file ! $ECHO "" >> $label_file ! $ECHO "" >> $label_file ! $ECHO "" >> $label_file ! $ECHO "q" >> $label_file else log_fail "unknown arch type" fi ! $FORMAT -e -s -d $disk -f $label_file typeset -i ret_val=$? ! $RM -f $label_file # # wait the format to finish # ! $SLEEP 60 if ((ret_val != 0)); then log_fail "unable to label $disk as VTOC." fi return 0 --- 2205,2244 ---- typeset disk=$1 if [[ -z $disk ]]; then log_fail "The disk name is unspecified." fi typeset label_file=/var/tmp/labelvtoc.$$ ! typeset arch=$(uname -p) if [[ $arch == "i386" ]]; then ! echo "label" > $label_file ! echo "0" >> $label_file ! echo "" >> $label_file ! echo "q" >> $label_file ! echo "q" >> $label_file ! fdisk -B $disk >/dev/null 2>&1 # wait a while for fdisk finishes ! sleep 60 elif [[ $arch == "sparc" ]]; then ! echo "label" > $label_file ! echo "0" >> $label_file ! echo "" >> $label_file ! echo "" >> $label_file ! echo "" >> $label_file ! echo "q" >> $label_file else log_fail "unknown arch type" fi ! format -e -s -d $disk -f $label_file typeset -i ret_val=$? ! rm -f $label_file # # wait the format to finish # ! sleep 60 if ((ret_val != 0)); then log_fail "unable to label $disk as VTOC." fi return 0
*** 2248,2258 **** # check if the system was installed as zfsroot or not # return: 0 true, otherwise false # function is_zfsroot { ! $DF -n / | $GREP zfs > /dev/null 2>&1 return $? } # # get the root filesystem name if it's zfsroot system. --- 2248,2258 ---- # check if the system was installed as zfsroot or not # return: 0 true, otherwise false # function is_zfsroot { ! df -n / | grep zfs > /dev/null 2>&1 return $? } # # get the root filesystem name if it's zfsroot system.
*** 2259,2276 **** # # return: root filesystem name function get_rootfs { typeset rootfs="" ! rootfs=$($AWK '{if ($2 == "/" && $3 == "zfs") print $1}' \ /etc/mnttab) if [[ -z "$rootfs" ]]; then log_fail "Can not get rootfs" fi ! $ZFS list $rootfs > /dev/null 2>&1 if (($? == 0)); then ! $ECHO $rootfs else log_fail "This is not a zfsroot system." fi } --- 2259,2276 ---- # # return: root filesystem name function get_rootfs { typeset rootfs="" ! rootfs=$(awk '{if ($2 == "/" && $3 == "zfs") print $1}' \ /etc/mnttab) if [[ -z "$rootfs" ]]; then log_fail "Can not get rootfs" fi ! zfs list $rootfs > /dev/null 2>&1 if (($? == 0)); then ! echo $rootfs else log_fail "This is not a zfsroot system." fi }
*** 2281,2299 **** # function get_rootpool { typeset rootfs="" typeset rootpool="" ! rootfs=$($AWK '{if ($2 == "/" && $3 =="zfs") print $1}' \ /etc/mnttab) if [[ -z "$rootfs" ]]; then log_fail "Can not get rootpool" fi ! $ZFS list $rootfs > /dev/null 2>&1 if (($? == 0)); then ! rootpool=`$ECHO $rootfs | awk -F\/ '{print $1}'` ! $ECHO $rootpool else log_fail "This is not a zfsroot system." fi } --- 2281,2299 ---- # function get_rootpool { typeset rootfs="" typeset rootpool="" ! rootfs=$(awk '{if ($2 == "/" && $3 =="zfs") print $1}' \ /etc/mnttab) if [[ -z "$rootfs" ]]; then log_fail "Can not get rootpool" fi ! zfs list $rootfs > /dev/null 2>&1 if (($? == 0)); then ! rootpool=`echo $rootfs | awk -F\/ '{print $1}'` ! echo $rootpool else log_fail "This is not a zfsroot system." fi }
*** 2303,2313 **** function is_physical_device #device { typeset device=${1#/dev/dsk/} device=${device#/dev/rdsk/} ! $ECHO $device | $EGREP "^c[0-F]+([td][0-F]+)+$" > /dev/null 2>&1 return $? } # # Get the directory path of given device --- 2303,2313 ---- function is_physical_device #device { typeset device=${1#/dev/dsk/} device=${device#/dev/rdsk/} ! echo $device | egrep "^c[0-F]+([td][0-F]+)+$" > /dev/null 2>&1 return $? } # # Get the directory path of given device
*** 2318,2330 **** if ! $(is_physical_device $device) ; then if [[ $device != "/" ]]; then device=${device%/*} fi ! $ECHO $device else ! $ECHO "/dev/dsk" fi } # # Get the package name --- 2318,2330 ---- if ! $(is_physical_device $device) ; then if [[ $device != "/" ]]; then device=${device%/*} fi ! echo $device else ! echo "/dev/dsk" fi } # # Get the package name
*** 2339,2349 **** # # Get the number of words in a string separated by white space # function get_word_count { ! $ECHO $1 | $WC -w } # # To verify if the required number of disks is given # --- 2339,2349 ---- # # Get the number of words in a string separated by white space # function get_word_count { ! echo $1 | wc -w } # # To verify if the required number of disks is given #
*** 2383,2393 **** # # Check if Trusted Extensions are installed and enabled # function is_te_enabled { ! $SVCS -H -o state labeld 2>/dev/null | $GREP "enabled" if (($? != 0)); then return 1 else return 0 fi --- 2383,2393 ---- # # Check if Trusted Extensions are installed and enabled # function is_te_enabled { ! svcs -H -o state labeld 2>/dev/null | grep "enabled" if (($? != 0)); then return 1 else return 0 fi
*** 2394,2418 **** } # Utility function to determine if a system has multiple cpus. function is_mp { ! (($($PSRINFO | $WC -l) > 1)) } function get_cpu_freq { ! $PSRINFO -v 0 | $AWK '/processor operates at/ {print $6}' } # Run the given command as the user provided. function user_run { typeset user=$1 shift ! eval \$SU \$user -c \"$@\" > /tmp/out 2>/tmp/err return $? } # # Check if the pool contains the specified vdevs --- 2394,2418 ---- } # Utility function to determine if a system has multiple cpus. function is_mp { ! (($(psrinfo | wc -l) > 1)) } function get_cpu_freq { ! psrinfo -v 0 | awk '/processor operates at/ {print $6}' } # Run the given command as the user provided. function user_run { typeset user=$1 shift ! eval su \$user -c \"$@\" > /tmp/out 2>/tmp/err return $? } # # Check if the pool contains the specified vdevs
*** 2433,2450 **** return 2 fi shift ! typeset tmpfile=$($MKTEMP) ! $ZPOOL list -Hv "$pool" >$tmpfile for vdev in $@; do ! $GREP -w ${vdev##*/} $tmpfile >/dev/null 2>&1 [[ $? -ne 0 ]] && return 1 done ! $RM -f $tmpfile return 0; } function get_max --- 2433,2450 ---- return 2 fi shift ! typeset tmpfile=$(mktemp) ! zpool list -Hv "$pool" >$tmpfile for vdev in $@; do ! grep -w ${vdev##*/} $tmpfile >/dev/null 2>&1 [[ $? -ne 0 ]] && return 1 done ! rm -f $tmpfile return 0; } function get_max