#! /usr/bin/ksh -p
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or http://www.opensolaris.org/os/licensing.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#

#
# Copyright 2008 Sun Microsystems, Inc. All rights reserved.
# Use is subject to license terms.
#
# Set up the $SERVER for testing NFS V4 protocols.
#

SETDEBUG
[[ -n $DEBUG ]] && [[ $DEBUG != 0 ]] && set -x

NAME=$(basename $0)

id | grep "0(root)" > /dev/null 2>&1
if (( $? != 0 )); then
	echo "$NAME: ERROR - Must be root to run this script for setup."
	exit 1
fi

Usage="ERROR - Usage: $NAME -s | -c | -r \n
	-s: to set up this host with NFSv4 and share\n
	-r: to clean up the LOFI/ZFS filesystems for recovery tests\n
	-c: to clean up the server\n
"
if (( $# < 1 )); then
	echo "$NAME: ERROR - incorrect usage."
	echo $Usage
	exit 2
fi

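#
# NOTE: SETDEBUG above and the *_from_client assignments below are
# placeholder tokens; they are presumably replaced with real values by the
# client-side configuration before this script is run on the server.
#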
ENVFILE=ENV_from_client
TMPDIR=Tmpdir_from_client
CONFIGDIR=CONFIGDIR_from_client
ZONE_PATH=ZONE_PATH_from_client

QUOTA_FMRI="svc:/network/nfs/rquota:default"
SMF_TIMEOUT=60

# source the environment/config file from client to be consistent
. $CONFIGDIR/$ENVFILE
. $CONFIGDIR/libsmf.shlib

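#
# On a Trusted Extensions (CIPSO) server the tests run against a labeled
# non-global zone, so verify that ZONE_PATH matches the zonepath of an
# existing non-global zone and remember that zone's name.
#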
iscipso=0
if [[ -x /usr/sbin/tninfo ]]; then
	/usr/sbin/tninfo -h $(uname -n) | grep cipso >/dev/null 2>&1
	if (( $? == 0 )); then
		iscipso=1
		if [[ -z $ZONE_PATH ]]; then
			echo "$NAME: ERROR - ZONE_PATH is null!"
			exit 2
		fi

		zlist=$(/usr/sbin/zoneadm list)
		if [[ -z $zlist ]]; then
			echo "$NAME: ERROR - no zones exist on server!"
			exit 2
		fi

		if [[ $zlist == global ]]; then
			echo "$NAME: ERROR - No non-global zones on server!"
			exit 2
		fi

		fnd=0
		for azone in $zlist
		do
			[[ $azone == global ]] && continue
			X=$(zoneadm -z $azone list -p | cut -d ":" -f 4)
			[[ -z $X ]] && continue
			X1=$(echo "$X" | sed -e 's/\// /g' | awk '{print $1}')
			X2=$(echo "$X" | sed -e 's/\// /g' | awk '{print $2}')
			Y1=$(echo "$ZONE_PATH" | sed -e 's/\// /g' | \
				awk '{print $1}')
			Y2=$(echo "$ZONE_PATH" | sed -e 's/\// /g' | \
				awk '{print $2}')
			if [[ $X1 == $Y1 && $X2 == $Y2 ]]; then
				fnd=1
				localzone=$azone
				break
			fi
		done

		if (( fnd == 0 )); then
			echo "$NAME: ERROR - ZONE_PATH doesn't match any zone!"
			exit 2
		fi
	fi
fi

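# remove the temporary files created by this script and exit with the
# supplied status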
function cleanup {
	rm -f $TMPDIR/*.$$
	exit $1
}

# quick function to create sub ZFS pool
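# usage:	create_zpool -f <pool> <file>		- pool backed by a file
#		create_zpool -v <size> <volume> <pool>	- pool backed by a ZFS volume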
function create_zpool
{
	[[ -n "$DEBUG" ]] && [[ "$DEBUG" != "0" ]] && set -x
	typeset Fname=create_zpool
	getopts fv opt
	case $opt in
	f)	# create pool on file
		typeset pname=$2 fname=$3
		zpool create -f $pname $fname > $TMPDIR/zpool.out.$$ 2>&1
		if [[ $? != 0 ]]; then
			echo "$Fname: failed to create zpool -"
			cat $TMPDIR/zpool.out.$$
			zpool status $pname
			return 2
		fi
		;;
	v)	# create pool on volume
		typeset size=$2		# size is in the form of 5m/2g
		typeset vname=$3
		typeset pname=$4
		echo "$NAME: Setting test filesystems with ZFS ..."
		zpool status > $TMPDIR/zstatus.out.$$ 2>&1
		grep "$vname" $TMPDIR/zstatus.out.$$ | \
			grep ONLINE >/dev/null 2>&1
		if [[ $? != 0 ]]; then
			zfs create -V $size $vname > $TMPDIR/zpool.out.$$ 2>&1
			if [[ $? != 0 ]]; then
				echo "$NAME: failed to create volume -"
				cat $TMPDIR/zpool.out.$$
				grep "same dev" $TMPDIR/zpool.out.$$ \
					> /dev/null 2>&1
				[[ $? == 0 ]] && zpool status
				return 2
			fi
			zpool create -f $pname /dev/zvol/dsk/$vname \
				> $TMPDIR/zpool.out.$$ 2>&1
			if [[ $? != 0 ]]; then
				echo "$NAME: failed to create sub zpool -"
				cat $TMPDIR/zpool.out.$$
				grep "same dev" $TMPDIR/zpool.out.$$ \
					> /dev/null 2>&1
				[[ $? == 0 ]] && zpool status
				return 2
			fi
		fi
		;;
	*)
		echo "$Fname: ERROR - incorrect usage."
		return 2
		;;
	esac

}

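# destroy the NSPC pool (if it was created) and all ZFS filesystems that
# live under $BASEDIR; used by the -c cleanup path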
function destroy_zpool
{
	[[ -n $DEBUG ]] && [[ $DEBUG != 0 ]] && set -x
	# NSPCPOOL is always created; so need to destroy
	if [[ -n $NSPCPOOL ]]; then
		zpool destroy -f $NSPCPOOL >> $TMPDIR/zfsDes.out.$$ 2>&1
		if (( $? != 0 )); then
			echo "WARNING, failed to destroy [$NSPCPOOL];"
			cat $TMPDIR/zfsDes.out.$$
			echo "\t Please clean it up manually."
		fi
	fi

	ZFSn=$(zfs list | grep "$BASEDIR" | nawk '{print $1}')
	zfs destroy -f -r $ZFSn > $TMPDIR/zfsDes.out.$$ 2>&1
	if (( $? != 0 )); then
		echo "WARNING, unable to cleanup [$BASEDIR];"
		cat $TMPDIR/zfsDes.out.$$
		echo "\t Please clean it up manually."
	fi
}

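# create one test filesystem; uses ZFS when TestZFS=1 (NSPCDIR gets its own
# 64m file-backed pool so it can be filled up), otherwise setupFS (LOFI).
# The remaining arguments are passed through to the backend.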
function create_test_fs
{
	[[ -n $DEBUG ]] && [[ $DEBUG != 0 ]] && set -x

	fsname=$1
	shift
	FSdir_opt=$*
	typeset ret=0

	if (( TestZFS == 1 )); then
		if [[ $fsname == NSPCDIR ]]; then
			typeset pool=NSPCpool
			mkfile 64m $BASEDIR/NSPCpoolfile
			create_zpool -f $pool $BASEDIR/NSPCpoolfile \
				> $TMPDIR/nspc.out.$$ 2>&1
			if [[ $? != 0 ]]; then
				echo "ERROR, unable to setup NSPC pool;"
				cat $TMPDIR/nspc.out.$$
				cleanup 3
			fi
			echo "NSPCPOOL=$pool; export NSPCPOOL" \
				>> $CONFIGDIR/$ENVFILE
			zfs set mountpoint=$NSPCDIR $pool \
				> $TMPDIR/$fsname.out.$$ 2>&1
			ret=$?
			chmod 0777 $NSPCDIR
		else
			create_zfs_fs $FSdir_opt > $TMPDIR/$fsname.out.$$ 2>&1
			ret=$?
		fi
	else
		$CONFIGDIR/setupFS -s $FSdir_opt > $TMPDIR/$fsname.out.$$ 2>&1
		ret=$?
	fi
	if (( $ret != 0 )); then
		echo "WARNING: unable to setup $fsname - "
		cat $TMPDIR/$fsname.out.$$
		cleanup $ret
	fi
}

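# populate <dir> with some known test objects: a 0666 read/write file, a
# 0444 read-only file, a 0755 directory tree, and two extended-attribute
# files on $DIR0755 (plus ZFS ACL entries when testing ZFS)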
function create_some_files	# quick function to create some files
{
	[[ -n $DEBUG ]] && [[ $DEBUG != 0 ]] && set -x
	UDIR=$1

	head -88 $CONFIGDIR/setserver > $UDIR/$RWFILE
	chmod 0666 $UDIR/$RWFILE
	tail -38 $CONFIGDIR/setupFS > $UDIR/$ROFILE
	chmod 0444 $UDIR/$ROFILE
	mkdir -p $UDIR/$DIR0755/dir2/dir3
	chmod -R 0755 $UDIR/$DIR0755
	if (( TestZFS == 1 )); then
		ACLs=write_xattr/write_attributes/write_acl/add_file:allow
		chmod A+everyone@:${ACLs} $UDIR/$DIR0755 $UDIR/$RWFILE
	fi
	echo "this is the ext-attr file for $UDIR/$DIR0755" | \
		runat $UDIR/$DIR0755 "cat > $ATTRDIR_AT1; chmod 0777 ."
	runat $UDIR/$DIR0755 \
		"cp $ATTRDIR_AT1 $ATTRDIR_AT2; chmod 0 $ATTRDIR_AT2"
}

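# usage: create_zfs_fs <mountpoint> [size] [remount_option]
# creates $ZFSPOOL/<MOUNTPOINT-basename-uppercased>, sets its mountpoint,
# optionally sets a quota of <size> and remounts with <remount_option>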
function create_zfs_fs	# quick function to create ZFS filesystem
{
	[[ -n $DEBUG ]] && [[ $DEBUG != 0 ]] && set -x
	FSname=$1
	FSsize=""; FSmopt=""	# reset to avoid stale values from a previous call
	(( $# >= 2 )) && FSsize=$2	# size is in the form of 5m/2g
	(( $# >= 3 )) && FSmopt=$3	# remount option

	typeset -u ZName=$(basename $FSname)
	zfs create $ZFSPOOL/$ZName > $TMPDIR/czfs.out.$$ 2>&1
	if (( $? != 0 )); then
		echo "create_zfs_fs failed to zfs create $ZFSPOOL/$ZName"
		cat $TMPDIR/czfs.out.$$
		return 2
	fi
	zfs set mountpoint=$FSname $ZFSPOOL/$ZName > $TMPDIR/szfs.out.$$ 2>&1
	if (( $? != 0 )); then
		echo "create_zfs_fs failed to zfs set mountpoint=$FSname \c"
		echo "to $ZFSPOOL/$ZName"
		cat $TMPDIR/szfs.out.$$
		return 2
	fi
	chmod 777 $FSname
	ACLs=write_xattr/write_attributes/write_acl/add_file:allow
	chmod A+everyone@:${ACLs} $FSname

	if [[ -n $FSsize ]]; then
		zfs set quota=$FSsize $ZFSPOOL/$ZName > $TMPDIR/qzfs.out.$$ 2>&1
		if (( $? != 0 )); then
			echo "create_zfs_fs failed to zfs set quota=$FSsize"
			cat $TMPDIR/qzfs.out.$$
			return 2
		fi
		unset FSsize
	fi
	if [[ -n $FSmopt ]]; then
		zfs umount $ZFSPOOL/$ZName > $TMPDIR/mzfs.out.$$ 2>&1
		zfs mount -o $FSmopt $ZFSPOOL/$ZName >> $TMPDIR/mzfs.out.$$ 2>&1
		if (( $? != 0 )); then
			echo "create_zfs_fs failed to zfs remount $FSmopt"
			cat $TMPDIR/mzfs.out.$$
			return 2
		fi
		unset FSmopt
	fi
}


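#
# main: dispatch on the single option given by the client --
#	-s: set up and share the test filesystems
#	-r: clean up the per-test filesystems for recovery tests
#	-c: clean up the whole server
#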
getopts scr opt
case $opt in
s)
	# Check if correct arch is in path (in case default got wrong value)
	arch=$(uname -p)
	if [[ $arch == sparc ]]; then
		arch2="i386"
	else
		arch2="sparc"
	fi
	# Make sure the wrong arch is not in string
	res=$(echo $CC_SRV | grep $arch2)
	if (( $? == 0 )); then
		OLD_CC=$CC_SRV;
		# try to fix by replacing with correct arch
		CC_SRV=$(echo $CC_SRV | sed "s/$arch2/$arch/g")
		sed "s@$OLD_CC@$CC_SRV@" $TMPDIR/$ENVFILE \
			> $TMPDIR/env.fil
		rm -f $TMPDIR/$ENVFILE
		mv $TMPDIR/env.fil $TMPDIR/$ENVFILE
	fi
	# Check if the specified compiler is available
	$CC_SRV -flags > $TMPDIR/cc-flags.out.$$ 2>&1
	if (( $? != 0 )); then
		echo "WARNING: the compiler <$CC_SRV> failed to run"
		echo "\tsome tests may fail"
		echo "\t<cc -flags> output was:"
		cat $TMPDIR/cc-flags.out.$$
	fi

	cp -p /etc/passwd /etc/passwd.orig
	cp -p /etc/group /etc/group.orig
	# remove users left over from setups that were not cleaned up
	/usr/xpg4/bin/egrep -v "2345678." /etc/passwd.orig > /etc/passwd 2>&1
	/usr/xpg4/bin/egrep -v "2345678." /etc/group.orig > /etc/group 2>&1

	# add test users ... should be the same as on $TESTHOST
	echo "$TUSER1:x:23456787:10:NFSv4 Test User 1:$TMPDIR:/bin/sh" \
		>>/etc/passwd
	echo "$TUSER2:x:23456788:10:NFSv4 Test User 2:$TMPDIR:/bin/sh" \
		>>/etc/passwd
	echo "$TUSER3:x:23456789:1:NFSv4 Test User 3:$TMPDIR:/bin/sh" \
		>>/etc/passwd
	# except the following server-side users
	echo "$TUSERS:x:$TUSERSID:10:NFSv4 Test User Server:$TMPDIR:/bin/sh" \
		>>/etc/passwd
	echo "$TUSERS2:x:$TUSERID:10:NFSv4 Test User Server 2:$TMPDIR:/bin/sh" \
		>>/etc/passwd
	echo \
	    "$TUSERS3:x:$TUSERSID3:10:NFSv4 Test User Server 3:$TMPDIR:/bin/sh" \
		>>/etc/passwd
	echo "$UTF8_USR:x:$TUSERUTF8:$TUSERUTF8:utf8 USER 1:$TMPDIR:/sbin/sh" \
		>>/etc/passwd
	echo "$UTF8_USR::$TUSERUTF8:" >> /etc/group

	pwconv	# make sure the shadow file matches
	N=1
	n=$(/usr/xpg4/bin/egrep "2345678." /etc/group | wc -l | \
		nawk '{print $1}')
	if (( n != N )); then
		echo "$NAME: ERROR - adding test groups failed, \
			groups file shows n=$n not $N"
		cleanup 2
	fi
	n=$(/usr/xpg4/bin/egrep \
		"^$TUSER1|^$TUSER2|^$TUSER3|^$TUSERS|^$TUSERS2|^$TUSERS3" \
		/etc/shadow | wc -l | nawk '{print $1}')
	N=6
	if (( n != N )); then
		echo "$NAME: ERROR - adding normal test users failed, \
			shadow file shows n=$n not $N"
		cleanup 2
	fi
	res=$(locale | awk -F= '{print $2}' | grep -v "^$" | grep -v "C")
	if (( $? == 0 )); then
		echo "WARNING: locale not set to C. Some utf8 tests may fail."
		[[ $DEBUG != 0 ]] && echo "locale = $(locale)\n"
	else
		# this check is broken with some locales, so only run it
		# when the locale is C
		n=$(/usr/xpg4/bin/egrep "^$(echo $UTF8_USR)" /etc/shadow | \
			wc -l | nawk '{print $1}')
		N=1
		if (( n != N )); then
			echo "$NAME: ERROR - adding UTF8 test users failed, \
				shadow file shows n=$n not $N"
			[[ $DEBUG != 0 ]] && echo "locale = $(locale)\n"
			cleanup 2
		fi
	fi

	# check if the nfs tunable values meet the requirement; if not,
	# set the new values and save the old values to the .nfs.flg file
	if [[ ! -f $CONFIGDIR/$SERVER.nfs.flg ]]; then
		res=$($CONFIGDIR/set_nfstunable SERVER_VERSMIN=2 SERVER_VERSMAX=4)
		if (( $? != 0 )); then
			echo "ERROR: cannot set the specific nfs tunable on $SERVER"
			cleanup 1
		else
			[[ -n $res ]] && echo $res > $CONFIGDIR/$SERVER.nfs.flg
		fi
	fi

	# back up BASEDIR if it exists
	rm -fr $BASEDIR.Saved > /dev/null 2>&1
	[[ -d $BASEDIR ]] && mkdir -m 0777 $BASEDIR.Saved && \
		mv $BASEDIR/* $BASEDIR.Saved > /dev/null 2>&1

	# Create pre-defined test files/directories in $BASEDIR
	if (( TestZFS == 1 )); then
		# check first and create the pool only when it's not yet available
		if [[ -z $ZFSDISK ]]; then
			echo "$NAME: setup failed at $SERVER -"
			echo "\tmust define a valid ZFSDISK=<$ZFSDISK>"
			cleanup 2
		fi

		zpool status > $TMPDIR/zstatus.out.$$ 2>&1
		grep "$ZFSDISK" $TMPDIR/zstatus.out.$$ | grep ONLINE >/dev/null 2>&1
		if (( $? != 0 )); then
			echo "$NAME: zpool<$ZFSDISK> is not online -"
			cat $TMPDIR/zstatus.out.$$
			cleanup 2
		fi
		ZFSPOOL=$ZFSDISK; export ZFSPOOL

		echo "$NAME: Setting test filesystems with ZFS ..."
		create_zfs_fs $BASEDIR > $TMPDIR/zfs.out.$$ 2>&1
		if (( $? != 0 )); then
			echo "$NAME: failed to create_zfs_fs $BASEDIR -"
			cat $TMPDIR/zfs.out.$$
			cleanup 2
		fi
		# set aclinherit to "passthrough", which causes sub-dirs
		# and sub-files to inherit all inheritable ACL entries
		# without any modifications;
		# used for the acl tests
		typeset -u ZName=$(basename $BASEDIR)
		zfs set aclinherit=passthrough $ZFSPOOL/$ZName \
			> $TMPDIR/setprop.out.$$ 2>&1
		if (( $? != 0 )); then
			echo "$NAME: WARNING - Failed to set zfs property aclinherit \c"
			echo "to <passthrough> for $BASEDIR in $SERVER. \c"
			echo "Some acl tests may fail."
			cat $TMPDIR/setprop.out.$$
		fi
		# verify the property is set to passthrough
		aclprop=$(zfs get -H -o value aclinherit $ZFSPOOL/$ZName 2>&1)
		if [[ $? != 0 || $aclprop != passthrough ]]; then
			echo "$NAME: WARNING - Failed to get zfs property aclinherit. \c"
			echo "Expected value is <passthrough>, while returned <$aclprop>"
			echo "Some acl tests may fail."
		fi
	fi

	# create test files/directories in the BASEDIR
	$CONFIGDIR/mk_srvdir $BASEDIR > $TMPDIR/mkbd.out.$$ 2>&1
	if (( $? != 0 )); then
		echo "$NAME: ERROR - failed to create test files/dirs in $BASEDIR"
		cat $TMPDIR/mkbd.out.$$
		cleanup 99
	fi

	# share $BASEDIR with the "-p" option for NFS testing, so that the
	# share is enabled by smf and stays persistent across reboots
	share -F nfs -p -o rw $BASEDIR
	share | grep "$BASEDIR" > /dev/null 2>&1
	if (( $? != 0 )); then
		echo "$NAME: ERROR - failed to share <$BASEDIR>, aborting ..."
		share
		cleanup 99
	fi

	if (( TestZFS != 1 )); then
		# Create other FSs (with LOFI) for testing of different areas.
		SRVTESTDIR=$BASEDIR/LOFI_FILES; export SRVTESTDIR
		mkdir -m 0777 -p $SRVTESTDIR
	fi

	# ROFS test dir
	create_test_fs ROFSDIR $ROFSDIR 5m
	create_some_files $ROFSDIR
	$CONFIGDIR/operate_dir "share" $ROFSDIR "ro"

	# Create an FS to be exported with root access
	create_test_fs ROOTDIR $ROOTDIR
	create_some_files $ROOTDIR
	$CONFIGDIR/operate_dir "share" $ROOTDIR "anon=0"

	# PUBLIC test dir
	create_test_fs PUBTDIR $PUBTDIR
	$CONFIGDIR/mk_srvdir $PUBTDIR > $TMPDIR/cfpubt.out.$$ 2>&1
	if (( $? != 0 )); then
		echo "WARNING, unable to create files/dirs in [$PUBTDIR];"
		cat $TMPDIR/cfpubt.out.$$
		echo "\t testing in the area may fail."
	fi
	$CONFIGDIR/operate_dir "share" $PUBTDIR "rw,public"

	# NSPCDIR test dir
	create_test_fs NSPCDIR $NSPCDIR
	# Create a few test files/dirs first
	create_some_files $NSPCDIR
	# Also fill up the FS here
	$CONFIGDIR/fillDisk $NSPCDIR
	$CONFIGDIR/operate_dir "share" $NSPCDIR

	# KRB5 test dir
	create_test_fs KRB5DIR $KRB5DIR
	$CONFIGDIR/mk_srvdir $KRB5DIR > $TMPDIR/cfkrb5.out.$$ 2>&1
	if (( $? != 0 )); then
		echo "WARNING, unable to create files/dirs in [$KRB5DIR];"
		cat $TMPDIR/cfkrb5.out.$$
		echo "\t testing in the area may fail."
	fi
	# XXX test system needs to be able to kinit in order to share w/krb5
	#$CONFIGDIR/operate_dir "share" $KRB5DIR "sec=krb5:krb5i:krb5p"

	# SSPC test dir
	create_test_fs SSPCDIR $SSPCDIR
	$CONFIGDIR/mk_srvdir $SSPCDIR > $TMPDIR/cfsspc.out.$$ 2>&1
	if (( $? != 0 )); then
		echo "WARNING, unable to create files/dirs in [$SSPCDIR];"
		cat $TMPDIR/cfsspc.out.$$
		echo "\t testing in the area may fail."
	fi
	$CONFIGDIR/operate_dir "share" $SSPCDIR

	# QUOTA test dir
	create_test_fs QUOTADIR $QUOTADIR 5m
	$CONFIGDIR/mk_srvdir $QUOTADIR > $TMPDIR/cfpubt.out.$$ 2>&1
	if (( $? != 0 )); then
		echo "WARNING, unable to create files/dirs in [$QUOTADIR];"
		cat $TMPDIR/cfpubt.out.$$
		echo "\t testing in the area may fail."
	fi
	touch $QUOTADIR/quotas
	if (( TestZFS != 1 )); then
		# also set quota for $TUSER2 and fill the quotas:
		quotaoff $QUOTADIR
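		# edquota invokes an editor on the user's quota entry; the
		# here-document below feeds it ex-style commands to change
		# "hard = 0" to "hard = 5" non-interactively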
		edquota $TUSER2 << __END > /dev/null 2>&1
:s/hard = 0/hard = 5/g
:wq
__END
		quotaon $QUOTADIR
		smf_fmri_transition_state do $QUOTA_FMRI online $SMF_TIMEOUT
		if [[ $? != 0 ]]; then
			echo "$NAME: ERROR - unable to start $QUOTA_FMRI"
			echo "\t testing in the area may fail."
		fi
	fi
	if (( iscipso == 1 )); then
		ZONEDIR=${QUOTADIR#$ZONE_PATH/root}
		zlogin $localzone "su $TUSER2 -c \
			\"cd $ZONEDIR; \
			touch file_$TUSER2.1 file_$TUSER2.2 file_$TUSER2.3; \
			mkfile 4k file_$TUSER2.4\""
	else
		su $TUSER2 -c \
			"cd $QUOTADIR; \
			touch file_$TUSER2.1 file_$TUSER2.2 file_$TUSER2.3; \
			mkfile 4k file_$TUSER2.4"
	fi
	if (( TestZFS == 1 )); then
		$CONFIGDIR/fillDisk $QUOTADIR
	fi
	$CONFIGDIR/operate_dir "share" $QUOTADIR

	# SSPCDIR2 test dir
	create_test_fs SSPCDIR2 $SSPCDIR2 3m
	$CONFIGDIR/operate_dir "share" $SSPCDIR2

	# SSPCDIR3 test dir with noxattr
	create_test_fs SSPCDIR3 $SSPCDIR3 6m noxattr
	$CONFIGDIR/operate_dir "share" $SSPCDIR3

	# NOTSHDIR - the tests require that this FS is not shared
	create_test_fs NOTSHDIR $NOTSHDIR
	create_some_files $NOTSHDIR

	# and some symlinks for mounting symlink testing
	ln -s $BASEDIR/$LONGDIR $BASEDIR/symldir2
	ln -s $BASEDIR/nosuchdir $BASEDIR/syml_nodir
	ln -s $NOTSHDIR/$RWFILE $BASEDIR/syml_nofile
	ln -s $SSPCDIR2 $BASEDIR/syml_sh_fs
	if (( iscipso == 1 )); then
		ln -s $ZONE_PATH/root/usr/lib $BASEDIR/syml_outns
	else
		ln -s /usr/lib $BASEDIR/syml_outns
	fi
	ln -s $NOTSHDIR $BASEDIR/syml_nosh_fs
	ln -s $NOTSHDIR $NOTSHDIR/syml_shnfs

	cd $BASEDIR
	ln -s ./$DIR0755 syml_dotd
	ln -s ./$DIR0755/../$RWFILE syml_dotf
	Last=$(basename $SSPCDIR3)
	ln -s $SSPCDIR3/../$Last syml_dotdot

	# check that nfs registered its protocols, including version 4;
	# give nfsd some time to register the protocols
	sleep 1
	rpcinfo -p | grep nfs | awk '{print $2}' | grep 4 \
		> $TMPDIR/rpcinfoT.out.$$ 2>&1
	if (( $? != 0 )); then
		echo "$NAME: ERROR - nfs did not register on $SERVER with v4"
		cat $TMPDIR/rpcinfoT.out.$$
		cleanup 1
	fi

	# also return the server's grace period
	grace=$($CONFIGDIR/get_tunable rfs4_grace_period K 2> $TMPDIR/dmerr.out.$$)
	if (( $? != 0 )); then
		echo "$NAME: ERROR - cannot get grace_period in $SERVER"
		echo "Output was:\n$grace\n"
		echo "Stderr was:"
		cat $TMPDIR/dmerr.out.$$
		cleanup 2
	else
		echo "SERVER_GRACE_PERIOD=$grace"
		[[ $DEBUG != 0 ]] && cat $TMPDIR/dmerr.out.$$
	fi
	# and the server's NFS mapid domain
	Sdomain=$(cat /var/run/nfs4_domain 2> $TMPDIR/dmerr.out.$$)
	if (( $? != 0 )); then
		echo "$NAME: ERROR - failed to get NFS mapid domain in $SERVER"
		echo "Output was:\nSdomain=<$Sdomain>\n"
		echo "Stderr was:"
		cat $TMPDIR/dmerr.out.$$
		cleanup 2
	else
		echo "SERVER_NFSmapid_Domain=$Sdomain"
	fi

	echo "Done - setup daemons and shared $BASEDIR OKAY."
	rm -f $CONFIGDIR/._DONE_cleanup_LOFI_for_recovery
	;;

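#
# -r: clean up (unshare and destroy/remove) the per-test LOFI/ZFS filesystems
# so the recovery tests can reboot the server; a flag file records that this
# cleanup was done so the -c case can skip it.
#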
r)
	SHARE_LIST="$SSPCDIR3 $SSPCDIR2 $SSPCDIR $PUBTDIR $QUOTADIR"
	SHARE_LIST="$SHARE_LIST $NSPCDIR $ROFSDIR $ROOTDIR $KRB5DIR"
	for fs in $SHARE_LIST $NOTSHDIR; do
		# unshare the FS if it is shared, before cleaning it up
		share | awk '{print $2}' | grep -w "$fs" > /dev/null 2>&1
		if (( $? == 0 )); then
			$CONFIGDIR/operate_dir "unshare" $fs > \
				$TMPDIR/unshare.out.$$ 2>&1
			if (( $? != 0 )); then
				echo "$NAME: WARNING - failed to unshare [$fs]"
				cat $TMPDIR/unshare.out.$$
				echo "\trecovery tests may have problems after \c"
				echo "rebooting the server"
			fi
		fi
		if (( TestZFS == 1 )); then
			Zfs=$(df -h $fs | grep -v 'Mounted on' | nawk '{print $1}')
			zfs destroy -f -r $Zfs > $TMPDIR/zfsDes.out.$$ 2>&1
			if (( $? != 0 )); then
				echo "WARNING, unable to cleanup [$fs];"
				cat $TMPDIR/zfsDes.out.$$
				echo "\trecovery tests may have problems \c"
				echo "after rebooting the server"
			else
				rm -fr $fs > /dev/null 2>&1
			fi
		else
			$CONFIGDIR/setupFS -c $fs > $TMPDIR/cleanFS.out.$$ 2>&1
			if (( $? != 0 )); then
				echo "WARNING, unable to cleanup [$fs];"
				cat $TMPDIR/cleanFS.out.$$
				echo "\trecovery tests may have problems \c"
				echo "after rebooting the server"
			fi
		fi
	done
	echo "Done - cleanup LOFI/ZFS FS's OKAY."
	touch $CONFIGDIR/._DONE_cleanup_LOFI_for_recovery
	;;

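#
# -c: full cleanup of the server -- restore the nfs tunables and the original
# passwd/group files, unshare and destroy the test filesystems, disable the
# rquota service and remove $BASEDIR, $CONFIGDIR and $TMPDIR.
#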
c)
	# restore nfs tunable values
	if [[ -f $CONFIGDIR/$SERVER.nfs.flg ]]; then
		res=$(cat $CONFIGDIR/$SERVER.nfs.flg)
		[[ -n $res ]] && $CONFIGDIR/set_nfstunable $res \
			> $TMPDIR/nfs.out.$$ 2>&1
		if (( $? != 0 )); then
			echo "WARNING: restoring nfs tunable failed on $SERVER:"
			cat $TMPDIR/nfs.out.$$
			echo "Please restore the following nfs tunable manually: $res"
		fi
		rm -f $CONFIGDIR/$SERVER.nfs.flg > /dev/null 2>&1
	fi

	res=$(mv /etc/passwd.orig /etc/passwd 2>&1)
	res=$(mv /etc/group.orig /etc/group 2>&1)
	res=$(chmod 444 /etc/passwd /etc/group 2>&1)
	res=$(pwconv 2>&1)
	res=$(/usr/xpg4/bin/egrep "2345678." /etc/passwd > $TMPDIR/pwerr.out.$$)
	n=$(cat $TMPDIR/pwerr.out.$$ | wc -l | nawk '{print $1}')
	if (( n != 0 )); then
		echo "WARNING: removing test users failed, \
			remove the following users manually:"
		cat $TMPDIR/pwerr.out.$$
		echo "\n"
	fi
	res=$(/usr/xpg4/bin/egrep "2345678." /etc/group > $TMPDIR/grperr.out.$$)
	n=$(cat $TMPDIR/grperr.out.$$ | wc -l | nawk '{print $1}')
	if (( n != 0 )); then
		echo "WARNING: removing test groups failed, \
			remove the following groups manually:"
		cat $TMPDIR/grperr.out.$$
		echo "\n"
	fi

	if [[ ! -f $CONFIGDIR/._DONE_cleanup_LOFI_for_recovery ]]; then
		SHARE_LIST="$SSPCDIR3 $SSPCDIR2 $SSPCDIR $PUBTDIR $QUOTADIR"
		SHARE_LIST="$SHARE_LIST $NSPCDIR $ROFSDIR $ROOTDIR $KRB5DIR"
		for fs in $SHARE_LIST; do
			# KRB5DIR (last in the list) was not shared by the
			# -s setup, so stop before trying to unshare it
			[[ $fs == $KRB5DIR ]] && break
			$CONFIGDIR/operate_dir "unshare" $fs \
				> $TMPDIR/unshare.out.$$ 2>&1
			if (( $? != 0 )); then
				echo "$NAME: ERROR - failed to unshare [$fs]"
				cat $TMPDIR/unshare.out.$$
			fi
		done
		if (( TestZFS == 1 )); then
			for dir in $SHARE_LIST $NOTSHDIR; do
				ZFSn=$(zfs list | grep "$dir" | \
					nawk '{print $1}')
				if [[ -n $ZFSn ]]; then
					zfs destroy -f -r $ZFSn \
						> $TMPDIR/zfsDes.out.$$ 2>&1
					if (( $? != 0 )); then
						echo "WARNING, unable to cleanup [$dir]"
						cat $TMPDIR/zfsDes.out.$$
						echo "\t Please clean it up manually."
					else
						rm -fr $dir
					fi
				fi
			done
		else
			for dir in $SHARE_LIST $NOTSHDIR; do
				$CONFIGDIR/setupFS -c $dir \
					> $TMPDIR/cleanFS.out.$$ 2>&1
				if (( $? != 0 )); then
					echo "WARNING, unable to cleanup [$dir]"
					cat $TMPDIR/cleanFS.out.$$
					echo "\t Please clean it up manually."
				fi
			done
		fi
	fi

	unshare -p $BASEDIR > $TMPDIR/unshareb.out.$$ 2>&1
	if [[ $? != 0 ]]; then
		echo "$NAME: ERROR - failed to unshare $BASEDIR"
		cat $TMPDIR/unshareb.out.$$
	fi
	smf_fmri_transition_state do $QUOTA_FMRI disabled $SMF_TIMEOUT
	if [[ $? != 0 ]]; then
		echo "$NAME: ERROR - unable to disable $QUOTA_FMRI"
		echo "\t testing in the area may fail."
	fi

	if (( TestZFS == 1 )); then
		destroy_zpool
	fi
	rm -rf $BASEDIR

	echo "Done - cleanup test filesystems/daemons OKAY"
	rm -rf $CONFIGDIR/* $TMPDIR
	exit 0
	;;

\?)
	echo $Usage
	exit 2
	;;
esac

cleanup 0