#! /usr/bin/ksh -p
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or http://www.opensolaris.org/os/licensing.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#

#
# Copyright 2009 Sun Microsystems, Inc. All rights reserved.
# Use is subject to license terms.
#
NAME=$(basename $0)

. $STF_SUITE/include/sharemnt.kshlib
. $STC_GENUTILS/include/nfs-tx.kshlib

export STC_GENUTILS_DEBUG=$STC_GENUTILS_DEBUG:$SHAREMNT_DEBUG
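# Turn on shell tracing when this script's name (or "all") appears in the
# colon-separated $SHAREMNT_DEBUG list.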
[[ :$SHAREMNT_DEBUG: == *:$NAME:* \
    || :$SHAREMNT_DEBUG: == *:all:* ]] && set -x

################################################################################
#
# __stc_assertion_start
#
# ID: runtests
#
# DESCRIPTION:
# Print the time taken to share $NUM_SHARES entries on the server. For ufs,
# the entries are written to /etc/dfs/dfstab and shared with the "shareall"
# command; for zfs, they are shared by setting the zfs property "sharenfs"
# to "on".
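# For illustration, the two sharing styles look roughly like this (the path
# and dataset names here are hypothetical; the real ones come from the
# server-side sharemnt.stress script):
#   ufs: echo "share -F nfs /export/sharemnt_0_stress" >> /etc/dfs/dfstab
#        shareall
#   zfs: zfs set sharenfs=on $ZFSPOOL/sharemnt_0_stress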
#
# STRATEGY:
# 1. Share $NUM_SHARES entries on the server and print the time
# 2. On the client, mount and umount each exported dir
# 3. Unshare all exported dirs on the server
#
# TESTABILITY: explicit
#
# TEST_AUTOMATION_LEVEL: automated
#
# __stc_assertion_end
#
################################################################################

function stress_cleanup {
        [[ :$SHAREMNT_DEBUG: == *:$NAME:* \
            || :$SHAREMNT_DEBUG: == *:all:* ]] && set -x

        if [[ $tag == 001 || $tag == 002 ]]; then
                # do umount on client
                umountall -h $SERVER > $STF_TMPDIR/umountall.out.$$ 2>&1
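                # Any leftover test mounts mean umountall did not fully
                # succeed.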
                mount | grep $STRESSMNT/mntdir_ >> $STF_TMPDIR/umountall.out.$$
                if (( $? == 0 )); then
                        echo "$NAME: umountall failed, \c"
                        echo "please do cleanup manually"
                        cat $STF_TMPDIR/umountall.out.$$
                fi

                # do unshare on server
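                # Pass the client's debug settings through to the server-side
                # script; a literal "RSH" entry enables full remote tracing.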
                typeset SRVDEBUG=$STC_GENUTILS_DEBUG:$SHAREMNT_DEBUG
                [[ :$SRVDEBUG: == *:RSH:* ]] && SRVDEBUG=all
                typeset CMD="export SHAREMNT_DEBUG=$SRVDEBUG; "
                CMD=$CMD"ksh $SRV_TMPDIR/sharemnt.stress -t cleanup_$tag"
                RSH root $SERVER "$CMD" > $STF_TMPDIR/rsh.out.$$ 2>&1
                if (( $? != 0 )); then
                        echo "\n$Tname: running <$CMD> failed, please do \c"
                        echo "cleanup manually."
                        cat $STF_TMPDIR/rsh.out.$$
                fi
                [[ :$SRVDEBUG: == *:all:* ]] && cat $STF_TMPDIR/rsh.out.$$
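                # Brief pause, to allow the server time to finish processing
                # the unshare before we return.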
                sleep 10
        fi

        cleanup $1
}

USAGE="Usage: runtests Test_name tag"
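# e.g. "runtests stress_001 001" (illustrative; the test harness supplies
# the actual Test_name and tag)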

if (( $# < 2 )); then
        echo "$USAGE"
        exit $STF_UNRESOLVED
fi

typeset Tname=$1
typeset tag=$2
ZONENAME=$(zonename)
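# $ZONENAME is used below to scope pgrep/pkill to this zone's processes.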

# NOTICE: When the variable "tag" is 002, we run this script to do the
# stress_002 test, which is only for zfs testing. Any future change to
# this script SHOULD keep it runnable for both the zfs and ufs tests.
if [[ -z $ZFSPOOL && $tag == 002 ]]; then
        echo "\n$Tname: UNTESTED: this is a zfs test case, \c"
        echo "but the current test run is over non-zfs"
        exit $STF_UNTESTED
fi

# NOTICE: When the variable "tag" is 003, we run this script to do the
# stress_003 test, which at present is only for ufs testing. With zfs
# it incurs:
# 1. Slow execution. It takes about 1 hour to run with 50 entries on a
#    Sun-Fire-280R server (8G of memory, 2x750MHz sparcv9 processors)
#    when 2000 zfs filesystems are created.
# 2. System errors in sharemgr. With 2000 zfs filesystems in existence
#    and stress_003 working on 100 entries in one round, sharemgr
#    'set -p' and 'remove-share' sometimes complain 'System error'.
# So the test is skipped on zfs for now; once these issues are resolved
# it should be re-enabled there, as it is needed for both zfs and ufs.
if [[ -n $ZFSPOOL && $tag == 003 ]]; then
        echo "\n$Tname: UNTESTED: this is a ufs test case, \c"
        echo "but the current test run is over non-ufs"
        exit $STF_UNTESTED
fi

# NOTICE: When the variable "tag" is 004, we run this script to do the
# stress_004 test, which is only testable if sharemgr is available
# on the server.
if [[ $tag == 004 ]]; then
        ck_sharemgr=$(RSH root $SERVER "ls -l /usr/sbin/sharemgr 2>&1")
        if (( $? != 0 )); then
                echo "\n$Tname: RSH failed, $ck_sharemgr"
                exit $STF_UNRESOLVED
        elif [[ $ck_sharemgr == *"No such file or directory"* ]]; then
                echo "\n$Tname: UNTESTED: this is testable only \c"
                echo "if sharemgr is available on the server!\n"
                exit $STF_UNTESTED
        fi
fi

client_num=$(get_clients_num)
if (( $? != 0 )); then
        echo "\n$Tname: RSH failed, $client_num"
        exit $STF_UNRESOLVED
elif (( $client_num != 1 )); then
        echo "\n$Tname: multiple srv_shmnt files were found on the server;"
        echo "\tthe stress tests don't support multiple clients\n"
        exit $STF_UNTESTED
fi

mount | grep $STRESSMNT/mntdir_ > $STF_TMPDIR/mount.out.$$ 2>&1
if (( $? == 0 )); then
        echo "\n$Tname: Some test dirs are still mounted on the client; \c"
        echo "please umount them before running the test"
        cat $STF_TMPDIR/mount.out.$$
        cleanup $STF_UNINITIATED
fi

SRVDEBUG=$STC_GENUTILS_DEBUG:$SHAREMNT_DEBUG
[[ :$SRVDEBUG: == *:RSH:* ]] && SRVDEBUG=all
RSH root $SERVER \
    "if [[ -f $SRV_TMPDIR/sharemnt.stress ]]; then \
    export SHAREMNT_DEBUG=$SRVDEBUG; \
    $SRV_TMPDIR/sharemnt.stress -t stress_$tag; else \
    echo UNTESTED; fi" \
    > $STF_TMPDIR/rsh.out.$$ 2>&1 &
pid=$!
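
# Poll (for up to $STRESS_TIMEOUT seconds) until the server-side test
# reports a result; "grep -v ^+" filters out any xtrace ("+ cmd") lines
# so that trace output can't produce a false match.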
condition="cat $STF_TMPDIR/rsh.out.$$ | grep -v ^+ \
    | egrep \"ERROR|PASS|UNTESTED\" > /dev/null"
wait_now $STRESS_TIMEOUT "$condition"
if (( $? == 0 )); then
        cat $STF_TMPDIR/rsh.out.$$ | grep -v ^+ | grep PASS > /dev/null
        if (( $? == 0 )); then
                cat $STF_TMPDIR/rsh.out.$$
                [[ $tag == 003 || $tag == 004 ]] && stress_cleanup $STF_PASS
        else
                cat $STF_TMPDIR/rsh.out.$$ | grep -v ^+ | \
                    grep UNTESTED > /dev/null
                if (( $? == 0 )); then
                        echo "\n$Tname: no $SRV_TMPDIR/sharemnt.stress on \c"
                        echo "$SERVER. Maybe multiple clients are running."
                        cleanup $STF_UNTESTED
                else
                        echo "\n$Tname: running $SRV_TMPDIR/sharemnt.stress \c"
                        echo "on $SERVER failed:"
                        cat $STF_TMPDIR/rsh.out.$$
                        stress_cleanup $STF_FAIL
                fi
        fi
elif [[ $tag == 003 || $tag == 004 ]]; then
        echo "$Tname: test run on server timed out <$STRESS_TIMEOUT secs>"
        cat $STF_TMPDIR/rsh.out.$$
        stress_cleanup $STF_FAIL
else
        echo "$Tname: test run on server timed out <$STRESS_TIMEOUT secs>"
        cat $STF_TMPDIR/rsh.out.$$
        # We still have a chance to check the exported entries.
        share_num=$(RSH root $SERVER "cat /etc/dfs/sharetab \
            | grep $STRESSDIR/sharemnt_ | wc -l" | nawk '{print $1}' \
            2>/dev/null)
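        # Shares are numbered 0 through $NUM_SHARES, so NUM_SHARES+1
        # entries are expected in sharetab.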
        expected=$((NUM_SHARES+1))
        if [[ $expected != $share_num ]]; then
                echo "\texpected $expected directories to be exported, \c"
                echo "but got $share_num"
                kill -KILL $pid
                stress_cleanup $STF_FAIL
        fi
fi

# define mount options with an array
set -A OPTS sec=sys ro hard proto=tcp proto=udp
set -A VERS vers=4 vers=3 vers=2

i=0
while (( $i <= $NUM_SHARES )); do
        m=$((RANDOM % 5))
        n=$((RANDOM % 3))
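        # Pick a random option/version combination for each mount. When
        # is_cipso indicates a labeled (Trusted Extensions) connection,
        # only vers=4 is used; and since NFSv4 runs over TCP only,
        # "proto=udp" is never combined with "vers=4".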
        is_cipso "$Mntopts" "$SERVER"
        if (( $? != 0 )); then
                [[ ${OPTS[$m]} == "proto=udp" ]] && opt="" || opt=${OPTS[$m]}
                ver="vers=4"
        elif [[ ${OPTS[$m]} == "proto=udp" && ${VERS[$n]} == "vers=4" ]]; then
                opt=""
                ver="vers=4"
        else
                opt=${OPTS[$m]}
                ver=${VERS[$n]}
        fi

        [[ -n $opt ]] && opt=",$opt"
        mount -o $ver$opt $SERVER:$STRESSDIR/sharemnt_${i}_stress \
            $STRESSMNT/mntdir_${i}_stress >> $STF_TMPDIR/mount.out.$$ 2>&1 &
        i=$((i+1))
done

# wait for all mount commands in background to finish
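# (pgrep -z restricts the search to this zone, -P $$ to children of this
# shell, and -x to processes named exactly "mount")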
condition="! pgrep -z $ZONENAME -P $$ -x mount > /dev/null"
wait_now $STRESS_TIMEOUT "$condition"
if (( $? != 0 )); then
        echo "$Tname: mounting timed out <$STRESS_TIMEOUT secs>..."
        ps -efz $ZONENAME | grep "mount"
        cat $STF_TMPDIR/mount.out.$$
        pkill -z $ZONENAME -P $$ -x mount
        stress_cleanup $STF_FAIL
fi

num=$(mount | grep $STRESSMNT/mntdir_ | wc -l)
if (( $num != $i )); then
        echo "$Tname: mount was unsuccessful"
        echo "\tExpected to see $i entries, but got $num"
        cat $STF_TMPDIR/mount.out.$$
        mount | grep $STRESSMNT/mntdir_
        stress_cleanup $STF_FAIL
fi

stress_cleanup $STF_PASS