11083 support NFS server in zone
Portions contributed by: Dan Kruchinin <dan.kruchinin@nexenta.com>
Portions contributed by: Stepan Zastupov <stepan.zastupov@gmail.com>
Portions contributed by: Joyce McIntosh <joyce.mcintosh@nexenta.com>
Portions contributed by: Mike Zeller <mike@mikezeller.net>
Portions contributed by: Dan McDonald <danmcd@joyent.com>
Portions contributed by: Gordon Ross <gordon.w.ross@gmail.com>
Portions contributed by: Vitaliy Gusev <gusev.vitaliy@gmail.com>
Reviewed by: Rick McNeal <rick.mcneal@nexenta.com>
Reviewed by: Rob Gittins <rob.gittins@nexenta.com>
Reviewed by: Sanjay Nadkarni <sanjay.nadkarni@nexenta.com>
Reviewed by: Jason King <jbk@joyent.com>
Reviewed by: C Fraire <cfraire@me.com>
Change-Id: I22f289d357503f9b48a0bc2482cc4328a6d43d16
--- old/usr/src/cmd/fs.d/nfs/svc/nfs-server
+++ new/usr/src/cmd/fs.d/nfs/svc/nfs-server
1 1 #!/sbin/sh
2 2 #
3 3 # CDDL HEADER START
4 4 #
5 5 # The contents of this file are subject to the terms of the
6 6 # Common Development and Distribution License (the "License").
7 7 # You may not use this file except in compliance with the License.
8 8 #
9 9 # You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10 10 # or http://www.opensolaris.org/os/licensing.
11 11 # See the License for the specific language governing permissions
12 12 # and limitations under the License.
13 13 #
14 14 # When distributing Covered Code, include this CDDL HEADER in each
15 15 # file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16 16 # If applicable, add the following below this CDDL HEADER, with the
17 17 # fields enclosed by brackets "[]" replaced with your own identifying
18 18 # information: Portions Copyright [yyyy] [name of copyright owner]
19 19 #
20 20 # CDDL HEADER END
21 21 #
22 22
23 23 #
24 24 # Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
25 -# Copyright 2015 Nexenta Systems, Inc. All rights reserved.
26 25 # Copyright 2016 Hans Rosenfeld <rosenfeld@grumpf.hope-2000.org>
26 +# Copyright 2018 Nexenta Systems, Inc. All rights reserved.
27 27 #
28 28
29 29 # Start/stop processes required for server NFS
30 30
31 31 . /lib/svc/share/smf_include.sh
32 32 . /lib/svc/share/ipf_include.sh
33 33 zone=`smf_zonename`
34 34
35 35 #
36 36 # Handling a corner case here. If we were in offline state due to an
37 37 # unsatisfied dependency, the ipf_method process wouldn't have generated
38 38 # the ipfilter configuration. When we transition to online because the
39 39 # dependency is satisfied, the start method will have to generate the
40 40 # ipfilter configuration. To avoid all possible deadlock scenarios,
41 41 # we restart ipfilter which will regenerate the ipfilter configuration
42 42 # for the entire system.
43 43 #
44 44 # The ipf_method process signals that it didn't generate ipf rules by
45 45 # removing the service's ipf file. Thus we only restart network/ipfilter
46 46 # when the file is missing.
47 47 #
48 48 configure_ipfilter()
49 49 {
50 50 ipfile=`fmri_to_file $SMF_FMRI $IPF_SUFFIX`
51 51 ip6file=`fmri_to_file $SMF_FMRI $IPF6_SUFFIX`
52 52 [ -f "$ipfile" -a -f "$ip6file" ] && return 0
53 53
54 54 #
55 55 # Nothing to do if:
56 - # - ipfilter isn't online
56 + # - ipfilter isn't online
57 57 # - global policy is 'custom'
58 58 # - service's policy is 'use_global'
59 59 #
60 60 service_check_state $IPF_FMRI $SMF_ONLINE || return 0
61 61 [ "`get_global_def_policy`" = "custom" ] && return 0
62 62 [ "`get_policy $SMF_FMRI`" = "use_global" ] && return 0
63 63
64 64 svcadm restart $IPF_FMRI
65 65 }
66 66
67 67 case "$1" in
68 68 'start')
69 - # The NFS server is not supported in a local zone
70 - if smf_is_nonglobalzone; then
71 - /usr/sbin/svcadm disable -t svc:/network/nfs/server
72 - echo "The NFS server is not supported in a local zone"
73 - sleep 5 &
74 - exit $SMF_EXIT_OK
75 - fi
76 -
77 69 # Share all file systems enabled for sharing. sharemgr understands
78 70 # regular shares and ZFS shares and will handle both. Technically,
79 71 # the shares would have been started long before getting here since
80 72 # nfsd has a dependency on them.
81 73
82 74 # restart stopped shares from the repository
83 75 /usr/sbin/sharemgr start -P nfs -a
84 76
85 77 # Options for nfsd are now set in SMF
86 78
87 79 /usr/lib/nfs/mountd
88 80 rc=$?
89 81 if [ $rc != 0 ]; then
90 82 /usr/sbin/svcadm mark -t maintenance svc:/network/nfs/server
91 83 echo "$0: mountd failed with $rc"
92 84 sleep 5 &
93 85 exit $SMF_EXIT_ERR_FATAL
94 86 fi
95 87
96 88 /usr/lib/nfs/nfsd
97 89 rc=$?
98 90 if [ $rc != 0 ]; then
99 91 /usr/sbin/svcadm mark -t maintenance svc:/network/nfs/server
100 92 echo "$0: nfsd failed with $rc"
101 93 sleep 5 &
102 94 exit $SMF_EXIT_ERR_FATAL
103 95 fi
104 96
105 97 configure_ipfilter
106 98 ;;
107 99
108 100 'refresh')
109 101 /usr/sbin/sharemgr start -P nfs -a
110 102 ;;
111 103
112 104 'stop')
113 105 /usr/bin/pkill -x -u 0,1 -z $zone '(nfsd|mountd)'
114 106
115 107 # Unshare all shared file systems using NFS
116 108
117 109 /usr/sbin/sharemgr stop -P nfs -a
118 110
119 111 # Kill any processes left in service contract
120 112 smf_kill_contract $2 TERM 1
121 113 [ $? -ne 0 ] && exit 1
122 114 ;;
123 115
124 116 'ipfilter')
125 117 #
126 118 # NFS related services are RPC. nfs/server has nfsd which has
127 119 # well-defined port number but mountd is an RPC daemon.
128 120 #
129 121 # Essentially, we generate rules for the following "services"
130 122 # - nfs/server which has nfsd and mountd
131 123 # - nfs/rquota
132 124 #
133 125 # The following services are enabled for both nfs client and
134 126 # server, if nfs/client is enabled we'll treat them as client
135 127 # services and simply allow incoming traffic.
136 128 # - nfs/status
137 129 # - nfs/nlockmgr
138 130 # - nfs/cbd
139 131 #
140 132 NFS_FMRI="svc:/network/nfs/server:default"
141 133 NFSCLI_FMRI="svc:/network/nfs/client:default"
142 134 RQUOTA_FMRI="svc:/network/nfs/rquota:default"
143 135 FMRI=$2
144 136
145 137 file=`fmri_to_file $FMRI $IPF_SUFFIX`
146 138 file6=`fmri_to_file $FMRI $IPF6_SUFFIX`
147 139 echo "# $FMRI" >$file
148 140 echo "# $FMRI" >$file6
149 141 policy=`get_policy $NFS_FMRI`
150 142
151 143 #
152 144 # nfs/server configuration is processed in the start method.
153 145 #
154 146 if [ "$FMRI" = "$NFS_FMRI" ]; then
155 147 service_check_state $FMRI $SMF_ONLINE
156 148 if [ $? -ne 0 ]; then
157 149 rm $file
158 150 exit $SMF_EXIT_OK
159 151 fi
160 152
161 153 nfs_name=`svcprop -p $FW_CONTEXT_PG/name $FMRI 2>/dev/null`
162 154 tport=`$SERVINFO -p -t -s $nfs_name 2>/dev/null`
163 155 if [ -n "$tport" ]; then
164 156 generate_rules $FMRI $policy "tcp" $tport $file
165 157 fi
166 158
167 159 tport6=`$SERVINFO -p -t6 -s $nfs_name 2>/dev/null`
168 160 if [ -n "$tport6" ]; then
169 161 generate_rules $FMRI $policy "tcp" $tport6 $file6 _6
170 162 fi
171 163
172 164 uport=`$SERVINFO -p -u -s $nfs_name 2>/dev/null`
173 165 if [ -n "$uport" ]; then
174 166 generate_rules $FMRI $policy "udp" $uport $file
175 167 fi
176 168
177 169 uport6=`$SERVINFO -p -u6 -s $nfs_name 2>/dev/null`
178 170 if [ -n "$uport6" ]; then
179 171 generate_rules $FMRI $policy "udp" $uport6 $file6 _6
180 172 fi
181 173
182 174 # mountd IPv6 ports are also reachable through IPv4, so include
183 175 # them when generating IPv4 rules.
184 176 tports=`$SERVINFO -R -p -t -s "mountd" 2>/dev/null`
185 177 tports6=`$SERVINFO -R -p -t6 -s "mountd" 2>/dev/null`
186 178 if [ -n "$tports" -o -n "$tports6" ]; then
187 179 tports=`unique_ports $tports $tports6`
188 180 for tport in $tports; do
189 181 generate_rules $FMRI $policy "tcp" \
190 182 $tport $file
191 183 done
192 184 fi
193 185
194 186 if [ -n "$tports6" ]; then
195 187 for tport6 in $tports6; do
196 188 generate_rules $FMRI $policy "tcp" \
197 189 $tport6 $file6 _6
198 190 done
199 191 fi
200 192
201 193 uports=`$SERVINFO -R -p -u -s "mountd" 2>/dev/null`
202 194 uports6=`$SERVINFO -R -p -u6 -s "mountd" 2>/dev/null`
203 195 if [ -n "$uports" -o -n "$uports6" ]; then
204 196 uports=`unique_ports $uports $uports6`
205 197 for uport in $uports; do
206 198 generate_rules $FMRI $policy "udp" \
207 199 $uport $file
208 200 done
209 201 fi
210 202
211 203 if [ -n "$uports6" ]; then
212 204 for uport6 in $uports6; do
213 205 generate_rules $FMRI $policy "udp" \
214 206 $uport6 $file6 _6
215 207 done
216 208 fi
217 209
218 210 elif [ "$FMRI" = "$RQUOTA_FMRI" ]; then
219 211 iana_name=`svcprop -p inetd/name $FMRI`
220 212
221 213 # rquota IPv6 ports are also reachable through IPv4, so include
222 214 # them when generating IPv4 rules.
223 215 tports=`$SERVINFO -R -p -t -s $iana_name 2>/dev/null`
224 216 tports6=`$SERVINFO -R -p -t6 -s $iana_name 2>/dev/null`
225 217 if [ -n "$tports" -o -n "$tports6" ]; then
226 218 tports=`unique_ports $tports $tports6`
227 219 for tport in $tports; do
228 220 generate_rules $NFS_FMRI $policy "tcp" \
229 221 $tport $file
230 222 done
231 223 fi
232 224
233 225 if [ -n "$tports6" ]; then
234 226 for tport6 in $tports6; do
235 227 generate_rules $NFS_FMRI $policy "tcp" \
236 228 $tport6 $file6 _6
237 229 done
238 230 fi
239 231
240 232 uports=`$SERVINFO -R -p -u -s $iana_name 2>/dev/null`
241 233 uports6=`$SERVINFO -R -p -u6 -s $iana_name 2>/dev/null`
242 234 if [ -n "$uports" -o -n "$uports6" ]; then
243 235 uports=`unique_ports $uports $uports6`
244 236 for uport in $uports; do
245 237 generate_rules $NFS_FMRI $policy "udp" \
246 238 $uport $file
247 239 done
248 240 fi
249 241
250 242 if [ -n "$uports6" ]; then
251 243 for uport6 in $uports6; do
252 244 generate_rules $NFS_FMRI $policy "udp" \
253 245 $uport6 $file6 _6
254 246 done
255 247 fi
256 248 else
257 249 #
258 250 # Handle the client services here
259 251 #
260 252 if service_check_state $NFSCLI_FMRI $SMF_ONLINE; then
261 253 policy=none
262 254 ip=any
263 255 fi
264 256
265 257 restarter=`svcprop -p general/restarter $FMRI 2>/dev/null`
266 258 if [ "$restarter" = "$INETDFMRI" ]; then
267 259 iana_name=`svcprop -p inetd/name $FMRI`
268 260 isrpc=`svcprop -p inetd/isrpc $FMRI`
269 261 else
270 262 iana_name=`svcprop -p $FW_CONTEXT_PG/name $FMRI`
271 263 isrpc=`svcprop -p $FW_CONTEXT_PG/isrpc $FMRI`
272 264 fi
273 265
274 266 if [ "$isrpc" = "true" ]; then
275 267 tports=`$SERVINFO -R -p -t -s $iana_name 2>/dev/null`
276 268 tports6=`$SERVINFO -R -p -t6 -s $iana_name 2>/dev/null`
277 269 uports=`$SERVINFO -R -p -u -s $iana_name 2>/dev/null`
278 270 uports6=`$SERVINFO -R -p -u6 -s $iana_name 2>/dev/null`
279 271 else
280 272 tports=`$SERVINFO -p -t -s $iana_name 2>/dev/null`
281 273 tports6=`$SERVINFO -p -t6 -s $iana_name 2>/dev/null`
282 274 uports=`$SERVINFO -p -u -s $iana_name 2>/dev/null`
283 275 uports6=`$SERVINFO -p -u6 -s $iana_name 2>/dev/null`
284 276 fi
285 277
286 278 # IPv6 ports are also reachable through IPv4, so include
287 279 # them when generating IPv4 rules.
288 280 if [ -n "$tports" -o -n "$tports6" ]; then
289 281 tports=`unique_ports $tports $tports6`
290 282 for tport in $tports; do
291 283 generate_rules $FMRI $policy "tcp" $tport $file
292 284 done
293 285 fi
294 286
295 287 if [ -n "$tports6" ]; then
296 288 for tport6 in $tports6; do
297 289 generate_rules $FMRI $policy "tcp" $tport6 $file6 _6
298 290 done
299 291 fi
300 292
301 293 if [ -n "$uports" -o -n "$uports6" ]; then
302 294 uports=`unique_ports $uports $uports6`
303 295 for uport in $uports; do
304 296 generate_rules $FMRI $policy "udp" $uport $file
305 297 done
306 298 fi
307 299
308 300 if [ -n "$uports6" ]; then
309 301 for uport6 in $uports6; do
310 302 generate_rules $FMRI $policy "udp" $uport6 $file6 _6
311 303 done
312 304 fi
313 305 fi
314 306
315 307 ;;
316 308
317 309 *)
318 310 echo "Usage: $0 { start | stop | refresh }"
319 311 exit 1
320 312 ;;
321 313 esac
322 314 exit $SMF_EXIT_OK
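
Note on testing: with the non-global-zone guard removed from the start method,
nfs/server can now be enabled inside a zone like any other SMF service. A
minimal smoke-test sketch; the zone name and share path are hypothetical:

    # from the global zone
    zlogin testzone

    # inside the zone: enable the server plus its dependencies
    svcadm enable -r svc:/network/nfs/server:default
    svcs -x nfs/server              # expect "online" with no diagnosis

    # share a path that exists in the zone and verify it is exported
    share -F nfs -o rw /export
    share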
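
Note on configure_ipfilter: the helper restarts network/ipfilter only when the
ipf_method left no rule file behind, which is its signal that rules were never
generated. The same check can be reproduced by hand; this sketch sources the
same helpers the method does, so none of the names below are invented:

    . /lib/svc/share/smf_include.sh
    . /lib/svc/share/ipf_include.sh

    fmri=svc:/network/nfs/server:default
    ipfile=`fmri_to_file $fmri $IPF_SUFFIX`
    if [ ! -f "$ipfile" ]; then
        # no rules on disk: a restart regenerates the ipfilter
        # configuration for the entire system
        svcadm restart svc:/network/ipfilter:default
    fi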
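
Note on the port queries: mountd and rquotad are RPC daemons without fixed
ports, so the ipfilter method asks servinfo for whatever ports are currently
registered and hands each one to generate_rules. The queries also run
standalone, assuming $SERVINFO resolves to /usr/lib/servinfo as set up by
ipf_include.sh:

    . /lib/svc/share/ipf_include.sh

    # -R performs an RPC lookup, -p prints the registered ports,
    # -t/-t6 and -u/-u6 select the transport and IP version
    $SERVINFO -R -p -t -s mountd    # TCP ports registered over IPv4
    $SERVINFO -R -p -t6 -s mountd   # TCP over IPv6; these are reachable over
                                    # IPv4 too, hence unique_ports merges them
    $SERVINFO -R -p -u -s mountd    # UDP equivalents
    $SERVINFO -R -p -u6 -s mountd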
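
Note on the stop method: pkill first removes nfsd and mountd by name within
the zone, then smf_kill_contract sends TERM to anything still in the service
contract (the contract id arrives as $2 from the restarter). To inspect that
contract by hand (the ctid 123 below is hypothetical):

    # show the contract id the restarter tracks for the service
    svcs -o fmri,ctid svc:/network/nfs/server:default

    # list processes still running in that contract
    pgrep -l -c 123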