re #13613 rb4516 Tunables needs volatile keyword
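The change itself is confined to four NFS v2 client tunables in nfs_vfsops.c: nfs_dynamic, nfs_max_threads, nfs_async_clusters and nfs_cots_timeo lose their static storage class and gain the volatile qualifier. Without volatile the compiler is free to cache such a variable in a register or fold its initial value into the code that uses it, so a value patched at run time (for example through /etc/system or with mdb -kw) might never be observed. The userland sketch below is illustrative only and is not part of the webrev; the variable names mirror the diff, but the demo logic is hypothetical.

	/*
	 * Minimal sketch of the tunable pattern used in the diff.  Because the
	 * tunables are volatile, every access re-reads them from memory, so a
	 * value patched from outside this compilation unit takes effect.
	 */
	#include <stdio.h>

	volatile unsigned short nfs_max_threads = 8;	/* max active async threads */
	volatile unsigned int nfs_cots_timeo = 600;	/* COTS RPC timeout, tenths of a second */

	static void
	start_async_workers(void)
	{
		unsigned int started = 0;

		/*
		 * The bound is re-read on every iteration; a concurrent patch
		 * to nfs_max_threads is picked up here rather than being
		 * hoisted out of the loop by the compiler.
		 */
		while (started < nfs_max_threads) {
			printf("starting worker %u of %u (timeo %u)\n",
			    started + 1, (unsigned int)nfs_max_threads,
			    (unsigned int)nfs_cots_timeo);
			started++;
		}
	}

	int
	main(void)
	{
		start_async_workers();
		return (0);
	}

The same reasoning applies to the other tunables in the hunk: declaring them volatile guarantees that every consumer re-reads the current value instead of a compile-time copy.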
--- old/usr/src/uts/common/fs/nfs/nfs_vfsops.c
+++ new/usr/src/uts/common/fs/nfs/nfs_vfsops.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 1986, 2010, Oracle and/or its affiliates. All rights reserved.
23 + * Copyright 2013 Nexenta Systems, Inc. All rights reserved.
23 24 *
24 25 * Copyright (c) 1983,1984,1985,1986,1987,1988,1989 AT&T.
25 26 * All rights reserved.
26 27 */
27 28
28 29 #include <sys/param.h>
29 30 #include <sys/types.h>
30 31 #include <sys/systm.h>
31 32 #include <sys/cred.h>
32 33 #include <sys/vfs.h>
33 34 #include <sys/vfs_opreg.h>
34 35 #include <sys/vnode.h>
35 36 #include <sys/pathname.h>
36 37 #include <sys/sysmacros.h>
37 38 #include <sys/kmem.h>
38 39 #include <sys/mkdev.h>
39 40 #include <sys/mount.h>
40 41 #include <sys/mntent.h>
41 42 #include <sys/statvfs.h>
42 43 #include <sys/errno.h>
43 44 #include <sys/debug.h>
44 45 #include <sys/cmn_err.h>
45 46 #include <sys/utsname.h>
46 47 #include <sys/bootconf.h>
47 48 #include <sys/modctl.h>
48 49 #include <sys/acl.h>
49 50 #include <sys/flock.h>
50 51 #include <sys/policy.h>
51 52 #include <sys/zone.h>
52 53 #include <sys/class.h>
53 54 #include <sys/socket.h>
54 55 #include <sys/netconfig.h>
55 56 #include <sys/mntent.h>
56 57 #include <sys/tsol/label.h>
57 58
58 59 #include <rpc/types.h>
59 60 #include <rpc/auth.h>
60 61 #include <rpc/clnt.h>
61 62
62 63 #include <nfs/nfs.h>
63 64 #include <nfs/nfs_clnt.h>
64 65 #include <nfs/rnode.h>
65 66 #include <nfs/mount.h>
66 67 #include <nfs/nfs_acl.h>
67 68
68 69 #include <fs/fs_subr.h>
69 70
70 71 /*
71 72 * From rpcsec module (common/rpcsec).
72 73 */
73 74 extern int sec_clnt_loadinfo(struct sec_data *, struct sec_data **, model_t);
74 75 extern void sec_clnt_freeinfo(struct sec_data *);
75 76
76 77 static int pathconf_copyin(struct nfs_args *, struct pathcnf *);
77 78 static int pathconf_get(struct mntinfo *, struct nfs_args *);
78 79 static void pathconf_rele(struct mntinfo *);
79 80
80 81 /*
81 82 * The order and contents of this structure must be kept in sync with that of
82 83 * rfsreqcnt_v2_tmpl in nfs_stats.c
83 84 */
84 85 static char *rfsnames_v2[] = {
85 86 "null", "getattr", "setattr", "unused", "lookup", "readlink", "read",
86 87 "unused", "write", "create", "remove", "rename", "link", "symlink",
87 88 "mkdir", "rmdir", "readdir", "fsstat"
88 89 };
89 90
90 91 /*
91 92 * This table maps from NFS protocol number into call type.
92 93 * Zero means a "Lookup" type call
93 94 * One means a "Read" type call
94 95 * Two means a "Write" type call
95 96 * This is used to select a default time-out.
96 97 */
97 98 static uchar_t call_type_v2[] = {
98 99 0, 0, 1, 0, 0, 0, 1,
99 100 0, 2, 2, 2, 2, 2, 2,
100 101 2, 2, 1, 0
101 102 };
102 103
103 104 /*
104 105 * Similar table, but to determine which timer to use
105 106 * (only real reads and writes!)
106 107 */
107 108 static uchar_t timer_type_v2[] = {
108 109 0, 0, 0, 0, 0, 0, 1,
109 110 0, 2, 0, 0, 0, 0, 0,
110 111 0, 0, 1, 0
111 112 };
112 113
113 114 /*
114 115 * This table maps from NFS protocol number into a call type
115 116 * for the semisoft mount option.
116 117 * Zero means do not repeat operation.
117 118 * One means repeat.
118 119 */
119 120 static uchar_t ss_call_type_v2[] = {
120 121 0, 0, 1, 0, 0, 0, 0,
121 122 0, 1, 1, 1, 1, 1, 1,
122 123 1, 1, 0, 0
123 124 };
124 125
125 126 /*
126 127 * nfs vfs operations.
127 128 */
128 129 static int nfs_mount(vfs_t *, vnode_t *, struct mounta *, cred_t *);
129 130 static int nfs_unmount(vfs_t *, int, cred_t *);
130 131 static int nfs_root(vfs_t *, vnode_t **);
131 132 static int nfs_statvfs(vfs_t *, struct statvfs64 *);
132 133 static int nfs_sync(vfs_t *, short, cred_t *);
133 134 static int nfs_vget(vfs_t *, vnode_t **, fid_t *);
134 135 static int nfs_mountroot(vfs_t *, whymountroot_t);
135 136 static void nfs_freevfs(vfs_t *);
136 137
137 138 static int nfsrootvp(vnode_t **, vfs_t *, struct servinfo *,
138 139 int, cred_t *, zone_t *);
139 140
140 141 /*
141 142 * Initialize the vfs structure
142 143 */
143 144
144 145 int nfsfstyp;
145 146 vfsops_t *nfs_vfsops;
146 147
147 148 /*
148 149 * Debug variable to check for rdma based
149 150 * transport startup and cleanup. Controlled
150 151 * through /etc/system. Off by default.
151 152 */
152 153 int rdma_debug = 0;
153 154
154 155 int
155 156 nfsinit(int fstyp, char *name)
156 157 {
157 158 static const fs_operation_def_t nfs_vfsops_template[] = {
158 159 VFSNAME_MOUNT, { .vfs_mount = nfs_mount },
159 160 VFSNAME_UNMOUNT, { .vfs_unmount = nfs_unmount },
160 161 VFSNAME_ROOT, { .vfs_root = nfs_root },
161 162 VFSNAME_STATVFS, { .vfs_statvfs = nfs_statvfs },
162 163 VFSNAME_SYNC, { .vfs_sync = nfs_sync },
163 164 VFSNAME_VGET, { .vfs_vget = nfs_vget },
164 165 VFSNAME_MOUNTROOT, { .vfs_mountroot = nfs_mountroot },
165 166 VFSNAME_FREEVFS, { .vfs_freevfs = nfs_freevfs },
166 167 NULL, NULL
167 168 };
168 169 int error;
169 170
170 171 error = vfs_setfsops(fstyp, nfs_vfsops_template, &nfs_vfsops);
171 172 if (error != 0) {
172 173 zcmn_err(GLOBAL_ZONEID, CE_WARN,
173 174 "nfsinit: bad vfs ops template");
174 175 return (error);
175 176 }
176 177
177 178 error = vn_make_ops(name, nfs_vnodeops_template, &nfs_vnodeops);
178 179 if (error != 0) {
179 180 (void) vfs_freevfsops_by_type(fstyp);
180 181 zcmn_err(GLOBAL_ZONEID, CE_WARN,
181 182 "nfsinit: bad vnode ops template");
182 183 return (error);
183 184 }
184 185
185 186
186 187 nfsfstyp = fstyp;
187 188
188 189 return (0);
189 190 }
190 191
191 192 void
192 193 nfsfini(void)
193 194 {
194 195 }
195 196
196 197 static void
197 198 nfs_free_args(struct nfs_args *nargs, nfs_fhandle *fh)
198 199 {
199 200
200 201 if (fh)
201 202 kmem_free(fh, sizeof (*fh));
202 203
203 204 if (nargs->pathconf) {
204 205 kmem_free(nargs->pathconf, sizeof (struct pathcnf));
205 206 nargs->pathconf = NULL;
206 207 }
207 208
208 209 if (nargs->knconf) {
209 210 if (nargs->knconf->knc_protofmly)
210 211 kmem_free(nargs->knconf->knc_protofmly, KNC_STRSIZE);
211 212 if (nargs->knconf->knc_proto)
212 213 kmem_free(nargs->knconf->knc_proto, KNC_STRSIZE);
213 214 kmem_free(nargs->knconf, sizeof (*nargs->knconf));
214 215 nargs->knconf = NULL;
215 216 }
216 217
217 218 if (nargs->fh) {
218 219 kmem_free(nargs->fh, strlen(nargs->fh) + 1);
219 220 nargs->fh = NULL;
220 221 }
221 222
222 223 if (nargs->hostname) {
223 224 kmem_free(nargs->hostname, strlen(nargs->hostname) + 1);
224 225 nargs->hostname = NULL;
225 226 }
226 227
227 228 if (nargs->addr) {
228 229 if (nargs->addr->buf) {
229 230 ASSERT(nargs->addr->len);
230 231 kmem_free(nargs->addr->buf, nargs->addr->len);
231 232 }
232 233 kmem_free(nargs->addr, sizeof (struct netbuf));
233 234 nargs->addr = NULL;
234 235 }
235 236
236 237 if (nargs->syncaddr) {
237 238 ASSERT(nargs->syncaddr->len);
238 239 if (nargs->syncaddr->buf) {
239 240 ASSERT(nargs->syncaddr->len);
240 241 kmem_free(nargs->syncaddr->buf, nargs->syncaddr->len);
241 242 }
242 243 kmem_free(nargs->syncaddr, sizeof (struct netbuf));
243 244 nargs->syncaddr = NULL;
244 245 }
245 246
246 247 if (nargs->netname) {
247 248 kmem_free(nargs->netname, strlen(nargs->netname) + 1);
248 249 nargs->netname = NULL;
249 250 }
250 251
251 252 if (nargs->nfs_ext_u.nfs_extA.secdata) {
252 253 sec_clnt_freeinfo(nargs->nfs_ext_u.nfs_extA.secdata);
253 254 nargs->nfs_ext_u.nfs_extA.secdata = NULL;
254 255 }
255 256 }
256 257
257 258 static int
258 259 nfs_copyin(char *data, int datalen, struct nfs_args *nargs, nfs_fhandle *fh)
259 260 {
260 261
261 262 int error;
262 263 size_t nlen; /* length of netname */
263 264 size_t hlen; /* length of hostname */
264 265 char netname[MAXNETNAMELEN+1]; /* server's netname */
265 266 struct netbuf addr; /* server's address */
266 267 struct netbuf syncaddr; /* AUTH_DES time sync addr */
267 268 struct knetconfig *knconf; /* transport knetconfig structure */
268 269 struct sec_data *secdata = NULL; /* security data */
269 270 STRUCT_DECL(nfs_args, args); /* nfs mount arguments */
270 271 STRUCT_DECL(knetconfig, knconf_tmp);
271 272 STRUCT_DECL(netbuf, addr_tmp);
272 273 int flags;
273 274 struct pathcnf *pc; /* Pathconf */
274 275 char *p, *pf;
275 276 char *userbufptr;
276 277
277 278
278 279 bzero(nargs, sizeof (*nargs));
279 280
280 281 STRUCT_INIT(args, get_udatamodel());
281 282 bzero(STRUCT_BUF(args), SIZEOF_STRUCT(nfs_args, DATAMODEL_NATIVE));
282 283 if (copyin(data, STRUCT_BUF(args), MIN(datalen, STRUCT_SIZE(args))))
283 284 return (EFAULT);
284 285
285 286 nargs->wsize = STRUCT_FGET(args, wsize);
286 287 nargs->rsize = STRUCT_FGET(args, rsize);
287 288 nargs->timeo = STRUCT_FGET(args, timeo);
288 289 nargs->retrans = STRUCT_FGET(args, retrans);
289 290 nargs->acregmin = STRUCT_FGET(args, acregmin);
290 291 nargs->acregmax = STRUCT_FGET(args, acregmax);
291 292 nargs->acdirmin = STRUCT_FGET(args, acdirmin);
292 293 nargs->acdirmax = STRUCT_FGET(args, acdirmax);
293 294
294 295 flags = STRUCT_FGET(args, flags);
295 296 nargs->flags = flags;
296 297
297 298
298 299 addr.buf = NULL;
299 300 syncaddr.buf = NULL;
300 301
301 302 /*
302 303 * Allocate space for a knetconfig structure and
303 304 * its strings and copy in from user-land.
304 305 */
305 306 knconf = kmem_zalloc(sizeof (*knconf), KM_SLEEP);
306 307 STRUCT_INIT(knconf_tmp, get_udatamodel());
307 308 if (copyin(STRUCT_FGETP(args, knconf), STRUCT_BUF(knconf_tmp),
308 309 STRUCT_SIZE(knconf_tmp))) {
309 310 kmem_free(knconf, sizeof (*knconf));
310 311 return (EFAULT);
311 312 }
312 313
313 314 knconf->knc_semantics = STRUCT_FGET(knconf_tmp, knc_semantics);
314 315 knconf->knc_protofmly = STRUCT_FGETP(knconf_tmp, knc_protofmly);
315 316 knconf->knc_proto = STRUCT_FGETP(knconf_tmp, knc_proto);
316 317 if (get_udatamodel() != DATAMODEL_LP64) {
317 318 knconf->knc_rdev = expldev(STRUCT_FGET(knconf_tmp, knc_rdev));
318 319 } else {
319 320 knconf->knc_rdev = STRUCT_FGET(knconf_tmp, knc_rdev);
320 321 }
321 322
322 323 pf = kmem_alloc(KNC_STRSIZE, KM_SLEEP);
323 324 p = kmem_alloc(KNC_STRSIZE, KM_SLEEP);
324 325 error = copyinstr(knconf->knc_protofmly, pf, KNC_STRSIZE, NULL);
325 326 if (error) {
326 327 kmem_free(pf, KNC_STRSIZE);
327 328 kmem_free(p, KNC_STRSIZE);
328 329 kmem_free(knconf, sizeof (*knconf));
329 330 return (error);
330 331 }
331 332
332 333 error = copyinstr(knconf->knc_proto, p, KNC_STRSIZE, NULL);
333 334 if (error) {
334 335 kmem_free(pf, KNC_STRSIZE);
335 336 kmem_free(p, KNC_STRSIZE);
336 337 kmem_free(knconf, sizeof (*knconf));
337 338 return (error);
338 339 }
339 340
340 341
341 342 knconf->knc_protofmly = pf;
342 343 knconf->knc_proto = p;
343 344
344 345 nargs->knconf = knconf;
345 346
346 347 /* Copyin pathconf if there is one */
347 348 if (STRUCT_FGETP(args, pathconf) != NULL) {
348 349 pc = kmem_alloc(sizeof (*pc), KM_SLEEP);
349 350 error = pathconf_copyin(STRUCT_BUF(args), pc);
350 351 nargs->pathconf = pc;
351 352 if (error)
352 353 goto errout;
353 354 }
354 355
355 356 /*
356 357 * Get server address
357 358 */
358 359 STRUCT_INIT(addr_tmp, get_udatamodel());
359 360 if (copyin(STRUCT_FGETP(args, addr), STRUCT_BUF(addr_tmp),
360 361 STRUCT_SIZE(addr_tmp))) {
361 362 error = EFAULT;
362 363 goto errout;
363 364 }
364 365 nargs->addr = kmem_alloc(sizeof (struct netbuf), KM_SLEEP);
365 366 userbufptr = STRUCT_FGETP(addr_tmp, buf);
366 367 addr.len = STRUCT_FGET(addr_tmp, len);
367 368 addr.buf = kmem_alloc(addr.len, KM_SLEEP);
368 369 addr.maxlen = addr.len;
369 370 if (copyin(userbufptr, addr.buf, addr.len)) {
370 371 kmem_free(addr.buf, addr.len);
371 372 error = EFAULT;
372 373 goto errout;
373 374 }
374 375 bcopy(&addr, nargs->addr, sizeof (struct netbuf));
375 376
376 377 /*
377 378 * Get the root fhandle
378 379 */
379 380
380 381 if (copyin(STRUCT_FGETP(args, fh), &fh->fh_buf, NFS_FHSIZE)) {
381 382 error = EFAULT;
382 383 goto errout;
383 384 }
384 385 fh->fh_len = NFS_FHSIZE;
385 386
386 387 /*
387 388 * Get server's hostname
388 389 */
389 390 if (flags & NFSMNT_HOSTNAME) {
390 391 error = copyinstr(STRUCT_FGETP(args, hostname), netname,
391 392 sizeof (netname), &hlen);
392 393 if (error)
393 394 goto errout;
394 395 nargs->hostname = kmem_zalloc(hlen, KM_SLEEP);
395 396 (void) strcpy(nargs->hostname, netname);
396 397
397 398 } else {
398 399 nargs->hostname = NULL;
399 400 }
400 401
401 402
402 403 /*
403 404 * If there are syncaddr and netname data, load them in. This is
404 405 * to support data needed for NFSV4 when AUTH_DH is the negotiated
405 406 * flavor via SECINFO. (instead of using MOUNT protocol in V3).
406 407 */
407 408 netname[0] = '\0';
408 409 if (flags & NFSMNT_SECURE) {
409 410 if (STRUCT_FGETP(args, syncaddr) == NULL) {
410 411 error = EINVAL;
411 412 goto errout;
412 413 }
413 414 /* get syncaddr */
414 415 STRUCT_INIT(addr_tmp, get_udatamodel());
415 416 if (copyin(STRUCT_FGETP(args, syncaddr), STRUCT_BUF(addr_tmp),
416 417 STRUCT_SIZE(addr_tmp))) {
417 418 error = EINVAL;
418 419 goto errout;
419 420 }
420 421 userbufptr = STRUCT_FGETP(addr_tmp, buf);
421 422 syncaddr.len = STRUCT_FGET(addr_tmp, len);
422 423 syncaddr.buf = kmem_alloc(syncaddr.len, KM_SLEEP);
423 424 syncaddr.maxlen = syncaddr.len;
424 425 if (copyin(userbufptr, syncaddr.buf, syncaddr.len)) {
425 426 kmem_free(syncaddr.buf, syncaddr.len);
426 427 error = EFAULT;
427 428 goto errout;
428 429 }
429 430
430 431 nargs->syncaddr = kmem_alloc(sizeof (struct netbuf), KM_SLEEP);
431 432 bcopy(&syncaddr, nargs->syncaddr, sizeof (struct netbuf));
432 433
433 434 ASSERT(STRUCT_FGETP(args, netname));
434 435 if (copyinstr(STRUCT_FGETP(args, netname), netname,
435 436 sizeof (netname), &nlen)) {
436 437 error = EFAULT;
437 438 goto errout;
438 439 }
439 440
440 441 netname[nlen] = '\0';
441 442 nargs->netname = kmem_zalloc(nlen, KM_SLEEP);
442 443 (void) strcpy(nargs->netname, netname);
443 444 }
444 445
445 446 /*
446 447 	 * Get the extension data which has the security data structure.
447 448 * This includes data for AUTH_SYS as well.
448 449 */
449 450 if (flags & NFSMNT_NEWARGS) {
450 451 nargs->nfs_args_ext = STRUCT_FGET(args, nfs_args_ext);
451 452 if (nargs->nfs_args_ext == NFS_ARGS_EXTA ||
452 453 nargs->nfs_args_ext == NFS_ARGS_EXTB) {
453 454 /*
454 455 * Indicating the application is using the new
455 456 * sec_data structure to pass in the security
456 457 * data.
457 458 */
458 459 if (STRUCT_FGETP(args,
459 460 nfs_ext_u.nfs_extA.secdata) != NULL) {
460 461 error = sec_clnt_loadinfo(
461 462 (struct sec_data *)STRUCT_FGETP(args,
462 463 nfs_ext_u.nfs_extA.secdata), &secdata,
463 464 get_udatamodel());
464 465 }
465 466 nargs->nfs_ext_u.nfs_extA.secdata = secdata;
466 467 }
467 468 }
468 469
469 470 if (error)
470 471 goto errout;
471 472
472 473 /*
473 474 * Failover support:
474 475 *
475 476 * We may have a linked list of nfs_args structures,
476 477 * which means the user is looking for failover. If
477 478 * the mount is either not "read-only" or "soft",
478 479 * we want to bail out with EINVAL.
479 480 */
480 481 if (nargs->nfs_args_ext == NFS_ARGS_EXTB)
481 482 nargs->nfs_ext_u.nfs_extB.next =
482 483 STRUCT_FGETP(args, nfs_ext_u.nfs_extB.next);
483 484
484 485 errout:
485 486 if (error)
486 487 nfs_free_args(nargs, fh);
487 488
488 489 return (error);
489 490 }
490 491
491 492
492 493 /*
493 494 * nfs mount vfsop
494 495 * Set up mount info record and attach it to vfs struct.
495 496 */
496 497 static int
497 498 nfs_mount(vfs_t *vfsp, vnode_t *mvp, struct mounta *uap, cred_t *cr)
498 499 {
499 500 char *data = uap->dataptr;
500 501 int error;
501 502 vnode_t *rtvp; /* the server's root */
502 503 mntinfo_t *mi; /* mount info, pointed at by vfs */
503 504 size_t nlen; /* length of netname */
504 505 struct knetconfig *knconf; /* transport knetconfig structure */
505 506 struct knetconfig *rdma_knconf; /* rdma transport structure */
506 507 rnode_t *rp;
507 508 struct servinfo *svp; /* nfs server info */
508 509 struct servinfo *svp_tail = NULL; /* previous nfs server info */
509 510 struct servinfo *svp_head; /* first nfs server info */
510 511 struct servinfo *svp_2ndlast; /* 2nd last in the server info list */
511 512 struct sec_data *secdata; /* security data */
512 513 struct nfs_args *args = NULL;
513 514 int flags, addr_type;
514 515 zone_t *zone = nfs_zone();
515 516 zone_t *mntzone = NULL;
516 517 nfs_fhandle *fhandle = NULL;
517 518
518 519 if ((error = secpolicy_fs_mount(cr, mvp, vfsp)) != 0)
519 520 return (error);
520 521
521 522 if (mvp->v_type != VDIR)
522 523 return (ENOTDIR);
523 524
524 525 /*
525 526 * get arguments
526 527 *
527 528 * nfs_args is now versioned and is extensible, so
528 529 * uap->datalen might be different from sizeof (args)
529 530 * in a compatible situation.
530 531 */
531 532 more:
532 533
533 534 if (!(uap->flags & MS_SYSSPACE)) {
534 535 if (args == NULL)
535 536 args = kmem_alloc(sizeof (struct nfs_args), KM_SLEEP);
536 537 else {
537 538 nfs_free_args(args, fhandle);
538 539 fhandle = NULL;
539 540 }
540 541 if (fhandle == NULL)
541 542 fhandle = kmem_zalloc(sizeof (nfs_fhandle), KM_SLEEP);
542 543 error = nfs_copyin(data, uap->datalen, args, fhandle);
543 544 if (error) {
544 545 if (args)
545 546 kmem_free(args, sizeof (*args));
546 547 return (error);
547 548 }
548 549 } else {
549 550 args = (struct nfs_args *)data;
550 551 fhandle = (nfs_fhandle *)args->fh;
551 552 }
552 553
553 554
554 555 flags = args->flags;
555 556
556 557 if (uap->flags & MS_REMOUNT) {
557 558 size_t n;
558 559 char name[FSTYPSZ];
559 560
560 561 if (uap->flags & MS_SYSSPACE)
561 562 error = copystr(uap->fstype, name, FSTYPSZ, &n);
562 563 else
563 564 error = copyinstr(uap->fstype, name, FSTYPSZ, &n);
564 565
565 566 if (error) {
566 567 if (error == ENAMETOOLONG)
567 568 return (EINVAL);
568 569 return (error);
569 570 }
570 571
571 572
572 573 /*
573 574 * This check is to ensure that the request is a
574 575 * genuine nfs remount request.
575 576 */
576 577
577 578 if (strncmp(name, "nfs", 3) != 0)
578 579 return (EINVAL);
579 580
580 581 /*
581 582 * If the request changes the locking type, disallow the
582 583 * remount,
583 584 * because it's questionable whether we can transfer the
584 585 * locking state correctly.
585 586 *
586 587 * Remounts need to save the pathconf information.
587 588 * Part of the infamous static kludge.
588 589 */
589 590
590 591 if ((mi = VFTOMI(vfsp)) != NULL) {
591 592 uint_t new_mi_llock;
592 593 uint_t old_mi_llock;
593 594
594 595 new_mi_llock = (flags & NFSMNT_LLOCK) ? 1 : 0;
595 596 old_mi_llock = (mi->mi_flags & MI_LLOCK) ? 1 : 0;
596 597 if (old_mi_llock != new_mi_llock)
597 598 return (EBUSY);
598 599 }
599 600 error = pathconf_get((struct mntinfo *)vfsp->vfs_data, args);
600 601
601 602 if (!(uap->flags & MS_SYSSPACE)) {
602 603 nfs_free_args(args, fhandle);
603 604 kmem_free(args, sizeof (*args));
604 605 }
605 606
606 607 return (error);
607 608 }
608 609
609 610 mutex_enter(&mvp->v_lock);
610 611 if (!(uap->flags & MS_OVERLAY) &&
611 612 (mvp->v_count != 1 || (mvp->v_flag & VROOT))) {
612 613 mutex_exit(&mvp->v_lock);
613 614 if (!(uap->flags & MS_SYSSPACE)) {
614 615 nfs_free_args(args, fhandle);
615 616 kmem_free(args, sizeof (*args));
616 617 }
617 618 return (EBUSY);
618 619 }
619 620 mutex_exit(&mvp->v_lock);
620 621
621 622 /* make sure things are zeroed for errout: */
622 623 rtvp = NULL;
623 624 mi = NULL;
624 625 secdata = NULL;
625 626
626 627 /*
627 628 * A valid knetconfig structure is required.
628 629 */
629 630 if (!(flags & NFSMNT_KNCONF)) {
630 631 if (!(uap->flags & MS_SYSSPACE)) {
631 632 nfs_free_args(args, fhandle);
632 633 kmem_free(args, sizeof (*args));
633 634 }
634 635 return (EINVAL);
635 636 }
636 637
637 638 if ((strlen(args->knconf->knc_protofmly) >= KNC_STRSIZE) ||
638 639 (strlen(args->knconf->knc_proto) >= KNC_STRSIZE)) {
639 640 if (!(uap->flags & MS_SYSSPACE)) {
640 641 nfs_free_args(args, fhandle);
641 642 kmem_free(args, sizeof (*args));
642 643 }
643 644 return (EINVAL);
644 645 }
645 646
646 647
647 648 /*
648 649 * Allocate a servinfo struct.
649 650 */
650 651 svp = kmem_zalloc(sizeof (*svp), KM_SLEEP);
651 652 mutex_init(&svp->sv_lock, NULL, MUTEX_DEFAULT, NULL);
652 653 if (svp_tail) {
653 654 svp_2ndlast = svp_tail;
654 655 svp_tail->sv_next = svp;
655 656 } else {
656 657 svp_head = svp;
657 658 svp_2ndlast = svp;
658 659 }
659 660
660 661 svp_tail = svp;
661 662
662 663 /*
663 664 * Get knetconfig and server address
664 665 */
665 666 svp->sv_knconf = args->knconf;
666 667 args->knconf = NULL;
667 668
668 669 if (args->addr == NULL || args->addr->buf == NULL) {
669 670 error = EINVAL;
670 671 goto errout;
671 672 }
672 673
673 674 svp->sv_addr.maxlen = args->addr->maxlen;
674 675 svp->sv_addr.len = args->addr->len;
675 676 svp->sv_addr.buf = args->addr->buf;
676 677 args->addr->buf = NULL;
677 678
678 679 /*
679 680 * Get the root fhandle
680 681 */
681 682 ASSERT(fhandle);
682 683
683 684 bcopy(&fhandle->fh_buf, &svp->sv_fhandle.fh_buf, fhandle->fh_len);
684 685 svp->sv_fhandle.fh_len = fhandle->fh_len;
685 686
686 687 /*
687 688 * Get server's hostname
688 689 */
689 690 if (flags & NFSMNT_HOSTNAME) {
690 691 if (args->hostname == NULL) {
691 692 error = EINVAL;
692 693 goto errout;
693 694 }
694 695 svp->sv_hostnamelen = strlen(args->hostname) + 1;
695 696 svp->sv_hostname = args->hostname;
696 697 args->hostname = NULL;
697 698 } else {
698 699 char *p = "unknown-host";
699 700 svp->sv_hostnamelen = strlen(p) + 1;
700 701 svp->sv_hostname = kmem_zalloc(svp->sv_hostnamelen, KM_SLEEP);
701 702 (void) strcpy(svp->sv_hostname, p);
702 703 }
703 704
704 705
705 706 /*
706 707 * RDMA MOUNT SUPPORT FOR NFS v2:
707 708 	 * Establish whether it is possible to use RDMA; if so, overload the
708 709 	 * knconf with an RDMA-specific knconf and free the original.
709 710 */
710 711 if ((flags & NFSMNT_TRYRDMA) || (flags & NFSMNT_DORDMA)) {
711 712 /*
712 713 * Determine the addr type for RDMA, IPv4 or v6.
713 714 */
714 715 if (strcmp(svp->sv_knconf->knc_protofmly, NC_INET) == 0)
715 716 addr_type = AF_INET;
716 717 else if (strcmp(svp->sv_knconf->knc_protofmly, NC_INET6) == 0)
717 718 addr_type = AF_INET6;
718 719
719 720 if (rdma_reachable(addr_type, &svp->sv_addr,
720 721 &rdma_knconf) == 0) {
721 722 /*
722 723 			 * If successful, hijack the original knconf and
723 724 			 * replace it with a new one, depending on the flags.
724 725 */
725 726 svp->sv_origknconf = svp->sv_knconf;
726 727 svp->sv_knconf = rdma_knconf;
727 728 knconf = rdma_knconf;
728 729 } else {
729 730 if (flags & NFSMNT_TRYRDMA) {
730 731 #ifdef DEBUG
731 732 if (rdma_debug)
732 733 zcmn_err(getzoneid(), CE_WARN,
733 734 "no RDMA onboard, revert\n");
734 735 #endif
735 736 }
736 737
737 738 if (flags & NFSMNT_DORDMA) {
738 739 /*
739 740 				 * If proto=rdma is specified and no RDMA
740 741 				 * path to this server is available, then
741 742 				 * ditch this server.
742 743 				 * This is not included in the mountable
743 744 				 * server list or the replica list.
744 745 				 * Check whether more servers are specified
745 746 				 * (the failover case); otherwise bail out of the mount.
746 747 */
747 748 if (args->nfs_args_ext == NFS_ARGS_EXTB &&
748 749 args->nfs_ext_u.nfs_extB.next != NULL) {
749 750 data = (char *)
750 751 args->nfs_ext_u.nfs_extB.next;
751 752 if (uap->flags & MS_RDONLY &&
752 753 !(flags & NFSMNT_SOFT)) {
753 754 if (svp_head->sv_next == NULL) {
754 755 svp_tail = NULL;
755 756 svp_2ndlast = NULL;
756 757 sv_free(svp_head);
757 758 goto more;
758 759 } else {
759 760 svp_tail = svp_2ndlast;
760 761 svp_2ndlast->sv_next =
761 762 NULL;
762 763 sv_free(svp);
763 764 goto more;
764 765 }
765 766 }
766 767 } else {
767 768 /*
768 769 * This is the last server specified
769 770 * in the nfs_args list passed down
770 771 				 * and it's not rdma capable.
771 772 */
772 773 if (svp_head->sv_next == NULL) {
773 774 /*
774 775 					 * Is this the only one?
775 776 */
776 777 error = EINVAL;
777 778 #ifdef DEBUG
778 779 if (rdma_debug)
779 780 zcmn_err(getzoneid(),
780 781 CE_WARN,
781 782 "No RDMA srv");
782 783 #endif
783 784 goto errout;
784 785 } else {
785 786 /*
786 787 					 * There is a list, since some
787 788 					 * servers specified before this
788 789 					 * one passed all requirements.
789 790 */
790 791 svp_tail = svp_2ndlast;
791 792 svp_2ndlast->sv_next = NULL;
792 793 sv_free(svp);
793 794 goto proceed;
794 795 }
795 796 }
796 797 }
797 798 }
798 799 }
799 800
800 801 /*
801 802 	 * Get the extension data which has the new security data structure.
802 803 */
803 804 if (flags & NFSMNT_NEWARGS) {
804 805 switch (args->nfs_args_ext) {
805 806 case NFS_ARGS_EXTA:
806 807 case NFS_ARGS_EXTB:
807 808 /*
808 809 * Indicating the application is using the new
809 810 * sec_data structure to pass in the security
810 811 * data.
811 812 */
812 813 secdata = args->nfs_ext_u.nfs_extA.secdata;
813 814 if (secdata == NULL) {
814 815 error = EINVAL;
815 816 } else {
816 817 /*
817 818 				 * Need to validate the flavor here if
818 819 				 * sysspace; userspace was already
819 820 				 * validated by the nfs_copyin function.
820 821 */
821 822 switch (secdata->rpcflavor) {
822 823 case AUTH_NONE:
823 824 case AUTH_UNIX:
824 825 case AUTH_LOOPBACK:
825 826 case AUTH_DES:
826 827 case RPCSEC_GSS:
827 828 break;
828 829 default:
829 830 error = EINVAL;
830 831 goto errout;
831 832 }
832 833 }
833 834 args->nfs_ext_u.nfs_extA.secdata = NULL;
834 835 break;
835 836
836 837 default:
837 838 error = EINVAL;
838 839 break;
839 840 }
840 841 } else if (flags & NFSMNT_SECURE) {
841 842 /*
842 843 * Keep this for backward compatibility to support
843 844 * NFSMNT_SECURE/NFSMNT_RPCTIMESYNC flags.
844 845 */
845 846 if (args->syncaddr == NULL || args->syncaddr->buf == NULL) {
846 847 error = EINVAL;
847 848 goto errout;
848 849 }
849 850
850 851 /*
851 852 * get time sync address.
852 853 */
853 854 if (args->syncaddr == NULL) {
854 855 error = EFAULT;
855 856 goto errout;
856 857 }
857 858
858 859 /*
859 860 * Move security related data to the sec_data structure.
860 861 */
861 862 {
862 863 dh_k4_clntdata_t *data;
863 864 char *pf, *p;
864 865
865 866 secdata = kmem_alloc(sizeof (*secdata), KM_SLEEP);
866 867 if (flags & NFSMNT_RPCTIMESYNC)
867 868 secdata->flags |= AUTH_F_RPCTIMESYNC;
868 869 data = kmem_alloc(sizeof (*data), KM_SLEEP);
869 870 bcopy(args->syncaddr, &data->syncaddr,
870 871 sizeof (*args->syncaddr));
871 872
872 873
873 874 /*
874 875 * duplicate the knconf information for the
875 876 * new opaque data.
876 877 */
877 878 data->knconf = kmem_alloc(sizeof (*knconf), KM_SLEEP);
878 879 *data->knconf = *knconf;
879 880 pf = kmem_alloc(KNC_STRSIZE, KM_SLEEP);
880 881 p = kmem_alloc(KNC_STRSIZE, KM_SLEEP);
881 882 bcopy(knconf->knc_protofmly, pf, KNC_STRSIZE);
882 883 			bcopy(knconf->knc_proto, p, KNC_STRSIZE);
883 884 data->knconf->knc_protofmly = pf;
884 885 data->knconf->knc_proto = p;
885 886
886 887 /* move server netname to the sec_data structure */
887 888 nlen = strlen(args->hostname) + 1;
888 889 if (nlen != 0) {
889 890 data->netname = kmem_alloc(nlen, KM_SLEEP);
890 891 bcopy(args->hostname, data->netname, nlen);
891 892 data->netnamelen = (int)nlen;
892 893 }
893 894 secdata->secmod = secdata->rpcflavor = AUTH_DES;
894 895 secdata->data = (caddr_t)data;
895 896 }
896 897 } else {
897 898 secdata = kmem_alloc(sizeof (*secdata), KM_SLEEP);
898 899 secdata->secmod = secdata->rpcflavor = AUTH_UNIX;
899 900 secdata->data = NULL;
900 901 }
901 902 svp->sv_secdata = secdata;
902 903
903 904 /*
904 905 * See bug 1180236.
905 906 * If mount secure failed, we will fall back to AUTH_NONE
906 907 * and try again. nfs3rootvp() will turn this back off.
907 908 *
908 909 * The NFS Version 2 mount uses GETATTR and STATFS procedures.
909 910 * The server does not care if these procedures have the proper
910 911 * authentication flavor, so if mount retries using AUTH_NONE
911 912 * that does not require a credential setup for root then the
912 913 * automounter would work without requiring root to be
913 914 * keylogged into AUTH_DES.
914 915 */
915 916 if (secdata->rpcflavor != AUTH_UNIX &&
916 917 secdata->rpcflavor != AUTH_LOOPBACK)
917 918 secdata->flags |= AUTH_F_TRYNONE;
918 919
919 920 /*
920 921 * Failover support:
921 922 *
922 923 * We may have a linked list of nfs_args structures,
923 924 * which means the user is looking for failover. If
924 925 * the mount is either not "read-only" or "soft",
925 926 * we want to bail out with EINVAL.
926 927 */
927 928 if (args->nfs_args_ext == NFS_ARGS_EXTB &&
928 929 args->nfs_ext_u.nfs_extB.next != NULL) {
929 930 if (uap->flags & MS_RDONLY && !(flags & NFSMNT_SOFT)) {
930 931 data = (char *)args->nfs_ext_u.nfs_extB.next;
931 932 goto more;
932 933 }
933 934 error = EINVAL;
934 935 goto errout;
935 936 }
936 937
937 938 /*
938 939 * Determine the zone we're being mounted into.
939 940 */
940 941 zone_hold(mntzone = zone); /* start with this assumption */
941 942 if (getzoneid() == GLOBAL_ZONEID) {
942 943 zone_rele(mntzone);
943 944 mntzone = zone_find_by_path(refstr_value(vfsp->vfs_mntpt));
944 945 ASSERT(mntzone != NULL);
945 946 if (mntzone != zone) {
946 947 error = EBUSY;
947 948 goto errout;
948 949 }
949 950 }
950 951
951 952 if (is_system_labeled()) {
952 953 error = nfs_mount_label_policy(vfsp, &svp->sv_addr,
953 954 svp->sv_knconf, cr);
954 955
955 956 if (error > 0)
956 957 goto errout;
957 958
958 959 if (error == -1) {
959 960 /* change mount to read-only to prevent write-down */
960 961 vfs_setmntopt(vfsp, MNTOPT_RO, NULL, 0);
961 962 }
962 963 }
963 964
964 965 /*
965 966 * Stop the mount from going any further if the zone is going away.
966 967 */
967 968 if (zone_status_get(mntzone) >= ZONE_IS_SHUTTING_DOWN) {
968 969 error = EBUSY;
969 970 goto errout;
970 971 }
971 972
972 973 /*
973 974 * Get root vnode.
974 975 */
975 976 proceed:
976 977 error = nfsrootvp(&rtvp, vfsp, svp_head, flags, cr, mntzone);
977 978
978 979 if (error)
979 980 goto errout;
980 981
981 982 /*
982 983 * Set option fields in the mount info record
983 984 */
984 985 mi = VTOMI(rtvp);
985 986
986 987 if (svp_head->sv_next)
987 988 mi->mi_flags |= MI_LLOCK;
988 989
989 990 error = nfs_setopts(rtvp, DATAMODEL_NATIVE, args);
990 991 if (!error) {
991 992 /* static pathconf kludge */
992 993 error = pathconf_get(mi, args);
993 994 }
994 995
995 996 errout:
996 997 if (rtvp != NULL) {
997 998 if (error) {
998 999 rp = VTOR(rtvp);
999 1000 if (rp->r_flags & RHASHED)
1000 1001 rp_rmhash(rp);
1001 1002 }
1002 1003 VN_RELE(rtvp);
1003 1004 }
1004 1005
1005 1006 if (error) {
1006 1007 sv_free(svp_head);
1007 1008 if (mi != NULL) {
1008 1009 nfs_async_stop(vfsp);
1009 1010 nfs_async_manager_stop(vfsp);
1010 1011 if (mi->mi_io_kstats) {
1011 1012 kstat_delete(mi->mi_io_kstats);
1012 1013 mi->mi_io_kstats = NULL;
1013 1014 }
1014 1015 if (mi->mi_ro_kstats) {
1015 1016 kstat_delete(mi->mi_ro_kstats);
1016 1017 mi->mi_ro_kstats = NULL;
1017 1018 }
1018 1019 nfs_free_mi(mi);
1019 1020 }
1020 1021 }
1021 1022
1022 1023 if (!(uap->flags & MS_SYSSPACE)) {
1023 1024 nfs_free_args(args, fhandle);
1024 1025 kmem_free(args, sizeof (*args));
1025 1026 }
1026 1027
1027 1028 if (mntzone != NULL)
1028 1029 zone_rele(mntzone);
1029 1030
1030 1031 return (error);
1031 1032 }
1032 1033
1033 1034 /*
1034 1035 * The pathconf information is kept on a linked list of kmem_alloc'ed
1035 1036 * structs. We search the list & add a new struct iff there is no other
1036 1037 * struct with the same information.
1037 1038 * See sys/pathconf.h for ``the rest of the story.''
1038 1039 */
1039 1040 static struct pathcnf *allpc = NULL;
1040 1041
1041 1042 static int
1042 1043 pathconf_copyin(struct nfs_args *args, struct pathcnf *pc)
1043 1044 {
1044 1045 STRUCT_DECL(pathcnf, pc_tmp);
1045 1046 STRUCT_HANDLE(nfs_args, ap);
1046 1047 int i;
1047 1048 model_t model;
1048 1049
1049 1050 model = get_udatamodel();
1050 1051 STRUCT_INIT(pc_tmp, model);
1051 1052 STRUCT_SET_HANDLE(ap, model, args);
1052 1053
1053 1054 if ((STRUCT_FGET(ap, flags) & NFSMNT_POSIX) &&
1054 1055 STRUCT_FGETP(ap, pathconf) != NULL) {
1055 1056 if (copyin(STRUCT_FGETP(ap, pathconf), STRUCT_BUF(pc_tmp),
1056 1057 STRUCT_SIZE(pc_tmp)))
1057 1058 return (EFAULT);
1058 1059 if (_PC_ISSET(_PC_ERROR, STRUCT_FGET(pc_tmp, pc_mask)))
1059 1060 return (EINVAL);
1060 1061
1061 1062 pc->pc_link_max = STRUCT_FGET(pc_tmp, pc_link_max);
1062 1063 pc->pc_max_canon = STRUCT_FGET(pc_tmp, pc_max_canon);
1063 1064 pc->pc_max_input = STRUCT_FGET(pc_tmp, pc_max_input);
1064 1065 pc->pc_name_max = STRUCT_FGET(pc_tmp, pc_name_max);
1065 1066 pc->pc_path_max = STRUCT_FGET(pc_tmp, pc_path_max);
1066 1067 pc->pc_pipe_buf = STRUCT_FGET(pc_tmp, pc_pipe_buf);
1067 1068 pc->pc_vdisable = STRUCT_FGET(pc_tmp, pc_vdisable);
1068 1069 pc->pc_xxx = STRUCT_FGET(pc_tmp, pc_xxx);
1069 1070 for (i = 0; i < _PC_N; i++)
1070 1071 pc->pc_mask[i] = STRUCT_FGET(pc_tmp, pc_mask[i]);
1071 1072 }
1072 1073 return (0);
1073 1074 }
1074 1075
1075 1076 static int
1076 1077 pathconf_get(struct mntinfo *mi, struct nfs_args *args)
1077 1078 {
1078 1079 struct pathcnf *p, *pc;
1079 1080
1080 1081 pc = args->pathconf;
1081 1082 if (mi->mi_pathconf != NULL) {
1082 1083 pathconf_rele(mi);
1083 1084 mi->mi_pathconf = NULL;
1084 1085 }
1085 1086
1086 1087 if (args->flags & NFSMNT_POSIX && args->pathconf != NULL) {
1087 1088 if (_PC_ISSET(_PC_ERROR, pc->pc_mask))
1088 1089 return (EINVAL);
1089 1090
1090 1091 for (p = allpc; p != NULL; p = p->pc_next) {
1091 1092 if (PCCMP(p, pc) == 0)
1092 1093 break;
1093 1094 }
1094 1095 if (p != NULL) {
1095 1096 mi->mi_pathconf = p;
1096 1097 p->pc_refcnt++;
1097 1098 } else {
1098 1099 p = kmem_alloc(sizeof (*p), KM_SLEEP);
1099 1100 bcopy(pc, p, sizeof (struct pathcnf));
1100 1101 p->pc_next = allpc;
1101 1102 p->pc_refcnt = 1;
1102 1103 allpc = mi->mi_pathconf = p;
1103 1104 }
1104 1105 }
1105 1106 return (0);
1106 1107 }
1107 1108
1108 1109 /*
1109 1110 * release the static pathconf information
1110 1111 */
1111 1112 static void
1112 1113 pathconf_rele(struct mntinfo *mi)
1113 1114 {
1114 1115 if (mi->mi_pathconf != NULL) {
1115 1116 if (--mi->mi_pathconf->pc_refcnt == 0) {
1116 1117 struct pathcnf *p;
1117 1118 struct pathcnf *p2;
1118 1119
1119 1120 p2 = p = allpc;
1120 1121 while (p != NULL && p != mi->mi_pathconf) {
1121 1122 p2 = p;
1122 1123 p = p->pc_next;
1123 1124 }
1124 1125 if (p == NULL) {
1125 1126 panic("mi->pathconf");
1126 1127 /*NOTREACHED*/
1127 1128 }
1128 1129 if (p == allpc)
1129 1130 allpc = p->pc_next;
1130 1131 else
1131 1132 p2->pc_next = p->pc_next;
1132 1133 kmem_free(p, sizeof (*p));
1133 1134 mi->mi_pathconf = NULL;
1134 1135 }
1135 1136 }
1136 1137 }
1137 1138
1138 -static int nfs_dynamic = 1; /* global variable to enable dynamic retrans. */
1139 -static ushort_t nfs_max_threads = 8; /* max number of active async threads */
1140 -static uint_t nfs_async_clusters = 1; /* # of reqs from each async queue */
1141 -static uint_t nfs_cots_timeo = NFS_COTS_TIMEO;
1139 +volatile int nfs_dynamic = 1; /* global variable to enable dynamic retrans. */
1140 +volatile ushort_t nfs_max_threads = 8; /* max number of active async threads */
1141 +volatile uint_t nfs_async_clusters = 1; /* # of reqs from each async queue */
1142 +volatile uint_t nfs_cots_timeo = NFS_COTS_TIMEO;
1142 1143
1143 1144 static int
1144 1145 nfsrootvp(vnode_t **rtvpp, vfs_t *vfsp, struct servinfo *svp,
1145 1146 int flags, cred_t *cr, zone_t *zone)
1146 1147 {
1147 1148 vnode_t *rtvp;
1148 1149 mntinfo_t *mi;
1149 1150 dev_t nfs_dev;
1150 1151 struct vattr va;
1151 1152 int error;
1152 1153 rnode_t *rp;
1153 1154 int i;
1154 1155 struct nfs_stats *nfsstatsp;
1155 1156 cred_t *lcr = NULL, *tcr = cr;
1156 1157
1157 1158 nfsstatsp = zone_getspecific(nfsstat_zone_key, nfs_zone());
1158 1159 ASSERT(nfsstatsp != NULL);
1159 1160
1160 1161 /*
1161 1162 * Create a mount record and link it to the vfs struct.
1162 1163 */
1163 1164 mi = kmem_zalloc(sizeof (*mi), KM_SLEEP);
1164 1165 mutex_init(&mi->mi_lock, NULL, MUTEX_DEFAULT, NULL);
1165 1166 mutex_init(&mi->mi_remap_lock, NULL, MUTEX_DEFAULT, NULL);
1166 1167 mi->mi_flags = MI_ACL | MI_EXTATTR;
1167 1168 if (!(flags & NFSMNT_SOFT))
1168 1169 mi->mi_flags |= MI_HARD;
1169 1170 if ((flags & NFSMNT_SEMISOFT))
1170 1171 mi->mi_flags |= MI_SEMISOFT;
1171 1172 if ((flags & NFSMNT_NOPRINT))
1172 1173 mi->mi_flags |= MI_NOPRINT;
1173 1174 if (flags & NFSMNT_INT)
1174 1175 mi->mi_flags |= MI_INT;
1175 1176 mi->mi_retrans = NFS_RETRIES;
1176 1177 if (svp->sv_knconf->knc_semantics == NC_TPI_COTS_ORD ||
1177 1178 svp->sv_knconf->knc_semantics == NC_TPI_COTS)
1178 1179 mi->mi_timeo = nfs_cots_timeo;
1179 1180 else
1180 1181 mi->mi_timeo = NFS_TIMEO;
1181 1182 mi->mi_prog = NFS_PROGRAM;
1182 1183 mi->mi_vers = NFS_VERSION;
1183 1184 mi->mi_rfsnames = rfsnames_v2;
1184 1185 mi->mi_reqs = nfsstatsp->nfs_stats_v2.rfsreqcnt_ptr;
1185 1186 mi->mi_call_type = call_type_v2;
1186 1187 mi->mi_ss_call_type = ss_call_type_v2;
1187 1188 mi->mi_timer_type = timer_type_v2;
1188 1189 mi->mi_aclnames = aclnames_v2;
1189 1190 mi->mi_aclreqs = nfsstatsp->nfs_stats_v2.aclreqcnt_ptr;
1190 1191 mi->mi_acl_call_type = acl_call_type_v2;
1191 1192 mi->mi_acl_ss_call_type = acl_ss_call_type_v2;
1192 1193 mi->mi_acl_timer_type = acl_timer_type_v2;
1193 1194 cv_init(&mi->mi_failover_cv, NULL, CV_DEFAULT, NULL);
1194 1195 mi->mi_servers = svp;
1195 1196 mi->mi_curr_serv = svp;
1196 1197 mi->mi_acregmin = SEC2HR(ACREGMIN);
1197 1198 mi->mi_acregmax = SEC2HR(ACREGMAX);
1198 1199 mi->mi_acdirmin = SEC2HR(ACDIRMIN);
1199 1200 mi->mi_acdirmax = SEC2HR(ACDIRMAX);
1200 1201
1201 1202 if (nfs_dynamic)
1202 1203 mi->mi_flags |= MI_DYNAMIC;
1203 1204
1204 1205 if (flags & NFSMNT_DIRECTIO)
1205 1206 mi->mi_flags |= MI_DIRECTIO;
1206 1207
1207 1208 /*
1208 1209 * Make a vfs struct for nfs. We do this here instead of below
1209 1210 * because rtvp needs a vfs before we can do a getattr on it.
1210 1211 *
1211 1212 * Assign a unique device id to the mount
1212 1213 */
1213 1214 mutex_enter(&nfs_minor_lock);
1214 1215 do {
1215 1216 nfs_minor = (nfs_minor + 1) & MAXMIN32;
1216 1217 nfs_dev = makedevice(nfs_major, nfs_minor);
1217 1218 } while (vfs_devismounted(nfs_dev));
1218 1219 mutex_exit(&nfs_minor_lock);
1219 1220
1220 1221 vfsp->vfs_dev = nfs_dev;
1221 1222 vfs_make_fsid(&vfsp->vfs_fsid, nfs_dev, nfsfstyp);
1222 1223 vfsp->vfs_data = (caddr_t)mi;
1223 1224 vfsp->vfs_fstype = nfsfstyp;
1224 1225 vfsp->vfs_bsize = NFS_MAXDATA;
1225 1226
1226 1227 /*
1227 1228 * Initialize fields used to support async putpage operations.
1228 1229 */
1229 1230 for (i = 0; i < NFS_ASYNC_TYPES; i++)
1230 1231 mi->mi_async_clusters[i] = nfs_async_clusters;
1231 1232 mi->mi_async_init_clusters = nfs_async_clusters;
1232 1233 mi->mi_async_curr[NFS_ASYNC_QUEUE] =
1233 1234 mi->mi_async_curr[NFS_ASYNC_PGOPS_QUEUE] = &mi->mi_async_reqs[0];
1234 1235 mi->mi_max_threads = nfs_max_threads;
1235 1236 mutex_init(&mi->mi_async_lock, NULL, MUTEX_DEFAULT, NULL);
1236 1237 cv_init(&mi->mi_async_reqs_cv, NULL, CV_DEFAULT, NULL);
1237 1238 cv_init(&mi->mi_async_work_cv[NFS_ASYNC_QUEUE], NULL, CV_DEFAULT, NULL);
1238 1239 cv_init(&mi->mi_async_work_cv[NFS_ASYNC_PGOPS_QUEUE], NULL,
1239 1240 CV_DEFAULT, NULL);
1240 1241 cv_init(&mi->mi_async_cv, NULL, CV_DEFAULT, NULL);
1241 1242
1242 1243 mi->mi_vfsp = vfsp;
1243 1244 mi->mi_zone = zone;
1244 1245 zone_init_ref(&mi->mi_zone_ref);
1245 1246 zone_hold_ref(zone, &mi->mi_zone_ref, ZONE_REF_NFS);
1246 1247 nfs_mi_zonelist_add(mi);
1247 1248
1248 1249 /*
1249 1250 * Make the root vnode, use it to get attributes,
1250 1251 * then remake it with the attributes.
1251 1252 */
1252 1253 rtvp = makenfsnode((fhandle_t *)svp->sv_fhandle.fh_buf,
1253 1254 NULL, vfsp, gethrtime(), cr, NULL, NULL);
1254 1255
1255 1256 va.va_mask = AT_ALL;
1256 1257
1257 1258 /*
1258 1259 * If the uid is set then set the creds for secure mounts
1259 1260 * by proxy processes such as automountd.
1260 1261 */
1261 1262 if (svp->sv_secdata->uid != 0 &&
1262 1263 svp->sv_secdata->rpcflavor == RPCSEC_GSS) {
1263 1264 lcr = crdup(cr);
1264 1265 (void) crsetugid(lcr, svp->sv_secdata->uid, crgetgid(cr));
1265 1266 tcr = lcr;
1266 1267 }
1267 1268
1268 1269 error = nfsgetattr(rtvp, &va, tcr);
1269 1270 if (error)
1270 1271 goto bad;
1271 1272 rtvp->v_type = va.va_type;
1272 1273
1273 1274 /*
1274 1275 * Poll every server to get the filesystem stats; we're
1275 1276 * only interested in the server's transfer size, and we
1276 1277 * want the minimum.
1277 1278 *
1278 1279 * While we're looping, we'll turn off AUTH_F_TRYNONE,
1279 1280 * which is only for the mount operation.
1280 1281 */
1281 1282
1282 1283 mi->mi_tsize = MIN(NFS_MAXDATA, nfstsize());
1283 1284 mi->mi_stsize = MIN(NFS_MAXDATA, nfstsize());
1284 1285
1285 1286 for (svp = mi->mi_servers; svp != NULL; svp = svp->sv_next) {
1286 1287 struct nfsstatfs fs;
1287 1288 int douprintf;
1288 1289
1289 1290 douprintf = 1;
1290 1291 mi->mi_curr_serv = svp;
1291 1292
1292 1293 error = rfs2call(mi, RFS_STATFS, xdr_fhandle,
1293 1294 (caddr_t)svp->sv_fhandle.fh_buf, xdr_statfs, (caddr_t)&fs,
1294 1295 tcr, &douprintf, &fs.fs_status, 0, NULL);
1295 1296 if (error)
1296 1297 goto bad;
1297 1298 mi->mi_stsize = MIN(mi->mi_stsize, fs.fs_tsize);
1298 1299 svp->sv_secdata->flags &= ~AUTH_F_TRYNONE;
1299 1300 }
1300 1301 mi->mi_curr_serv = mi->mi_servers;
1301 1302 mi->mi_curread = mi->mi_tsize;
1302 1303 mi->mi_curwrite = mi->mi_stsize;
1303 1304
1304 1305 /*
1305 1306 * Start the manager thread responsible for handling async worker
1306 1307 * threads.
1307 1308 */
1308 1309 VFS_HOLD(vfsp); /* add reference for thread */
1309 1310 mi->mi_manager_thread = zthread_create(NULL, 0, nfs_async_manager,
1310 1311 vfsp, 0, minclsyspri);
1311 1312 ASSERT(mi->mi_manager_thread != NULL);
1312 1313
1313 1314 /*
1314 1315 * Initialize kstats
1315 1316 */
1316 1317 nfs_mnt_kstat_init(vfsp);
1317 1318
1318 1319 mi->mi_type = rtvp->v_type;
1319 1320
1320 1321 *rtvpp = rtvp;
1321 1322 if (lcr != NULL)
1322 1323 crfree(lcr);
1323 1324
1324 1325 return (0);
1325 1326 bad:
1326 1327 /*
1327 1328 * An error occurred somewhere, need to clean up...
1328 1329 * We need to release our reference to the root vnode and
1329 1330 * destroy the mntinfo struct that we just created.
1330 1331 */
1331 1332 if (lcr != NULL)
1332 1333 crfree(lcr);
1333 1334 rp = VTOR(rtvp);
1334 1335 if (rp->r_flags & RHASHED)
1335 1336 rp_rmhash(rp);
1336 1337 VN_RELE(rtvp);
1337 1338 nfs_async_stop(vfsp);
1338 1339 nfs_async_manager_stop(vfsp);
1339 1340 if (mi->mi_io_kstats) {
1340 1341 kstat_delete(mi->mi_io_kstats);
1341 1342 mi->mi_io_kstats = NULL;
1342 1343 }
1343 1344 if (mi->mi_ro_kstats) {
1344 1345 kstat_delete(mi->mi_ro_kstats);
1345 1346 mi->mi_ro_kstats = NULL;
1346 1347 }
1347 1348 nfs_free_mi(mi);
1348 1349 *rtvpp = NULL;
1349 1350 return (error);
1350 1351 }
1351 1352
1352 1353 /*
1353 1354 * vfs operations
1354 1355 */
1355 1356 static int
1356 1357 nfs_unmount(vfs_t *vfsp, int flag, cred_t *cr)
1357 1358 {
1358 1359 mntinfo_t *mi;
1359 1360 ushort_t omax;
1360 1361
1361 1362 if (secpolicy_fs_unmount(cr, vfsp) != 0)
1362 1363 return (EPERM);
1363 1364
1364 1365 mi = VFTOMI(vfsp);
1365 1366 if (flag & MS_FORCE) {
1366 1367
1367 1368 vfsp->vfs_flag |= VFS_UNMOUNTED;
1368 1369
1369 1370 /*
1370 1371 * We are about to stop the async manager.
1371 1372 * Let every one know not to schedule any
1372 1373 * more async requests.
1373 1374 */
1374 1375 mutex_enter(&mi->mi_async_lock);
1375 1376 mi->mi_max_threads = 0;
1376 1377 NFS_WAKEALL_ASYNC_WORKERS(mi->mi_async_work_cv);
1377 1378 mutex_exit(&mi->mi_async_lock);
1378 1379
1379 1380 /*
1380 1381 * We need to stop the manager thread explicitly; the worker
1381 1382 * threads can time out and exit on their own.
1382 1383 */
1383 1384 nfs_async_manager_stop(vfsp);
1384 1385 destroy_rtable(vfsp, cr);
1385 1386 if (mi->mi_io_kstats) {
1386 1387 kstat_delete(mi->mi_io_kstats);
1387 1388 mi->mi_io_kstats = NULL;
1388 1389 }
1389 1390 if (mi->mi_ro_kstats) {
1390 1391 kstat_delete(mi->mi_ro_kstats);
1391 1392 mi->mi_ro_kstats = NULL;
1392 1393 }
1393 1394 return (0);
1394 1395 }
1395 1396 /*
1396 1397 * Wait until all asynchronous putpage operations on
1397 1398 * this file system are complete before flushing rnodes
1398 1399 * from the cache.
1399 1400 */
1400 1401 omax = mi->mi_max_threads;
1401 1402 if (nfs_async_stop_sig(vfsp)) {
1402 1403 return (EINTR);
1403 1404 }
1404 1405 rflush(vfsp, cr);
1405 1406 /*
1406 1407 * If there are any active vnodes on this file system,
1407 1408 * then the file system is busy and can't be umounted.
1408 1409 */
1409 1410 if (check_rtable(vfsp)) {
1410 1411 mutex_enter(&mi->mi_async_lock);
1411 1412 mi->mi_max_threads = omax;
1412 1413 mutex_exit(&mi->mi_async_lock);
1413 1414 return (EBUSY);
1414 1415 }
1415 1416 /*
1416 1417 * The unmount can't fail from now on; stop the manager thread.
1417 1418 */
1418 1419 nfs_async_manager_stop(vfsp);
1419 1420 /*
1420 1421 * Destroy all rnodes belonging to this file system from the
1421 1422 * rnode hash queues and purge any resources allocated to
1422 1423 * them.
1423 1424 */
1424 1425 destroy_rtable(vfsp, cr);
1425 1426 if (mi->mi_io_kstats) {
1426 1427 kstat_delete(mi->mi_io_kstats);
1427 1428 mi->mi_io_kstats = NULL;
1428 1429 }
1429 1430 if (mi->mi_ro_kstats) {
1430 1431 kstat_delete(mi->mi_ro_kstats);
1431 1432 mi->mi_ro_kstats = NULL;
1432 1433 }
1433 1434 return (0);
1434 1435 }
1435 1436
1436 1437 /*
1437 1438 * find root of nfs
1438 1439 */
1439 1440 static int
1440 1441 nfs_root(vfs_t *vfsp, vnode_t **vpp)
1441 1442 {
1442 1443 mntinfo_t *mi;
1443 1444 vnode_t *vp;
1444 1445 servinfo_t *svp;
1445 1446 rnode_t *rp;
1446 1447 int error = 0;
1447 1448
1448 1449 mi = VFTOMI(vfsp);
1449 1450
1450 1451 if (nfs_zone() != mi->mi_zone)
1451 1452 return (EPERM);
1452 1453
1453 1454 svp = mi->mi_curr_serv;
1454 1455 if (svp && (svp->sv_flags & SV_ROOT_STALE)) {
1455 1456 mutex_enter(&svp->sv_lock);
1456 1457 svp->sv_flags &= ~SV_ROOT_STALE;
1457 1458 mutex_exit(&svp->sv_lock);
1458 1459 error = ENOENT;
1459 1460 }
1460 1461
1461 1462 vp = makenfsnode((fhandle_t *)mi->mi_curr_serv->sv_fhandle.fh_buf,
1462 1463 NULL, vfsp, gethrtime(), CRED(), NULL, NULL);
1463 1464
1464 1465 /*
1465 1466 * if the SV_ROOT_STALE flag was reset above, reset the
1466 1467 * RSTALE flag if needed and return an error
1467 1468 */
1468 1469 if (error == ENOENT) {
1469 1470 rp = VTOR(vp);
1470 1471 if (svp && rp->r_flags & RSTALE) {
1471 1472 mutex_enter(&rp->r_statelock);
1472 1473 rp->r_flags &= ~RSTALE;
1473 1474 mutex_exit(&rp->r_statelock);
1474 1475 }
1475 1476 VN_RELE(vp);
1476 1477 return (error);
1477 1478 }
1478 1479
1479 1480 ASSERT(vp->v_type == VNON || vp->v_type == mi->mi_type);
1480 1481
1481 1482 vp->v_type = mi->mi_type;
1482 1483
1483 1484 *vpp = vp;
1484 1485
1485 1486 return (0);
1486 1487 }
1487 1488
1488 1489 /*
1489 1490 * Get file system statistics.
1490 1491 */
1491 1492 static int
1492 1493 nfs_statvfs(vfs_t *vfsp, struct statvfs64 *sbp)
1493 1494 {
1494 1495 int error;
1495 1496 mntinfo_t *mi;
1496 1497 struct nfsstatfs fs;
1497 1498 int douprintf;
1498 1499 failinfo_t fi;
1499 1500 vnode_t *vp;
1500 1501
1501 1502 error = nfs_root(vfsp, &vp);
1502 1503 if (error)
1503 1504 return (error);
1504 1505
1505 1506 mi = VFTOMI(vfsp);
1506 1507 douprintf = 1;
1507 1508 fi.vp = vp;
1508 1509 fi.fhp = NULL; /* no need to update, filehandle not copied */
1509 1510 fi.copyproc = nfscopyfh;
1510 1511 fi.lookupproc = nfslookup;
1511 1512 fi.xattrdirproc = acl_getxattrdir2;
1512 1513
1513 1514 error = rfs2call(mi, RFS_STATFS, xdr_fhandle, (caddr_t)VTOFH(vp),
1514 1515 xdr_statfs, (caddr_t)&fs, CRED(), &douprintf, &fs.fs_status, 0,
1515 1516 &fi);
1516 1517
1517 1518 if (!error) {
1518 1519 error = geterrno(fs.fs_status);
1519 1520 if (!error) {
1520 1521 mutex_enter(&mi->mi_lock);
1521 1522 if (mi->mi_stsize) {
1522 1523 mi->mi_stsize = MIN(mi->mi_stsize, fs.fs_tsize);
1523 1524 } else {
1524 1525 mi->mi_stsize = fs.fs_tsize;
1525 1526 mi->mi_curwrite = mi->mi_stsize;
1526 1527 }
1527 1528 mutex_exit(&mi->mi_lock);
1528 1529 sbp->f_bsize = fs.fs_bsize;
1529 1530 sbp->f_frsize = fs.fs_bsize;
1530 1531 sbp->f_blocks = (fsblkcnt64_t)fs.fs_blocks;
1531 1532 sbp->f_bfree = (fsblkcnt64_t)fs.fs_bfree;
1532 1533 /*
1533 1534 * Some servers may return negative available
1534 1535 * block counts. They may do this because they
1535 1536 * calculate the number of available blocks by
1536 1537 * subtracting the number of used blocks from
1537 1538 * the total number of blocks modified by the
1538 1539 * minimum free value. For example, if the
1539 1540 			 * minimum free percentage is 10 and the file
1540 1541 * system is greater than 90 percent full, then
1541 1542 * 90 percent of the total blocks minus the
1542 1543 * actual number of used blocks may be a
1543 1544 * negative number.
1544 1545 *
1545 1546 * In this case, we need to sign extend the
1546 1547 * negative number through the assignment from
1547 1548 * the 32 bit bavail count to the 64 bit bavail
1548 1549 * count.
1549 1550 *
1550 1551 * We need to be able to discern between there
1551 1552 * just being a lot of available blocks on the
1552 1553 * file system and the case described above.
1553 1554 * We are making the assumption that it does
1554 1555 * not make sense to have more available blocks
1555 1556 * than there are free blocks. So, if there
1556 1557 * are, then we treat the number as if it were
1557 1558 * a negative number and arrange to have it
1558 1559 * sign extended when it is converted from 32
1559 1560 * bits to 64 bits.
1560 1561 */
1561 1562 if (fs.fs_bavail <= fs.fs_bfree)
1562 1563 sbp->f_bavail = (fsblkcnt64_t)fs.fs_bavail;
1563 1564 else {
1564 1565 sbp->f_bavail =
1565 1566 (fsblkcnt64_t)((long)fs.fs_bavail);
1566 1567 }
1567 1568 sbp->f_files = (fsfilcnt64_t)-1;
1568 1569 sbp->f_ffree = (fsfilcnt64_t)-1;
1569 1570 sbp->f_favail = (fsfilcnt64_t)-1;
1570 1571 sbp->f_fsid = (unsigned long)vfsp->vfs_fsid.val[0];
1571 1572 (void) strncpy(sbp->f_basetype,
1572 1573 vfssw[vfsp->vfs_fstype].vsw_name, FSTYPSZ);
1573 1574 sbp->f_flag = vf_to_stf(vfsp->vfs_flag);
1574 1575 sbp->f_namemax = (uint32_t)-1;
1575 1576 } else {
1576 1577 PURGE_STALE_FH(error, vp, CRED());
1577 1578 }
1578 1579 }
1579 1580
1580 1581 VN_RELE(vp);
1581 1582
1582 1583 return (error);
1583 1584 }
1584 1585
1585 1586 static kmutex_t nfs_syncbusy;
1586 1587
1587 1588 /*
1588 1589 * Flush dirty nfs files for file system vfsp.
1589 1590 * If vfsp == NULL, all nfs files are flushed.
1590 1591 */
1591 1592 /* ARGSUSED */
1592 1593 static int
1593 1594 nfs_sync(vfs_t *vfsp, short flag, cred_t *cr)
1594 1595 {
1595 1596 /*
1596 1597 * Cross-zone calls are OK here, since this translates to a
1597 1598 * VOP_PUTPAGE(B_ASYNC), which gets picked up by the right zone.
1598 1599 */
1599 1600 if (!(flag & SYNC_ATTR) && mutex_tryenter(&nfs_syncbusy) != 0) {
1600 1601 rflush(vfsp, cr);
1601 1602 mutex_exit(&nfs_syncbusy);
1602 1603 }
1603 1604 return (0);
1604 1605 }
1605 1606
1606 1607 /* ARGSUSED */
1607 1608 static int
1608 1609 nfs_vget(vfs_t *vfsp, vnode_t **vpp, fid_t *fidp)
1609 1610 {
1610 1611 int error;
1611 1612 vnode_t *vp;
1612 1613 struct vattr va;
1613 1614 struct nfs_fid *nfsfidp = (struct nfs_fid *)fidp;
1614 1615 zoneid_t zoneid = VFTOMI(vfsp)->mi_zone->zone_id;
1615 1616
1616 1617 if (nfs_zone() != VFTOMI(vfsp)->mi_zone)
1617 1618 return (EPERM);
1618 1619 if (fidp->fid_len != (sizeof (*nfsfidp) - sizeof (short))) {
1619 1620 #ifdef DEBUG
1620 1621 zcmn_err(zoneid, CE_WARN,
1621 1622 "nfs_vget: bad fid len, %d/%d", fidp->fid_len,
1622 1623 (int)(sizeof (*nfsfidp) - sizeof (short)));
1623 1624 #endif
1624 1625 *vpp = NULL;
1625 1626 return (ESTALE);
1626 1627 }
1627 1628
1628 1629 vp = makenfsnode((fhandle_t *)(nfsfidp->nf_data), NULL, vfsp,
1629 1630 gethrtime(), CRED(), NULL, NULL);
1630 1631
1631 1632 if (VTOR(vp)->r_flags & RSTALE) {
1632 1633 VN_RELE(vp);
1633 1634 *vpp = NULL;
1634 1635 return (ENOENT);
1635 1636 }
1636 1637
1637 1638 if (vp->v_type == VNON) {
1638 1639 va.va_mask = AT_ALL;
1639 1640 error = nfsgetattr(vp, &va, CRED());
1640 1641 if (error) {
1641 1642 VN_RELE(vp);
1642 1643 *vpp = NULL;
1643 1644 return (error);
1644 1645 }
1645 1646 vp->v_type = va.va_type;
1646 1647 }
1647 1648
1648 1649 *vpp = vp;
1649 1650
1650 1651 return (0);
1651 1652 }
1652 1653
1653 1654 /* ARGSUSED */
1654 1655 static int
1655 1656 nfs_mountroot(vfs_t *vfsp, whymountroot_t why)
1656 1657 {
1657 1658 vnode_t *rtvp;
1658 1659 char root_hostname[SYS_NMLN+1];
1659 1660 struct servinfo *svp;
1660 1661 int error;
1661 1662 int vfsflags;
1662 1663 size_t size;
1663 1664 char *root_path;
1664 1665 struct pathname pn;
1665 1666 char *name;
1666 1667 cred_t *cr;
1667 1668 struct nfs_args args; /* nfs mount arguments */
1668 1669 static char token[10];
1669 1670
1670 1671 bzero(&args, sizeof (args));
1671 1672
1672 1673 /* do this BEFORE getfile which causes xid stamps to be initialized */
1673 1674 clkset(-1L); /* hack for now - until we get time svc? */
1674 1675
1675 1676 if (why == ROOT_REMOUNT) {
1676 1677 /*
1677 1678 * Shouldn't happen.
1678 1679 */
1679 1680 panic("nfs_mountroot: why == ROOT_REMOUNT");
1680 1681 }
1681 1682
1682 1683 if (why == ROOT_UNMOUNT) {
1683 1684 /*
1684 1685 * Nothing to do for NFS.
1685 1686 */
1686 1687 return (0);
1687 1688 }
1688 1689
1689 1690 /*
1690 1691 * why == ROOT_INIT
1691 1692 */
1692 1693
1693 1694 name = token;
1694 1695 *name = 0;
1695 1696 getfsname("root", name, sizeof (token));
1696 1697
1697 1698 pn_alloc(&pn);
1698 1699 root_path = pn.pn_path;
1699 1700
1700 1701 svp = kmem_zalloc(sizeof (*svp), KM_SLEEP);
1701 1702 svp->sv_knconf = kmem_zalloc(sizeof (*svp->sv_knconf), KM_SLEEP);
1702 1703 svp->sv_knconf->knc_protofmly = kmem_alloc(KNC_STRSIZE, KM_SLEEP);
1703 1704 svp->sv_knconf->knc_proto = kmem_alloc(KNC_STRSIZE, KM_SLEEP);
1704 1705
1705 1706 /*
1706 1707 * Get server address
1707 1708 * Get the root fhandle
1708 1709 * Get server's transport
1709 1710 * Get server's hostname
1710 1711 * Get options
1711 1712 */
1712 1713 args.addr = &svp->sv_addr;
1713 1714 args.fh = (char *)&svp->sv_fhandle.fh_buf;
1714 1715 args.knconf = svp->sv_knconf;
1715 1716 args.hostname = root_hostname;
1716 1717 vfsflags = 0;
1717 1718 if (error = mount_root(*name ? name : "root", root_path, NFS_VERSION,
1718 1719 &args, &vfsflags)) {
1719 1720 nfs_cmn_err(error, CE_WARN,
1720 1721 "nfs_mountroot: mount_root failed: %m");
1721 1722 sv_free(svp);
1722 1723 pn_free(&pn);
1723 1724 return (error);
1724 1725 }
1725 1726 svp->sv_fhandle.fh_len = NFS_FHSIZE;
1726 1727 svp->sv_hostnamelen = (int)(strlen(root_hostname) + 1);
1727 1728 svp->sv_hostname = kmem_alloc(svp->sv_hostnamelen, KM_SLEEP);
1728 1729 (void) strcpy(svp->sv_hostname, root_hostname);
1729 1730
1730 1731 /*
1731 1732 * Force root partition to always be mounted with AUTH_UNIX for now
1732 1733 */
1733 1734 svp->sv_secdata = kmem_alloc(sizeof (*svp->sv_secdata), KM_SLEEP);
1734 1735 svp->sv_secdata->secmod = AUTH_UNIX;
1735 1736 svp->sv_secdata->rpcflavor = AUTH_UNIX;
1736 1737 svp->sv_secdata->data = NULL;
1737 1738
1738 1739 cr = crgetcred();
1739 1740 rtvp = NULL;
1740 1741
1741 1742 error = nfsrootvp(&rtvp, vfsp, svp, args.flags, cr, global_zone);
1742 1743
1743 1744 crfree(cr);
1744 1745
1745 1746 if (error) {
1746 1747 pn_free(&pn);
1747 1748 sv_free(svp);
1748 1749 return (error);
1749 1750 }
1750 1751
1751 1752 error = nfs_setopts(rtvp, DATAMODEL_NATIVE, &args);
1752 1753 if (error) {
1753 1754 nfs_cmn_err(error, CE_WARN,
1754 1755 "nfs_mountroot: invalid root mount options");
1755 1756 pn_free(&pn);
1756 1757 goto errout;
1757 1758 }
1758 1759
1759 1760 (void) vfs_lock_wait(vfsp);
1760 1761 vfs_add(NULL, vfsp, vfsflags);
1761 1762 vfs_unlock(vfsp);
1762 1763
1763 1764 size = strlen(svp->sv_hostname);
1764 1765 (void) strcpy(rootfs.bo_name, svp->sv_hostname);
1765 1766 rootfs.bo_name[size] = ':';
1766 1767 (void) strcpy(&rootfs.bo_name[size + 1], root_path);
1767 1768
1768 1769 pn_free(&pn);
1769 1770
1770 1771 errout:
1771 1772 if (error) {
1772 1773 sv_free(svp);
1773 1774 nfs_async_stop(vfsp);
1774 1775 nfs_async_manager_stop(vfsp);
1775 1776 }
1776 1777
1777 1778 if (rtvp != NULL)
1778 1779 VN_RELE(rtvp);
1779 1780
1780 1781 return (error);
1781 1782 }
1782 1783
1783 1784 /*
1784 1785 * Initialization routine for VFS routines. Should only be called once
1785 1786 */
1786 1787 int
1787 1788 nfs_vfsinit(void)
1788 1789 {
1789 1790 mutex_init(&nfs_syncbusy, NULL, MUTEX_DEFAULT, NULL);
1790 1791 return (0);
1791 1792 }
1792 1793
1793 1794 void
1794 1795 nfs_vfsfini(void)
1795 1796 {
1796 1797 mutex_destroy(&nfs_syncbusy);
1797 1798 }
1798 1799
1799 1800 void
1800 1801 nfs_freevfs(vfs_t *vfsp)
1801 1802 {
1802 1803 mntinfo_t *mi;
1803 1804 servinfo_t *svp;
1804 1805
1805 1806 /* free up the resources */
1806 1807 mi = VFTOMI(vfsp);
1807 1808 pathconf_rele(mi);
1808 1809 svp = mi->mi_servers;
1809 1810 mi->mi_servers = mi->mi_curr_serv = NULL;
1810 1811 sv_free(svp);
1811 1812
1812 1813 /*
1813 1814 * By this time we should have already deleted the
1814 1815 	 * mi kstats in the unmount code. If they are still around,
1815 1816 	 * something is wrong.
1816 1817 */
1817 1818 ASSERT(mi->mi_io_kstats == NULL);
1818 1819 nfs_free_mi(mi);
1819 1820 }
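As a usage note that is not part of this webrev: tunables like these are normally set at boot from /etc/system (the rdma_debug comment near the top of this file describes the same mechanism), for example with a line such as set nfs:nfs_max_threads = 16, assuming the nfs client module prefix; on a live system they can instead be patched with mdb -kw. The volatile qualifier added above is what makes such run-time patches reliably visible to the code that reads the tunables.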