/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/


/*
 * Copyright (c) 1989, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright 2017 Joyent, Inc.
 */

/*
 * Generic vnode operations.
 */
#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/fcntl.h>
#include <sys/flock.h>
#include <sys/statvfs.h>
#include <sys/vfs.h>
#include <sys/vnode.h>
#include <sys/proc.h>
#include <sys/user.h>
#include <sys/unistd.h>
#include <sys/cred.h>
#include <sys/poll.h>
#include <sys/debug.h>
#include <sys/cmn_err.h>
#include <sys/stream.h>
#include <fs/fs_subr.h>
#include <fs/fs_reparse.h>
#include <sys/door.h>
#include <sys/acl.h>
#include <sys/share.h>
#include <sys/file.h>
#include <sys/kmem.h>
#include <sys/nbmlock.h>
#include <acl/acl_common.h>
#include <sys/pathname.h>

static callb_cpr_t *frlock_serialize_blocked(flk_cb_when_t, void *);

/*
 * Tunable that limits the number of retries used to recover from ESTALE
 * errors.
 */
int fs_estale_retry = 5;

/*
 * Support for the reparse point door upcall.
 */
static door_handle_t reparsed_door;
static kmutex_t reparsed_door_lock;

/*
 * The associated operation is not supported by the file system.
 */
int
fs_nosys()
{
	return (ENOSYS);
}

/*
 * The associated operation is invalid (on this vnode).
 */
int
fs_inval()
{
	return (EINVAL);
}

/*
 * The associated operation is valid only for directories.
 */
int
fs_notdir()
{
	return (ENOTDIR);
}
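
/*
 * Illustrative sketch (not part of this file's code): file systems normally
 * plug these stubs straight into their vnodeops template for operations they
 * do not implement.  The template and file system names below are
 * hypothetical; fs_nosys(), the VOPNAME_* constants, the fs_operation_def_t
 * initializers, and vn_make_ops() are the real interfaces.
 *
 *	static const fs_operation_def_t myfs_vnodeops_template[] = {
 *		VOPNAME_IOCTL,		{ .error = fs_nosys },
 *		VOPNAME_POLL,		{ .vop_poll = fs_nosys_poll },
 *		VOPNAME_FRLOCK,		{ .vop_frlock = fs_frlock },
 *		NULL,			NULL
 *	};
 *
 *	error = vn_make_ops("myfs", myfs_vnodeops_template, &myfs_vnodeops);
 */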

/*
 * Free file system specific resources.  For file systems that do not
 * support forced unmount, this is a no-op.
 */

/*ARGSUSED*/
void
fs_freevfs(vfs_t *vfsp)
{
}

/* ARGSUSED */
int
fs_nosys_map(struct vnode *vp, offset_t off, struct as *as, caddr_t *addrp,
    size_t len, uchar_t prot, uchar_t maxprot, uint_t flags, struct cred *cr,
    caller_context_t *ct)
{
	return (ENOSYS);
}

/* ARGSUSED */
int
fs_nosys_addmap(struct vnode *vp, offset_t off, struct as *as, caddr_t addr,
    size_t len, uchar_t prot, uchar_t maxprot, uint_t flags, struct cred *cr,
    caller_context_t *ct)
{
	return (ENOSYS);
}

/* ARGSUSED */
int
fs_nosys_poll(vnode_t *vp, short events, int anyyet, short *reventsp,
    struct pollhead **phpp, caller_context_t *ct)
{
	return (ENOSYS);
}


/*
 * The file system has nothing to sync to disk.  However, the
 * VFS_SYNC operation must not fail.
 */
/* ARGSUSED */
int
fs_sync(struct vfs *vfspp, short flag, cred_t *cr)
{
	return (0);
}

/*
 * Does nothing, but VOP_FSYNC must not fail.
 */
/* ARGSUSED */
int
fs_fsync(vnode_t *vp, int syncflag, cred_t *cr, caller_context_t *ct)
{
	return (0);
}

/*
 * Does nothing, but VOP_PUTPAGE must not fail.
 */
/* ARGSUSED */
int
fs_putpage(vnode_t *vp, offset_t off, size_t len, int flags, cred_t *cr,
    caller_context_t *ctp)
{
	return (0);
}

/*
 * Does nothing, but VOP_IOCTL must not fail.
 */
/* ARGSUSED */
int
fs_ioctl(vnode_t *vp, int com, intptr_t data, int flag, cred_t *cred,
    int *rvalp)
{
	return (0);
}

/*
 * Read/write lock/unlock.  Does nothing.
 */
/* ARGSUSED */
int
fs_rwlock(vnode_t *vp, int write_lock, caller_context_t *ctp)
{
	return (-1);
}

/* ARGSUSED */
void
fs_rwunlock(vnode_t *vp, int write_lock, caller_context_t *ctp)
{
}

/*
 * Compare two vnodes.
 */
/*ARGSUSED2*/
int
fs_cmp(vnode_t *vp1, vnode_t *vp2, caller_context_t *ct)
{
	return (vp1 == vp2);
}

/*
 * Generic seek: just validate that the new offset is in range.
 */
/* ARGSUSED */
int
fs_seek(vnode_t *vp, offset_t ooff, offset_t *noffp, caller_context_t *ct)
{
	return ((*noffp < 0 || *noffp > MAXOFFSET_T) ? EINVAL : 0);
}

/*
 * File and record locking.
 */
/* ARGSUSED */
int
fs_frlock(vnode_t *vp, int cmd, struct flock64 *bfp, int flag, offset_t offset,
    flk_callback_t *flk_cbp, cred_t *cr, caller_context_t *ct)
{
	int frcmd;
	int nlmid;
	int error = 0;
	boolean_t skip_lock = B_FALSE;
	flk_callback_t serialize_callback;
	int serialize = 0;
	v_mode_t mode;

	switch (cmd) {

	case F_GETLK:
	case F_O_GETLK:
		if (flag & F_REMOTELOCK) {
			frcmd = RCMDLCK;
		} else if (flag & F_PXFSLOCK) {
			frcmd = PCMDLCK;
		} else {
			frcmd = 0;
			bfp->l_pid = ttoproc(curthread)->p_pid;
			bfp->l_sysid = 0;
		}
		break;

	case F_OFD_GETLK:
		/*
		 * TBD we do not support remote OFD locks at this time.
		 */
		if (flag & (F_REMOTELOCK | F_PXFSLOCK)) {
			error = EINVAL;
			goto done;
		}
		skip_lock = B_TRUE;
		break;

	case F_SETLK_NBMAND:
		/*
		 * Are NBMAND locks allowed on this file?
		 */
		if (!vp->v_vfsp ||
		    !(vp->v_vfsp->vfs_flag & VFS_NBMAND)) {
			error = EINVAL;
			goto done;
		}
		if (vp->v_type != VREG) {
			error = EINVAL;
			goto done;
		}
		/*FALLTHROUGH*/

	case F_SETLK:
		if (flag & F_REMOTELOCK) {
			frcmd = SETFLCK|RCMDLCK;
		} else if (flag & F_PXFSLOCK) {
			frcmd = SETFLCK|PCMDLCK;
		} else {
			frcmd = SETFLCK;
			bfp->l_pid = ttoproc(curthread)->p_pid;
			bfp->l_sysid = 0;
		}
		if (cmd == F_SETLK_NBMAND &&
		    (bfp->l_type == F_RDLCK || bfp->l_type == F_WRLCK)) {
			frcmd |= NBMLCK;
		}

		if (nbl_need_check(vp)) {
			nbl_start_crit(vp, RW_WRITER);
			serialize = 1;
			if (frcmd & NBMLCK) {
				mode = (bfp->l_type == F_RDLCK) ?
				    V_READ : V_RDANDWR;
				if (vn_is_mapped(vp, mode)) {
					error = EAGAIN;
					goto done;
				}
			}
		}
		break;

	case F_SETLKW:
		if (flag & F_REMOTELOCK) {
			frcmd = SETFLCK|SLPFLCK|RCMDLCK;
		} else if (flag & F_PXFSLOCK) {
			frcmd = SETFLCK|SLPFLCK|PCMDLCK;
		} else {
			frcmd = SETFLCK|SLPFLCK;
			bfp->l_pid = ttoproc(curthread)->p_pid;
			bfp->l_sysid = 0;
		}

		if (nbl_need_check(vp)) {
			nbl_start_crit(vp, RW_WRITER);
			serialize = 1;
		}
		break;

	case F_OFD_SETLK:
	case F_OFD_SETLKW:
	case F_FLOCK:
	case F_FLOCKW:
		/*
		 * TBD we do not support remote OFD locks at this time.
		 */
		if (flag & (F_REMOTELOCK | F_PXFSLOCK)) {
			error = EINVAL;
			goto done;
		}
		skip_lock = B_TRUE;
		break;

	case F_HASREMOTELOCKS:
		nlmid = GETNLMID(bfp->l_sysid);
		if (nlmid != 0) {	/* booted as a cluster */
			l_has_rmt(bfp) =
			    cl_flk_has_remote_locks_for_nlmid(vp, nlmid);
		} else {		/* not booted as a cluster */
			l_has_rmt(bfp) = flk_has_remote_locks(vp);
		}

		goto done;

	default:
		error = EINVAL;
		goto done;
	}

	/*
	 * If this is a blocking lock request and we're serializing lock
	 * requests, modify the callback list to leave the critical region
	 * while we're waiting for the lock.
	 */

	if (serialize && (frcmd & SLPFLCK) != 0) {
		flk_add_callback(&serialize_callback,
		    frlock_serialize_blocked, vp, flk_cbp);
		flk_cbp = &serialize_callback;
	}

	if (!skip_lock)
		error = reclock(vp, bfp, frcmd, flag, offset, flk_cbp);

	if (serialize && (frcmd & SLPFLCK) != 0)
		flk_del_callback(&serialize_callback);

done:
	if (serialize)
		nbl_end_crit(vp);

	return (error);
}
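
/*
 * Illustrative sketch (not part of this file's code): a file system's own
 * frlock entry point typically performs its file-system specific checks and
 * then delegates the actual locking work to fs_frlock().  "myfs_frlock" is
 * hypothetical; the argument list is the standard VOP_FRLOCK signature.
 *
 *	static int
 *	myfs_frlock(vnode_t *vp, int cmd, struct flock64 *bfp, int flag,
 *	    offset_t offset, flk_callback_t *flk_cbp, cred_t *cr,
 *	    caller_context_t *ct)
 *	{
 *		(file-system specific checks, e.g. mandatory-lock policy)
 *		return (fs_frlock(vp, cmd, bfp, flag, offset, flk_cbp, cr, ct));
 *	}
 */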

/*
 * Callback when a lock request blocks and we are serializing requests.
 * Before sleeping, leave the critical region; after wakeup, reenter it.
 */

static callb_cpr_t *
frlock_serialize_blocked(flk_cb_when_t when, void *infop)
{
	vnode_t *vp = (vnode_t *)infop;

	if (when == FLK_BEFORE_SLEEP)
		nbl_end_crit(vp);
	else {
		nbl_start_crit(vp, RW_WRITER);
	}

	return (NULL);
}

/*
 * Allow any flags.
 */
/* ARGSUSED */
int
fs_setfl(vnode_t *vp, int oflags, int nflags, cred_t *cr, caller_context_t *ct)
{
	return (0);
}

/*
 * Return the answer requested to poll() for non-device files.
 * Only POLLIN, POLLRDNORM, POLLRDBAND, POLLOUT, and POLLWRBAND are
 * recognized.
 */
struct pollhead fs_pollhd;

/* ARGSUSED */
int
fs_poll(vnode_t *vp, short events, int anyyet, short *reventsp,
    struct pollhead **phpp, caller_context_t *ct)
{
	/*
	 * Reject all attempts for edge-triggered polling.  These should only
	 * occur when regular files are added to a /dev/poll handle which is
	 * in epoll mode.  Linux epoll does not allow epoll-ing on regular
	 * files at all, so rejecting EPOLLET requests is congruent with those
	 * expectations.
	 */
	if (events & POLLET) {
		return (EPERM);
	}

	*reventsp = 0;
	if (events & POLLIN)
		*reventsp |= POLLIN;
	if (events & POLLRDNORM)
		*reventsp |= POLLRDNORM;
	if (events & POLLRDBAND)
		*reventsp |= POLLRDBAND;
	if (events & POLLOUT)
		*reventsp |= POLLOUT;
	if (events & POLLWRBAND)
		*reventsp |= POLLWRBAND;
	/*
	 * Emitting a pollhead without the intention of issuing pollwakeup()
	 * calls against it is a recipe for trouble.  It's only acceptable in
	 * this case since the above logic matches practically all useful
	 * events.
	 */
	if (*reventsp == 0 && !anyyet) {
		*phpp = &fs_pollhd;
	}
	return (0);
}

/*
 * POSIX pathconf() support.
 */
/* ARGSUSED */
int
fs_pathconf(vnode_t *vp, int cmd, ulong_t *valp, cred_t *cr,
    caller_context_t *ct)
{
	ulong_t val;
	int error = 0;
	struct statvfs64 vfsbuf;

	switch (cmd) {

	case _PC_LINK_MAX:
		val = MAXLINK;
		break;

	case _PC_MAX_CANON:
		val = MAX_CANON;
		break;

	case _PC_MAX_INPUT:
		val = MAX_INPUT;
		break;

	case _PC_NAME_MAX:
		bzero(&vfsbuf, sizeof (vfsbuf));
		if (error = VFS_STATVFS(vp->v_vfsp, &vfsbuf))
			break;
		val = vfsbuf.f_namemax;
		break;

	case _PC_PATH_MAX:
	case _PC_SYMLINK_MAX:
		val = MAXPATHLEN;
		break;

	case _PC_PIPE_BUF:
		val = PIPE_BUF;
		break;

	case _PC_NO_TRUNC:
		if (vp->v_vfsp->vfs_flag & VFS_NOTRUNC)
			val = 1;	/* NOTRUNC is enabled for vp */
		else
			val = (ulong_t)-1;
		break;

	case _PC_VDISABLE:
		val = _POSIX_VDISABLE;
		break;

	case _PC_CHOWN_RESTRICTED:
		if (rstchown)
			val = rstchown;	/* chown restricted enabled */
		else
			val = (ulong_t)-1;
		break;

	case _PC_FILESIZEBITS:

		/*
		 * If we ever get here, the underlying file system does not
		 * recognize this command, so the configurable limit cannot
		 * be determined.  Return -1 and do not change errno.
		 */

		val = (ulong_t)-1;	/* large file support */
		break;

	case _PC_ACL_ENABLED:
		val = 0;
		break;

	case _PC_CASE_BEHAVIOR:
		val = _CASE_SENSITIVE;
		if (vfs_has_feature(vp->v_vfsp, VFSFT_CASEINSENSITIVE) == 1)
			val |= _CASE_INSENSITIVE;
		if (vfs_has_feature(vp->v_vfsp, VFSFT_NOCASESENSITIVE) == 1)
			val &= ~_CASE_SENSITIVE;
		break;

	case _PC_SATTR_ENABLED:
	case _PC_SATTR_EXISTS:
		val = 0;
		break;

	case _PC_ACCESS_FILTERING:
		val = 0;
		break;

	default:
		error = EINVAL;
		break;
	}

	if (error == 0)
		*valp = val;
	return (error);
}
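
/*
 * Illustrative sketch (not part of this file's code): a file system normally
 * answers only the pathconf queries it knows something special about and
 * falls back to fs_pathconf() for the rest.  "myfs_pathconf" and the value
 * 64 for _PC_FILESIZEBITS are hypothetical examples.
 *
 *	static int
 *	myfs_pathconf(vnode_t *vp, int cmd, ulong_t *valp, cred_t *cr,
 *	    caller_context_t *ct)
 *	{
 *		switch (cmd) {
 *		case _PC_FILESIZEBITS:
 *			*valp = 64;
 *			return (0);
 *		default:
 *			return (fs_pathconf(vp, cmd, valp, cr, ct));
 *		}
 *	}
 */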

/*
 * Dispose of a page.
 */
/* ARGSUSED */
void
fs_dispose(struct vnode *vp, page_t *pp, int fl, int dn, struct cred *cr,
    caller_context_t *ct)
{

	ASSERT(fl == B_FREE || fl == B_INVAL);

	if (fl == B_FREE)
		page_free(pp, dn);
	else
		page_destroy(pp, dn);
}

/* ARGSUSED */
void
fs_nodispose(struct vnode *vp, page_t *pp, int fl, int dn, struct cred *cr,
    caller_context_t *ct)
{
	cmn_err(CE_PANIC, "fs_nodispose invoked");
}

/*
 * Fabricate ACLs for file systems that do not support ACLs.
 */
/* ARGSUSED */
int
fs_fab_acl(vnode_t *vp, vsecattr_t *vsecattr, int flag, cred_t *cr,
    caller_context_t *ct)
{
	aclent_t *aclentp;
	struct vattr vattr;
	int error;
	size_t aclsize;

	vsecattr->vsa_aclcnt = 0;
	vsecattr->vsa_aclentsz = 0;
	vsecattr->vsa_aclentp = NULL;
	vsecattr->vsa_dfaclcnt = 0;	/* Default ACLs are not fabricated */
	vsecattr->vsa_dfaclentp = NULL;

	vattr.va_mask = AT_MODE | AT_UID | AT_GID;
	if (error = VOP_GETATTR(vp, &vattr, 0, cr, ct))
		return (error);

	if (vsecattr->vsa_mask & (VSA_ACLCNT | VSA_ACL)) {
		aclsize = 4 * sizeof (aclent_t);
		vsecattr->vsa_aclcnt = 4; /* USER, GROUP, OTHER, and CLASS */
		vsecattr->vsa_aclentp = kmem_zalloc(aclsize, KM_SLEEP);
		aclentp = vsecattr->vsa_aclentp;

		aclentp->a_type = USER_OBJ;	/* Owner */
		aclentp->a_perm = ((ushort_t)(vattr.va_mode & 0700)) >> 6;
		aclentp->a_id = vattr.va_uid;	/* Really undefined */
		aclentp++;

		aclentp->a_type = GROUP_OBJ;	/* Group */
		aclentp->a_perm = ((ushort_t)(vattr.va_mode & 0070)) >> 3;
		aclentp->a_id = vattr.va_gid;	/* Really undefined */
		aclentp++;

		aclentp->a_type = OTHER_OBJ;	/* Other */
		aclentp->a_perm = vattr.va_mode & 0007;
		aclentp->a_id = (gid_t)-1;	/* Really undefined */
		aclentp++;

		aclentp->a_type = CLASS_OBJ;	/* Class */
		aclentp->a_perm = (ushort_t)(0007);
		aclentp->a_id = (gid_t)-1;	/* Really undefined */
	} else if (vsecattr->vsa_mask & (VSA_ACECNT | VSA_ACE)) {
		VERIFY(0 == acl_trivial_create(vattr.va_mode,
		    (vp->v_type == VDIR), (ace_t **)&vsecattr->vsa_aclentp,
		    &vsecattr->vsa_aclcnt));
		vsecattr->vsa_aclentsz = vsecattr->vsa_aclcnt * sizeof (ace_t);
	}

	return (error);
}
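
/*
 * Illustrative sketch (not part of this file's code): a file system with no
 * native ACL storage can satisfy VOP_GETSECATTR by delegating to
 * fs_fab_acl(), either from its vnodeops template or from its own entry
 * point as below.  "myfs_getsecattr" is hypothetical.
 *
 *	static int
 *	myfs_getsecattr(vnode_t *vp, vsecattr_t *vsap, int flag, cred_t *cr,
 *	    caller_context_t *ct)
 *	{
 *		return (fs_fab_acl(vp, vsap, flag, cr, ct));
 *	}
 */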

/*
 * Common code for implementing DOS share reservations
 */
/* ARGSUSED4 */
int
fs_shrlock(struct vnode *vp, int cmd, struct shrlock *shr, int flag, cred_t *cr,
    caller_context_t *ct)
{
	int error;

	/*
	 * Make sure that the file was opened with permissions appropriate
	 * for the request, and make sure the caller isn't trying to sneak
	 * in an NBMAND request.
	 */
	if (cmd == F_SHARE) {
		if (((shr->s_access & F_RDACC) && (flag & FREAD) == 0) ||
		    ((shr->s_access & F_WRACC) && (flag & FWRITE) == 0))
			return (EBADF);
		if (shr->s_access & (F_RMACC | F_MDACC))
			return (EINVAL);
		if (shr->s_deny & (F_MANDDNY | F_RMDNY))
			return (EINVAL);
	}
	if (cmd == F_SHARE_NBMAND) {
		/* make sure nbmand is allowed on the file */
		if (!vp->v_vfsp ||
		    !(vp->v_vfsp->vfs_flag & VFS_NBMAND)) {
			return (EINVAL);
		}
		if (vp->v_type != VREG) {
			return (EINVAL);
		}
	}

	nbl_start_crit(vp, RW_WRITER);

	switch (cmd) {

	case F_SHARE_NBMAND:
		shr->s_deny |= F_MANDDNY;
		/*FALLTHROUGH*/
	case F_SHARE:
		error = add_share(vp, shr);
		break;

	case F_UNSHARE:
		error = del_share(vp, shr);
		break;

	case F_HASREMOTELOCKS:
		/*
		 * We are overloading this command to refer to remote
		 * shares as well as remote locks, despite its name.
		 */
		shr->s_access = shr_has_remote_shares(vp, shr->s_sysid);
		error = 0;
		break;

	default:
		error = EINVAL;
		break;
	}

	nbl_end_crit(vp);
	return (error);
}

/*ARGSUSED1*/
int
fs_vnevent_nosupport(vnode_t *vp, vnevent_t e, vnode_t *dvp, char *fnm,
    caller_context_t *ct)
{
	ASSERT(vp != NULL);
	return (ENOTSUP);
}

/*ARGSUSED1*/
int
fs_vnevent_support(vnode_t *vp, vnevent_t e, vnode_t *dvp, char *fnm,
    caller_context_t *ct)
{
	ASSERT(vp != NULL);
	return (0);
}

/*
 * Return 1 for a non-trivial ACL.
 *
 * NB: It is not necessary for the caller to VOP_RWLOCK since
 * we only issue VOP_GETSECATTR.
 *
 * Returns:  0 == trivial
 *           1 == not trivial
 *          <0 == could not determine
 */
int
fs_acl_nontrivial(vnode_t *vp, cred_t *cr)
{
	ulong_t acl_styles;
	ulong_t acl_flavor;
	vsecattr_t vsecattr;
	int error;
	int isnontrivial;

	/* determine the forms of ACLs maintained */
	error = VOP_PATHCONF(vp, _PC_ACL_ENABLED, &acl_styles, cr, NULL);

	/* clear bits we don't understand and establish default acl_style */
	acl_styles &= (_ACL_ACLENT_ENABLED | _ACL_ACE_ENABLED);
	if (error || (acl_styles == 0))
		acl_styles = _ACL_ACLENT_ENABLED;

	vsecattr.vsa_aclentp = NULL;
	vsecattr.vsa_dfaclentp = NULL;
	vsecattr.vsa_aclcnt = 0;
	vsecattr.vsa_dfaclcnt = 0;

	while (acl_styles) {
		/* select one of the styles as current flavor */
		acl_flavor = 0;
		if (acl_styles & _ACL_ACLENT_ENABLED) {
			acl_flavor = _ACL_ACLENT_ENABLED;
			vsecattr.vsa_mask = VSA_ACLCNT | VSA_DFACLCNT;
		} else if (acl_styles & _ACL_ACE_ENABLED) {
			acl_flavor = _ACL_ACE_ENABLED;
			vsecattr.vsa_mask = VSA_ACECNT | VSA_ACE;
		}

		ASSERT(vsecattr.vsa_mask && acl_flavor);
		error = VOP_GETSECATTR(vp, &vsecattr, 0, cr, NULL);
		if (error == 0)
			break;

		/* that flavor failed */
		acl_styles &= ~acl_flavor;
	}

	/* if all styles fail then assume trivial */
	if (acl_styles == 0)
		return (0);

	/* process the flavor that worked */
	isnontrivial = 0;
	if (acl_flavor & _ACL_ACLENT_ENABLED) {
		if (vsecattr.vsa_aclcnt > MIN_ACL_ENTRIES)
			isnontrivial = 1;
		if (vsecattr.vsa_aclcnt && vsecattr.vsa_aclentp != NULL)
			kmem_free(vsecattr.vsa_aclentp,
			    vsecattr.vsa_aclcnt * sizeof (aclent_t));
		if (vsecattr.vsa_dfaclcnt && vsecattr.vsa_dfaclentp != NULL)
			kmem_free(vsecattr.vsa_dfaclentp,
			    vsecattr.vsa_dfaclcnt * sizeof (aclent_t));
	}
	if (acl_flavor & _ACL_ACE_ENABLED) {
		isnontrivial = ace_trivial(vsecattr.vsa_aclentp,
		    vsecattr.vsa_aclcnt);

		if (vsecattr.vsa_aclcnt && vsecattr.vsa_aclentp != NULL)
			kmem_free(vsecattr.vsa_aclentp,
			    vsecattr.vsa_aclcnt * sizeof (ace_t));
		/* ACE has no vsecattr.vsa_dfaclcnt */
	}
	return (isnontrivial);
}

/*
 * Check whether a retry is needed to recover from an ESTALE error.
 */
int
fs_need_estale_retry(int retry_count)
{
	if (retry_count < fs_estale_retry)
		return (1);
	else
		return (0);
}
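
/*
 * Illustrative sketch (not part of this file's code): callers that can
 * re-drive an ESTALE-prone operation typically loop like this, with the
 * fs_estale_retry tunable bounding the number of attempts.  The lookup shown
 * is just one example of such an operation.
 *
 *	int retries = 0;
 *	int error;
 *
 *	do {
 *		error = lookupname(path, UIO_SYSSPACE, FOLLOW, NULLVPP, &vp);
 *	} while (error == ESTALE && fs_need_estale_retry(retries++));
 */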


static int (*fs_av_scan)(vnode_t *, cred_t *, int) = NULL;

/*
 * Routine for anti-virus scanner to call to register its scanning routine.
 */
void
fs_vscan_register(int (*av_scan)(vnode_t *, cred_t *, int))
{
	fs_av_scan = av_scan;
}

/*
 * Routine for file systems to call to initiate anti-virus scanning.
 * Scanning will only be done on regular (VREG) files (currently).
 */
int
fs_vscan(vnode_t *vp, cred_t *cr, int async)
{
	int ret = 0;

	if (fs_av_scan && vp->v_type == VREG)
		ret = (*fs_av_scan)(vp, cr, async);

	return (ret);
}
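
/*
 * Illustrative sketch (not part of this file's code): a file system that
 * wants its files scanned calls fs_vscan() from its open entry point and
 * fails the open if the registered scanner rejects the file.  "myfs_open"
 * is hypothetical.
 *
 *	static int
 *	myfs_open(vnode_t **vpp, int flag, cred_t *cr, caller_context_t *ct)
 *	{
 *		if ((flag & FREAD) && fs_vscan(*vpp, cr, 0) != 0)
 *			return (EACCES);
 *		return (0);
 *	}
 */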

/*
 * Support functions for reparse points.
 */
/*
 * reparse_vnode_parse
 *
 * Read the symlink data of a reparse point specified by the vnode
 * and return the reparse data as name-value pairs in the nvlist.
 */
int
reparse_vnode_parse(vnode_t *vp, nvlist_t *nvl)
{
	int err;
	char *lkdata;
	struct uio uio;
	struct iovec iov;

	if (vp == NULL || nvl == NULL)
		return (EINVAL);

	lkdata = kmem_alloc(MAXREPARSELEN, KM_SLEEP);

	/*
	 * Set up I/O vector to read the symlink data.
	 */
	iov.iov_base = lkdata;
	iov.iov_len = MAXREPARSELEN;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_extflg = UIO_COPY_CACHED;
	uio.uio_loffset = (offset_t)0;
	uio.uio_resid = MAXREPARSELEN;

	if ((err = VOP_READLINK(vp, &uio, kcred, NULL)) == 0) {
		*(lkdata + MAXREPARSELEN - uio.uio_resid) = '\0';
		err = reparse_parse(lkdata, nvl);
	}
	kmem_free(lkdata, MAXREPARSELEN);	/* done with lkdata */

	return (err);
}
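
/*
 * Illustrative sketch (not part of this file's code): a caller hands in an
 * allocated nvlist and, on success, walks the returned service-type /
 * service-data pairs.  Only nvlist_alloc()/nvlist_free() and
 * reparse_vnode_parse() are real interfaces here; error handling is
 * abbreviated.
 *
 *	nvlist_t *nvl;
 *	int err;
 *
 *	(void) nvlist_alloc(&nvl, NV_UNIQUE_NAME, KM_SLEEP);
 *	err = reparse_vnode_parse(vp, nvl);
 *	if (err == 0) {
 *		(iterate with nvlist_next_nvpair(), nvpair_name(), ...)
 *	}
 *	nvlist_free(nvl);
 */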

void
reparse_point_init()
{
	mutex_init(&reparsed_door_lock, NULL, MUTEX_DEFAULT, NULL);
}

static door_handle_t
reparse_door_get_handle()
{
	door_handle_t dh;

	mutex_enter(&reparsed_door_lock);
	if ((dh = reparsed_door) == NULL) {
		if (door_ki_open(REPARSED_DOOR, &reparsed_door) != 0) {
			reparsed_door = NULL;
			dh = NULL;
		} else
			dh = reparsed_door;
	}
	mutex_exit(&reparsed_door_lock);
	return (dh);
}

static void
reparse_door_reset_handle()
{
	mutex_enter(&reparsed_door_lock);
	reparsed_door = NULL;
	mutex_exit(&reparsed_door_lock);
}

/*
 * reparse_kderef
 *
 * Accepts the service-specific item from the reparse point and returns
 * the service-specific data requested.  The caller specifies the size of
 * the buffer provided via *bufsize; the routine will fail with EOVERFLOW
 * if the results will not fit in the buffer, in which case *bufsize will
 * contain the number of bytes needed to hold the results.
 *
 * On success, returns 0 and updates *bufsize with the length of the actual
 * result; otherwise returns an error code.
 */
int
reparse_kderef(const char *svc_type, const char *svc_data, char *buf,
    size_t *bufsize)
{
	int err, retries, need_free, retried_doorhd;
	size_t dlen, res_len;
	char *darg;
	door_arg_t door_args;
	reparsed_door_res_t *resp;
	door_handle_t rp_door;

	if (svc_type == NULL || svc_data == NULL || buf == NULL ||
	    bufsize == NULL)
		return (EINVAL);

	/* get reparsed's door handle */
	if ((rp_door = reparse_door_get_handle()) == NULL)
		return (EBADF);

	/* setup buffer for door_call args and results */
	dlen = strlen(svc_type) + strlen(svc_data) + 2;
	if (*bufsize < dlen) {
		darg = kmem_alloc(dlen, KM_SLEEP);
		need_free = 1;
	} else {
		darg = buf;	/* use same buffer for door's args & results */
		need_free = 0;
	}

	/* build argument string of door call */
	(void) snprintf(darg, dlen, "%s:%s", svc_type, svc_data);

	/* setup args for door call */
	door_args.data_ptr = darg;
	door_args.data_size = dlen;
	door_args.desc_ptr = NULL;
	door_args.desc_num = 0;
	door_args.rbuf = buf;
	door_args.rsize = *bufsize;

	/* do the door_call */
	retried_doorhd = 0;
	retries = 0;
	door_ki_hold(rp_door);
	while ((err = door_ki_upcall_limited(rp_door, &door_args,
	    NULL, SIZE_MAX, 0)) != 0) {
		if (err == EAGAIN || err == EINTR) {
			if (++retries < REPARSED_DOORCALL_MAX_RETRY) {
				delay(SEC_TO_TICK(1));
				continue;
			}
		} else if (err == EBADF) {
			/* door server goes away... */
			reparse_door_reset_handle();

			if (retried_doorhd == 0) {
				door_ki_rele(rp_door);
				retried_doorhd++;
				rp_door = reparse_door_get_handle();
				if (rp_door != NULL) {
					door_ki_hold(rp_door);
					continue;
				}
			}
		}
		break;
	}

	if (rp_door)
		door_ki_rele(rp_door);

	if (need_free)
		kmem_free(darg, dlen);	/* done with args buffer */

	if (err != 0)
		return (err);

	resp = (reparsed_door_res_t *)door_args.rbuf;
	if ((err = resp->res_status) == 0) {
		/*
		 * Save the length of the results before the bcopy below,
		 * since it can be an overlapping copy that overwrites the
		 * reparsed_door_res_t structure at the beginning of the
		 * buffer.
		 */
		res_len = (size_t)resp->res_len;

		/* deref call is ok */
		if (res_len > *bufsize)
			err = EOVERFLOW;
		else
			bcopy(resp->res_data, buf, res_len);
		*bufsize = res_len;
	}
	if (door_args.rbuf != buf)
		kmem_free(door_args.rbuf, door_args.rsize);

	return (err);
}
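
/*
 * Illustrative sketch (not part of this file's code): a typical caller sizes
 * a result buffer, calls reparse_kderef(), and retries once with the size
 * reported back in *bufsize if the first attempt returns EOVERFLOW.  The
 * svc_type/svc_data strings are assumed to come from a parsed reparse point.
 *
 *	char *buf;
 *	size_t bufsz = MAXPATHLEN, allocsz = MAXPATHLEN;
 *	int err;
 *
 *	buf = kmem_alloc(allocsz, KM_SLEEP);
 *	err = reparse_kderef(svc_type, svc_data, buf, &bufsz);
 *	if (err == EOVERFLOW) {
 *		kmem_free(buf, allocsz);
 *		allocsz = bufsz;
 *		buf = kmem_alloc(allocsz, KM_SLEEP);
 *		err = reparse_kderef(svc_type, svc_data, buf, &bufsz);
 *	}
 *	(on success, use buf[0 .. bufsz - 1])
 *	kmem_free(buf, allocsz);
 */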