133 VCHR, /* va_type */
134 S_IFCHR | SDEV_DEVMODE_DEFAULT, /* va_mode */
135 SDEV_UID_DEFAULT, /* va_uid */
136 SDEV_GID_DEFAULT, /* va_gid */
137 0, /* va_fsid */
138 0, /* va_nodeid */
139 0, /* va_nlink */
140 0, /* va_size */
141 0, /* va_atime */
142 0, /* va_mtime */
143 0, /* va_ctime */
144 0, /* va_rdev */
145 0, /* va_blksize */
146 0, /* va_nblocks */
147 0 /* va_vcode */
148 };
149
150 kmem_cache_t *sdev_node_cache; /* sdev_node cache */
151 int devtype; /* fstype */
152
153 static void
154 sdev_prof_free(struct sdev_node *dv)
155 {
156 ASSERT(!SDEV_IS_GLOBAL(dv));
157 nvlist_free(dv->sdev_prof.dev_name);
158 nvlist_free(dv->sdev_prof.dev_map);
159 nvlist_free(dv->sdev_prof.dev_symlink);
160 nvlist_free(dv->sdev_prof.dev_glob_incdir);
161 nvlist_free(dv->sdev_prof.dev_glob_excdir);
162 bzero(&dv->sdev_prof, sizeof (dv->sdev_prof));
163 }
164
165 /* sdev_node cache constructor */
166 /*ARGSUSED1*/
167 static int
168 i_sdev_node_ctor(void *buf, void *cfarg, int flag)
169 {
170 struct sdev_node *dv = (struct sdev_node *)buf;
171 struct vnode *vp;
172
290 devname_handle_t *dhl;
291
292 nmlen = strlen(nm) + 1;
293 if (nmlen > MAXNAMELEN) {
294 sdcmn_err9(("sdev_nodeinit: node name %s"
295 " too long\n", nm));
296 *newdv = NULL;
297 return (ENAMETOOLONG);
298 }
299
300 dv = kmem_cache_alloc(sdev_node_cache, KM_SLEEP);
301
302 dv->sdev_name = kmem_alloc(nmlen, KM_SLEEP);
303 bcopy(nm, dv->sdev_name, nmlen);
304 dv->sdev_namelen = nmlen - 1; /* '\0' not included */
305 len = strlen(ddv->sdev_path) + strlen(nm) + 2;
306 dv->sdev_path = kmem_alloc(len, KM_SLEEP);
307 (void) snprintf(dv->sdev_path, len, "%s/%s", ddv->sdev_path, nm);
308 /* overwritten for VLNK nodes */
309 dv->sdev_symlink = NULL;
310 list_link_init(&dv->sdev_plist);
311
312 vp = SDEVTOV(dv);
313 vn_reinit(vp);
314 vp->v_vfsp = SDEVTOV(ddv)->v_vfsp;
315 if (vap)
316 vp->v_type = vap->va_type;
317
318 /*
319 * initialized to the parent's vnodeops.
320 * maybe overwriten for a VDIR
321 */
322 vn_setops(vp, vn_getops(SDEVTOV(ddv)));
323 vn_exists(vp);
324
325 dv->sdev_dotdot = NULL;
326 dv->sdev_attrvp = NULL;
327 if (vap) {
328 sdev_attr_alloc(dv, vap);
329 } else {
330 dv->sdev_attr = NULL;
379 rw_enter(&dv->sdev_contents, RW_WRITER);
380 if (type == VDIR) {
381 dv->sdev_nlink = 2;
382 dv->sdev_flags &= ~SDEV_PERSIST;
383 dv->sdev_flags &= ~SDEV_DYNAMIC;
384 vn_setops(vp, sdev_get_vop(dv)); /* from internal vtab */
385 ASSERT(dv->sdev_dotdot);
386 ASSERT(SDEVTOV(dv->sdev_dotdot)->v_type == VDIR);
387 vp->v_rdev = SDEVTOV(dv->sdev_dotdot)->v_rdev;
388 avl_create(&dv->sdev_entries,
389 (int (*)(const void *, const void *))sdev_compare_nodes,
390 sizeof (struct sdev_node),
391 offsetof(struct sdev_node, sdev_avllink));
392 } else if (type == VLNK) {
393 ASSERT(args);
394 dv->sdev_nlink = 1;
395 dv->sdev_symlink = i_ddi_strdup((char *)args, KM_SLEEP);
396 } else {
397 dv->sdev_nlink = 1;
398 }
399 sdev_plugin_nodeready(dv);
400
401 if (!(SDEV_IS_GLOBAL(dv))) {
402 dv->sdev_origin = (struct sdev_node *)args;
403 dv->sdev_flags &= ~SDEV_PERSIST;
404 }
405
406 /*
407 * shadow node is created here OR
408 * if failed (indicated by dv->sdev_attrvp == NULL),
409 * created later in sdev_setattr
410 */
411 if (avp) {
412 dv->sdev_attrvp = avp;
413 } else {
414 if (dv->sdev_attr == NULL) {
415 sdev_attr_alloc(dv, vap);
416 } else {
417 sdev_attr_update(dv, vap);
418 }
419
476 } else {
477 dv->sdev_flags = SDEV_BUILD;
478 dv->sdev_flags &= ~SDEV_PERSIST;
479 bzero(&dv->sdev_prof, sizeof (dv->sdev_prof));
480 dv->sdev_ldir_gen = 0;
481 dv->sdev_devtree_gen = 0;
482 }
483
484 avl_create(&dv->sdev_entries,
485 (int (*)(const void *, const void *))sdev_compare_nodes,
486 sizeof (struct sdev_node),
487 offsetof(struct sdev_node, sdev_avllink));
488
489 rw_enter(&dv->sdev_contents, RW_WRITER);
490 sdev_set_nodestate(dv, SDEV_READY);
491 rw_exit(&dv->sdev_contents);
492 sdev_nc_node_exists(dv);
493 return (dv);
494 }
495
/*
 * Table of /dev subdirectories that carry their own vnodeops and/or
 * validation callbacks.  Each entry pairs a subdirectory name with
 * its vnodeops template, a global vnodeops handle, a validator, and
 * behavior flags; the list is terminated by an all-NULL entry.
 * NOTE(review): consulted via sdev_get_vop() ("from internal vtab")
 * when directory nodes are created — confirm against the lookup code.
 */
struct sdev_vop_table vtab[] = {
	{ "pts", devpts_vnodeops_tbl, &devpts_vnodeops, devpts_validate,
	SDEV_DYNAMIC | SDEV_VTOR },

	{ "vt", devvt_vnodeops_tbl, &devvt_vnodeops, devvt_validate,
	SDEV_DYNAMIC | SDEV_VTOR },

	/* SDEV_SUBDIR: the entry also governs paths below "zvol/" */
	{ "zvol", devzvol_vnodeops_tbl, &devzvol_vnodeops,
	devzvol_validate, SDEV_ZONED | SDEV_DYNAMIC | SDEV_VTOR | SDEV_SUBDIR },

	/* no private vnodeops; only suppresses negative name caching */
	{ "zcons", NULL, NULL, NULL, SDEV_NO_NCACHE },

	{ "net", devnet_vnodeops_tbl, &devnet_vnodeops, devnet_validate,
	SDEV_DYNAMIC | SDEV_VTOR | SDEV_SUBDIR },

	{ "ipnet", devipnet_vnodeops_tbl, &devipnet_vnodeops,
	devipnet_validate, SDEV_DYNAMIC | SDEV_VTOR | SDEV_NO_NCACHE },

	/*
	 * SDEV_DYNAMIC: prevent calling out to devfsadm, since only the
	 * lofi driver controls child nodes.
	 *
	 * SDEV_PERSIST: ensure devfsadm knows to clean up any persisted
	 * stale nodes (e.g. from devfsadm -R).
	 *
	 * In addition, devfsadm knows not to attempt a rmdir: a zone
	 * may hold a reference, which would zombify the node,
	 * preventing a mkdir.
	 */

	{ "lofi", NULL, NULL, NULL,
	    SDEV_ZONED | SDEV_DYNAMIC | SDEV_PERSIST },
	{ "rlofi", NULL, NULL, NULL,
	    SDEV_ZONED | SDEV_DYNAMIC | SDEV_PERSIST },

	{ NULL, NULL, NULL, NULL, 0}
};
533
534
535 /*
536 * Build the base root inode
537 */
538 ino_t
539 sdev_mkino(struct sdev_node *dv)
540 {
541 ino_t ino;
542
543 /*
544 * for now, follow the lead of tmpfs here
545 * need to someday understand the requirements here
546 */
547 ino = (ino_t)(uint32_t)((uintptr_t)dv >> 3);
548 ino += SDEV_ROOTINO + 1;
549
550 return (ino);
551 }
552
553 int
554 sdev_getlink(struct vnode *linkvp, char **link)
555 {
793 if (dv->sdev_attr != NULL) {
794 kmem_free(dv->sdev_attr, sizeof (struct vattr));
795 dv->sdev_attr = NULL;
796 }
797
798 if (dv->sdev_name != NULL) {
799 kmem_free(dv->sdev_name, dv->sdev_namelen + 1);
800 dv->sdev_name = NULL;
801 }
802
803 if (dv->sdev_symlink != NULL) {
804 kmem_free(dv->sdev_symlink, strlen(dv->sdev_symlink) + 1);
805 dv->sdev_symlink = NULL;
806 }
807
808 if (dv->sdev_path) {
809 kmem_free(dv->sdev_path, strlen(dv->sdev_path) + 1);
810 dv->sdev_path = NULL;
811 }
812
813 if (!SDEV_IS_GLOBAL(dv)) {
814 sdev_prof_free(dv);
815 if (dv->sdev_vnode->v_type != VLNK && dv->sdev_origin != NULL)
816 SDEV_RELE(dv->sdev_origin);
817 }
818
819 if (SDEVTOV(dv)->v_type == VDIR) {
820 ASSERT(SDEV_FIRST_ENTRY(dv) == NULL);
821 avl_destroy(&dv->sdev_entries);
822 }
823
824 mutex_destroy(&dv->sdev_lookup_lock);
825 cv_destroy(&dv->sdev_lookup_cv);
826
827 /* return node to initial state as per constructor */
828 (void) memset((void *)&dv->sdev_instance_data, 0,
829 sizeof (dv->sdev_instance_data));
830 vn_invalid(SDEVTOV(dv));
831 dv->sdev_private = NULL;
832 kmem_cache_free(sdev_node_cache, dv);
833 }
834
835 /*
836 * DIRECTORY CACHE lookup
837 */
2794
2795 kmem_free(pathlist, (npaths_alloc + 1) * sizeof (char *));
2796 }
2797
2798 int
2799 sdev_modctl_devexists(const char *path)
2800 {
2801 vnode_t *vp;
2802 int error;
2803
2804 error = sdev_modctl_lookup(path, &vp);
2805 sdcmn_err11(("modctl dev exists: %s by %s: %s\n",
2806 path, curproc->p_user.u_comm,
2807 (error == 0) ? "ok" : "failed"));
2808 if (error == 0)
2809 VN_RELE(vp);
2810
2811 return (error);
2812 }
2813
2814 /*
2815 * a generic setattr() function
2816 *
2817 * note: flags only supports AT_UID and AT_GID.
2818 * Future enhancements can be done for other types, e.g. AT_MODE
2819 */
2820 int
2821 devname_setattr_func(struct vnode *vp, struct vattr *vap, int flags,
2822 struct cred *cred, int (*callback)(struct sdev_node *, struct vattr *,
2823 int), int protocol)
2824 {
2825 struct sdev_node *dv = VTOSDEV(vp);
2826 struct sdev_node *parent = dv->sdev_dotdot;
2827 struct vattr *get;
2828 uint_t mask = vap->va_mask;
2829 int error;
2830
2831 /* some sanity checks */
2832 if (vap->va_mask & AT_NOSET)
2833 return (EINVAL);
|
133 VCHR, /* va_type */
134 S_IFCHR | SDEV_DEVMODE_DEFAULT, /* va_mode */
135 SDEV_UID_DEFAULT, /* va_uid */
136 SDEV_GID_DEFAULT, /* va_gid */
137 0, /* va_fsid */
138 0, /* va_nodeid */
139 0, /* va_nlink */
140 0, /* va_size */
141 0, /* va_atime */
142 0, /* va_mtime */
143 0, /* va_ctime */
144 0, /* va_rdev */
145 0, /* va_blksize */
146 0, /* va_nblocks */
147 0 /* va_vcode */
148 };
149
150 kmem_cache_t *sdev_node_cache; /* sdev_node cache */
151 int devtype; /* fstype */
152
153 /* static */
154 static struct vnodeops *sdev_get_vop(struct sdev_node *);
155 static void sdev_set_no_negcache(struct sdev_node *);
156 static fs_operation_def_t *sdev_merge_vtab(const fs_operation_def_t []);
157 static void sdev_free_vtab(fs_operation_def_t *);
158
159 static void
160 sdev_prof_free(struct sdev_node *dv)
161 {
162 ASSERT(!SDEV_IS_GLOBAL(dv));
163 nvlist_free(dv->sdev_prof.dev_name);
164 nvlist_free(dv->sdev_prof.dev_map);
165 nvlist_free(dv->sdev_prof.dev_symlink);
166 nvlist_free(dv->sdev_prof.dev_glob_incdir);
167 nvlist_free(dv->sdev_prof.dev_glob_excdir);
168 bzero(&dv->sdev_prof, sizeof (dv->sdev_prof));
169 }
170
171 /* sdev_node cache constructor */
172 /*ARGSUSED1*/
173 static int
174 i_sdev_node_ctor(void *buf, void *cfarg, int flag)
175 {
176 struct sdev_node *dv = (struct sdev_node *)buf;
177 struct vnode *vp;
178
296 devname_handle_t *dhl;
297
298 nmlen = strlen(nm) + 1;
299 if (nmlen > MAXNAMELEN) {
300 sdcmn_err9(("sdev_nodeinit: node name %s"
301 " too long\n", nm));
302 *newdv = NULL;
303 return (ENAMETOOLONG);
304 }
305
306 dv = kmem_cache_alloc(sdev_node_cache, KM_SLEEP);
307
308 dv->sdev_name = kmem_alloc(nmlen, KM_SLEEP);
309 bcopy(nm, dv->sdev_name, nmlen);
310 dv->sdev_namelen = nmlen - 1; /* '\0' not included */
311 len = strlen(ddv->sdev_path) + strlen(nm) + 2;
312 dv->sdev_path = kmem_alloc(len, KM_SLEEP);
313 (void) snprintf(dv->sdev_path, len, "%s/%s", ddv->sdev_path, nm);
314 /* overwritten for VLNK nodes */
315 dv->sdev_symlink = NULL;
316
317 vp = SDEVTOV(dv);
318 vn_reinit(vp);
319 vp->v_vfsp = SDEVTOV(ddv)->v_vfsp;
320 if (vap)
321 vp->v_type = vap->va_type;
322
323 /*
324 * initialized to the parent's vnodeops.
325 * maybe overwriten for a VDIR
326 */
327 vn_setops(vp, vn_getops(SDEVTOV(ddv)));
328 vn_exists(vp);
329
330 dv->sdev_dotdot = NULL;
331 dv->sdev_attrvp = NULL;
332 if (vap) {
333 sdev_attr_alloc(dv, vap);
334 } else {
335 dv->sdev_attr = NULL;
384 rw_enter(&dv->sdev_contents, RW_WRITER);
385 if (type == VDIR) {
386 dv->sdev_nlink = 2;
387 dv->sdev_flags &= ~SDEV_PERSIST;
388 dv->sdev_flags &= ~SDEV_DYNAMIC;
389 vn_setops(vp, sdev_get_vop(dv)); /* from internal vtab */
390 ASSERT(dv->sdev_dotdot);
391 ASSERT(SDEVTOV(dv->sdev_dotdot)->v_type == VDIR);
392 vp->v_rdev = SDEVTOV(dv->sdev_dotdot)->v_rdev;
393 avl_create(&dv->sdev_entries,
394 (int (*)(const void *, const void *))sdev_compare_nodes,
395 sizeof (struct sdev_node),
396 offsetof(struct sdev_node, sdev_avllink));
397 } else if (type == VLNK) {
398 ASSERT(args);
399 dv->sdev_nlink = 1;
400 dv->sdev_symlink = i_ddi_strdup((char *)args, KM_SLEEP);
401 } else {
402 dv->sdev_nlink = 1;
403 }
404
405 if (!(SDEV_IS_GLOBAL(dv))) {
406 dv->sdev_origin = (struct sdev_node *)args;
407 dv->sdev_flags &= ~SDEV_PERSIST;
408 }
409
410 /*
411 * shadow node is created here OR
412 * if failed (indicated by dv->sdev_attrvp == NULL),
413 * created later in sdev_setattr
414 */
415 if (avp) {
416 dv->sdev_attrvp = avp;
417 } else {
418 if (dv->sdev_attr == NULL) {
419 sdev_attr_alloc(dv, vap);
420 } else {
421 sdev_attr_update(dv, vap);
422 }
423
480 } else {
481 dv->sdev_flags = SDEV_BUILD;
482 dv->sdev_flags &= ~SDEV_PERSIST;
483 bzero(&dv->sdev_prof, sizeof (dv->sdev_prof));
484 dv->sdev_ldir_gen = 0;
485 dv->sdev_devtree_gen = 0;
486 }
487
488 avl_create(&dv->sdev_entries,
489 (int (*)(const void *, const void *))sdev_compare_nodes,
490 sizeof (struct sdev_node),
491 offsetof(struct sdev_node, sdev_avllink));
492
493 rw_enter(&dv->sdev_contents, RW_WRITER);
494 sdev_set_nodestate(dv, SDEV_READY);
495 rw_exit(&dv->sdev_contents);
496 sdev_nc_node_exists(dv);
497 return (dv);
498 }
499
/*
 * Directory-dependent vop table entry: associates one /dev
 * subdirectory with its own vnodeops and validation callback.
 */
struct sdev_vop_table {
	char *vt_name;				/* subdirectory name */
	const fs_operation_def_t *vt_service;	/* vnodeops table */
	struct vnodeops *vt_vops;		/* constructed vop; cached by sdev_get_vop() */
	struct vnodeops **vt_global_vops;	/* global container for vop */
	int (*vt_vtor)(struct sdev_node *);	/* validate sdev_node */
	int vt_flags;				/* SDEV_* behavior flags */
};
509
/*
 * Table of /dev subdirectories with directory-specific vnodeops,
 * matched against a node's sdev_path by sdev_match().  Terminated
 * by an all-NULL entry.
 *
 * A nice improvement would be to provide a plug-in mechanism
 * for this table instead of a const table.
 */
static struct sdev_vop_table vtab[] =
{
	{ "pts", devpts_vnodeops_tbl, NULL, &devpts_vnodeops, devpts_validate,
	SDEV_DYNAMIC | SDEV_VTOR },

	{ "vt", devvt_vnodeops_tbl, NULL, &devvt_vnodeops, devvt_validate,
	SDEV_DYNAMIC | SDEV_VTOR },

	/* SDEV_SUBDIR: entry also matches paths below "zvol/" */
	{ "zvol", devzvol_vnodeops_tbl, NULL, &devzvol_vnodeops,
	devzvol_validate, SDEV_ZONED | SDEV_DYNAMIC | SDEV_VTOR | SDEV_SUBDIR },

	/* no private vnodeops; only suppresses negative name caching */
	{ "zcons", NULL, NULL, NULL, NULL, SDEV_NO_NCACHE },

	{ "net", devnet_vnodeops_tbl, NULL, &devnet_vnodeops, devnet_validate,
	SDEV_DYNAMIC | SDEV_VTOR },

	{ "ipnet", devipnet_vnodeops_tbl, NULL, &devipnet_vnodeops,
	devipnet_validate, SDEV_DYNAMIC | SDEV_VTOR | SDEV_NO_NCACHE },

	/*
	 * SDEV_DYNAMIC: prevent calling out to devfsadm, since only the
	 * lofi driver controls child nodes.
	 *
	 * SDEV_PERSIST: ensure devfsadm knows to clean up any persisted
	 * stale nodes (e.g. from devfsadm -R).
	 *
	 * In addition, devfsadm knows not to attempt a rmdir: a zone
	 * may hold a reference, which would zombify the node,
	 * preventing a mkdir.
	 */

	{ "lofi", NULL, NULL, NULL, NULL,
	    SDEV_ZONED | SDEV_DYNAMIC | SDEV_PERSIST },
	{ "rlofi", NULL, NULL, NULL, NULL,
	    SDEV_ZONED | SDEV_DYNAMIC | SDEV_PERSIST },

	{ NULL, NULL, NULL, NULL, NULL, 0}
};
552
553 /*
554 * We need to match off of the sdev_path, not the sdev_name. We are only allowed
555 * to exist directly under /dev.
556 */
557 struct sdev_vop_table *
558 sdev_match(struct sdev_node *dv)
559 {
560 int vlen;
561 int i;
562 const char *path;
563
564 if (strlen(dv->sdev_path) <= 5)
565 return (NULL);
566
567 if (strncmp(dv->sdev_path, "/dev/", 5) != 0)
568 return (NULL);
569 path = dv->sdev_path + 5;
570
571 for (i = 0; vtab[i].vt_name; i++) {
572 if (strcmp(vtab[i].vt_name, path) == 0)
573 return (&vtab[i]);
574 if (vtab[i].vt_flags & SDEV_SUBDIR) {
575 vlen = strlen(vtab[i].vt_name);
576 if ((strncmp(vtab[i].vt_name, path,
577 vlen - 1) == 0) && path[vlen] == '/')
578 return (&vtab[i]);
579 }
580
581 }
582 return (NULL);
583 }
584
585 /*
586 * sets a directory's vnodeops if the directory is in the vtab;
587 */
588 static struct vnodeops *
589 sdev_get_vop(struct sdev_node *dv)
590 {
591 struct sdev_vop_table *vtp;
592 char *path;
593
594 path = dv->sdev_path;
595 ASSERT(path);
596
597 /* gets the relative path to /dev/ */
598 path += 5;
599
600 /* gets the vtab entry it matches */
601 if ((vtp = sdev_match(dv)) != NULL) {
602 dv->sdev_flags |= vtp->vt_flags;
603 if (SDEV_IS_PERSIST(dv->sdev_dotdot) &&
604 (SDEV_IS_PERSIST(dv) || !SDEV_IS_DYNAMIC(dv)))
605 dv->sdev_flags |= SDEV_PERSIST;
606
607 if (vtp->vt_vops) {
608 if (vtp->vt_global_vops)
609 *(vtp->vt_global_vops) = vtp->vt_vops;
610
611 return (vtp->vt_vops);
612 }
613
614 if (vtp->vt_service) {
615 fs_operation_def_t *templ;
616 templ = sdev_merge_vtab(vtp->vt_service);
617 if (vn_make_ops(vtp->vt_name,
618 (const fs_operation_def_t *)templ,
619 &vtp->vt_vops) != 0) {
620 cmn_err(CE_PANIC, "%s: malformed vnode ops\n",
621 vtp->vt_name);
622 /*NOTREACHED*/
623 }
624 if (vtp->vt_global_vops) {
625 *(vtp->vt_global_vops) = vtp->vt_vops;
626 }
627 sdev_free_vtab(templ);
628
629 return (vtp->vt_vops);
630 }
631
632 return (sdev_vnodeops);
633 }
634
635 /* child inherits the persistence of the parent */
636 if (SDEV_IS_PERSIST(dv->sdev_dotdot))
637 dv->sdev_flags |= SDEV_PERSIST;
638
639 return (sdev_vnodeops);
640 }
641
642 static void
643 sdev_set_no_negcache(struct sdev_node *dv)
644 {
645 int i;
646 char *path;
647
648 ASSERT(dv->sdev_path);
649 path = dv->sdev_path + strlen("/dev/");
650
651 for (i = 0; vtab[i].vt_name; i++) {
652 if (strcmp(vtab[i].vt_name, path) == 0) {
653 if (vtab[i].vt_flags & SDEV_NO_NCACHE)
654 dv->sdev_flags |= SDEV_NO_NCACHE;
655 break;
656 }
657 }
658 }
659
660 void *
661 sdev_get_vtor(struct sdev_node *dv)
662 {
663 struct sdev_vop_table *vtp;
664
665 vtp = sdev_match(dv);
666 if (vtp)
667 return ((void *)vtp->vt_vtor);
668 else
669 return (NULL);
670 }
671
672 /*
673 * Build the base root inode
674 */
675 ino_t
676 sdev_mkino(struct sdev_node *dv)
677 {
678 ino_t ino;
679
680 /*
681 * for now, follow the lead of tmpfs here
682 * need to someday understand the requirements here
683 */
684 ino = (ino_t)(uint32_t)((uintptr_t)dv >> 3);
685 ino += SDEV_ROOTINO + 1;
686
687 return (ino);
688 }
689
690 int
691 sdev_getlink(struct vnode *linkvp, char **link)
692 {
930 if (dv->sdev_attr != NULL) {
931 kmem_free(dv->sdev_attr, sizeof (struct vattr));
932 dv->sdev_attr = NULL;
933 }
934
935 if (dv->sdev_name != NULL) {
936 kmem_free(dv->sdev_name, dv->sdev_namelen + 1);
937 dv->sdev_name = NULL;
938 }
939
940 if (dv->sdev_symlink != NULL) {
941 kmem_free(dv->sdev_symlink, strlen(dv->sdev_symlink) + 1);
942 dv->sdev_symlink = NULL;
943 }
944
945 if (dv->sdev_path) {
946 kmem_free(dv->sdev_path, strlen(dv->sdev_path) + 1);
947 dv->sdev_path = NULL;
948 }
949
950 if (!SDEV_IS_GLOBAL(dv))
951 sdev_prof_free(dv);
952
953 if (SDEVTOV(dv)->v_type == VDIR) {
954 ASSERT(SDEV_FIRST_ENTRY(dv) == NULL);
955 avl_destroy(&dv->sdev_entries);
956 }
957
958 mutex_destroy(&dv->sdev_lookup_lock);
959 cv_destroy(&dv->sdev_lookup_cv);
960
961 /* return node to initial state as per constructor */
962 (void) memset((void *)&dv->sdev_instance_data, 0,
963 sizeof (dv->sdev_instance_data));
964 vn_invalid(SDEVTOV(dv));
965 dv->sdev_private = NULL;
966 kmem_cache_free(sdev_node_cache, dv);
967 }
968
969 /*
970 * DIRECTORY CACHE lookup
971 */
2928
2929 kmem_free(pathlist, (npaths_alloc + 1) * sizeof (char *));
2930 }
2931
2932 int
2933 sdev_modctl_devexists(const char *path)
2934 {
2935 vnode_t *vp;
2936 int error;
2937
2938 error = sdev_modctl_lookup(path, &vp);
2939 sdcmn_err11(("modctl dev exists: %s by %s: %s\n",
2940 path, curproc->p_user.u_comm,
2941 (error == 0) ? "ok" : "failed"));
2942 if (error == 0)
2943 VN_RELE(vp);
2944
2945 return (error);
2946 }
2947
2948 extern int sdev_vnodeops_tbl_size;
2949
2950 /*
2951 * construct a new template with overrides from vtab
2952 */
2953 static fs_operation_def_t *
2954 sdev_merge_vtab(const fs_operation_def_t tab[])
2955 {
2956 fs_operation_def_t *new;
2957 const fs_operation_def_t *tab_entry;
2958
2959 /* make a copy of standard vnode ops table */
2960 new = kmem_alloc(sdev_vnodeops_tbl_size, KM_SLEEP);
2961 bcopy((void *)sdev_vnodeops_tbl, new, sdev_vnodeops_tbl_size);
2962
2963 /* replace the overrides from tab */
2964 for (tab_entry = tab; tab_entry->name != NULL; tab_entry++) {
2965 fs_operation_def_t *std_entry = new;
2966 while (std_entry->name) {
2967 if (strcmp(tab_entry->name, std_entry->name) == 0) {
2968 std_entry->func = tab_entry->func;
2969 break;
2970 }
2971 std_entry++;
2972 }
2973 if (std_entry->name == NULL)
2974 cmn_err(CE_NOTE, "sdev_merge_vtab: entry %s unused.",
2975 tab_entry->name);
2976 }
2977
2978 return (new);
2979 }
2980
2981 /* free memory allocated by sdev_merge_vtab */
2982 static void
2983 sdev_free_vtab(fs_operation_def_t *new)
2984 {
2985 kmem_free(new, sdev_vnodeops_tbl_size);
2986 }
2987
2988 /*
2989 * a generic setattr() function
2990 *
2991 * note: flags only supports AT_UID and AT_GID.
2992 * Future enhancements can be done for other types, e.g. AT_MODE
2993 */
2994 int
2995 devname_setattr_func(struct vnode *vp, struct vattr *vap, int flags,
2996 struct cred *cred, int (*callback)(struct sdev_node *, struct vattr *,
2997 int), int protocol)
2998 {
2999 struct sdev_node *dv = VTOSDEV(vp);
3000 struct sdev_node *parent = dv->sdev_dotdot;
3001 struct vattr *get;
3002 uint_t mask = vap->va_mask;
3003 int error;
3004
3005 /* some sanity checks */
3006 if (vap->va_mask & AT_NOSET)
3007 return (EINVAL);
|