1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright (c) 1986, 2010, Oracle and/or its affiliates. All rights reserved.
23 *
24 * Copyright (c) 1983,1984,1985,1986,1987,1988,1989 AT&T.
25 * All rights reserved.
26 */
27
28 #include <sys/param.h>
29 #include <sys/types.h>
30 #include <sys/systm.h>
31 #include <sys/thread.h>
32 #include <sys/t_lock.h>
33 #include <sys/time.h>
34 #include <sys/vnode.h>
35 #include <sys/vfs.h>
36 #include <sys/errno.h>
37 #include <sys/buf.h>
38 #include <sys/stat.h>
39 #include <sys/cred.h>
40 #include <sys/kmem.h>
41 #include <sys/debug.h>
42 #include <sys/dnlc.h>
43 #include <sys/vmsystm.h>
44 #include <sys/flock.h>
45 #include <sys/share.h>
46 #include <sys/cmn_err.h>
47 #include <sys/tiuser.h>
48 #include <sys/sysmacros.h>
49 #include <sys/callb.h>
50 #include <sys/acl.h>
51 #include <sys/kstat.h>
52 #include <sys/signal.h>
53 #include <sys/list.h>
54 #include <sys/zone.h>
55
56 #include <rpc/types.h>
57 #include <rpc/xdr.h>
58 #include <rpc/auth.h>
59 #include <rpc/clnt.h>
60
61 #include <nfs/nfs.h>
62 #include <nfs/nfs_clnt.h>
63
64 #include <nfs/rnode.h>
65 #include <nfs/nfs_acl.h>
66 #include <nfs/lm.h>
67
68 #include <vm/hat.h>
69 #include <vm/as.h>
70 #include <vm/page.h>
71 #include <vm/pvn.h>
72 #include <vm/seg.h>
73 #include <vm/seg_map.h>
74 #include <vm/seg_vn.h>
75
76 static void nfs3_attr_cache(vnode_t *, vattr_t *, vattr_t *, hrtime_t,
77 cred_t *);
78 static int nfs_getattr_cache(vnode_t *, struct vattr *);
79 static int nfs_remove_locking_id(vnode_t *, int, char *, char *, int *);
80
81 struct mi_globals {
82 kmutex_t mig_lock; /* lock protecting mig_list */
1164 case NF3SOCK:
1165 case NF3FIFO:
1166 default:
1167 vap->va_rdev = 0;
1168 vap->va_blksize = MAXBSIZE;
1169 vap->va_nblocks = 0;
1170 break;
1171 }
1172 vap->va_seq = 0;
1173 return (0);
1174 }
1175
1176 /*
1177 * Asynchronous I/O parameters. nfs_async_threads is the high-water mark
1178 * for the demand-based allocation of async threads per mount.
1179 * nfs_async_timeout is the amount of time an async thread will live after
1180 * it becomes idle, unless new I/O requests arrive before it exits.
1181 * See nfs_async_putpage and nfs_async_start.
1182 */
1183
1184 int nfs_async_timeout = -1; /* uninitialized */
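/*
 * The -1 above is a sentinel meaning "not yet set"; the async worker
 * startup path presumably replaces it with a built-in default idle
 * timeout. As a hedged sketch (an assumption, not taken from this file),
 * a module-global tunable like this is normally overridden from
 * /etc/system, assuming the variable lives in the "nfs" module and that
 * the value is expressed in clock ticks:
 *
 *	set nfs:nfs_async_timeout=6000
 */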
1185
1186 static void nfs_async_start(struct vfs *);
1187 static void nfs_async_pgops_start(struct vfs *);
1188 static void nfs_async_common_start(struct vfs *, int);
1189
1190 static void
1191 free_async_args(struct nfs_async_reqs *args)
1192 {
1193 rnode_t *rp;
1194
1195 if (args->a_io != NFS_INACTIVE) {
1196 rp = VTOR(args->a_vp);
1197 mutex_enter(&rp->r_statelock);
1198 rp->r_count--;
1199 if (args->a_io == NFS_PUTAPAGE ||
1200 args->a_io == NFS_PAGEIO)
1201 rp->r_awcount--;
1202 cv_broadcast(&rp->r_cv);
1203 mutex_exit(&rp->r_statelock);
1204 VN_RELE(args->a_vp);
1337 /*
1338 * Signal (and wait for) the async manager thread to clean up and go away.
1339 */
1340 void
1341 nfs_async_manager_stop(vfs_t *vfsp)
1342 {
1343 mntinfo_t *mi = VFTOMI(vfsp);
1344
1345 mutex_enter(&mi->mi_async_lock);
1346 mutex_enter(&mi->mi_lock);
1347 mi->mi_flags |= MI_ASYNC_MGR_STOP;
1348 mutex_exit(&mi->mi_lock);
1349 cv_broadcast(&mi->mi_async_reqs_cv);
1350 while (mi->mi_manager_thread != NULL)
1351 cv_wait(&mi->mi_async_cv, &mi->mi_async_lock);
1352 mutex_exit(&mi->mi_async_lock);
1353 }
1354
1355 int
1356 nfs_async_readahead(vnode_t *vp, u_offset_t blkoff, caddr_t addr,
1357 struct seg *seg, cred_t *cr, void (*readahead)(vnode_t *,
1358 u_offset_t, caddr_t, struct seg *, cred_t *))
1359 {
1360 rnode_t *rp;
1361 mntinfo_t *mi;
1362 struct nfs_async_reqs *args;
1363
1364 rp = VTOR(vp);
1365 ASSERT(rp->r_freef == NULL);
1366
1367 mi = VTOMI(vp);
1368
1369 /*
1370 * If addr falls in a different segment, don't bother doing readahead.
1371 */
1372 if (addr >= seg->s_base + seg->s_size)
1373 return (-1);
1374
1375 /*
1376 * If we can't allocate a request structure, punt on the readahead.
1377 */
1378 if ((args = kmem_alloc(sizeof (*args), KM_NOSLEEP)) == NULL)
1437
1438 mi->mi_async_req_count++;
1439 ASSERT(mi->mi_async_req_count != 0);
1440 cv_signal(&mi->mi_async_reqs_cv);
1441 mutex_exit(&mi->mi_async_lock);
1442 return (0);
1443
1444 noasync:
1445 mutex_enter(&rp->r_statelock);
1446 rp->r_count--;
1447 cv_broadcast(&rp->r_cv);
1448 mutex_exit(&rp->r_statelock);
1449 VN_RELE(vp);
1450 crfree(cr);
1451 kmem_free(args, sizeof (*args));
1452 return (-1);
1453 }
1454
1455 int
1456 nfs_async_putapage(vnode_t *vp, page_t *pp, u_offset_t off, size_t len,
1457 int flags, cred_t *cr, int (*putapage)(vnode_t *, page_t *,
1458 u_offset_t, size_t, int, cred_t *))
1459 {
1460 rnode_t *rp;
1461 mntinfo_t *mi;
1462 struct nfs_async_reqs *args;
1463
1464 ASSERT(flags & B_ASYNC);
1465 ASSERT(vp->v_vfsp != NULL);
1466
1467 rp = VTOR(vp);
1468 ASSERT(rp->r_count > 0);
1469
1470 mi = VTOMI(vp);
1471
1472 /*
1473 * If we can't allocate a request structure, do the putpage
1474 * operation synchronously in this thread's context.
1475 */
1476 if ((args = kmem_alloc(sizeof (*args), KM_NOSLEEP)) == NULL)
1477 goto noasync;
1478
1789 mutex_enter(&rp->r_statelock);
1790 ASSERT(rdc->flags & RDDIR);
1791 rdc->flags &= ~RDDIR;
1792 rdc->flags |= RDDIRREQ;
1793 /*
1794 * Check whether RDDIRWAIT is set. If it is, wake up the
1795 * thread sleeping in cv_wait_sig(). The awakened thread
1796 * will reset the flag to RDDIR and will continue with the
1797 * readdir operation.
1798 */
1799 if (rdc->flags & RDDIRWAIT) {
1800 rdc->flags &= ~RDDIRWAIT;
1801 cv_broadcast(&rdc->cv);
1802 }
1803 mutex_exit(&rp->r_statelock);
1804 rddir_cache_rele(rdc);
1805 }
1806
1807 void
1808 nfs_async_commit(vnode_t *vp, page_t *plist, offset3 offset, count3 count,
1809 cred_t *cr, void (*commit)(vnode_t *, page_t *, offset3, count3,
1810 cred_t *))
1811 {
1812 rnode_t *rp;
1813 mntinfo_t *mi;
1814 struct nfs_async_reqs *args;
1815 page_t *pp;
1816
1817 rp = VTOR(vp);
1818 mi = VTOMI(vp);
1819
1820 /*
1821 * If we can't allocate a request structure, do the commit
1822 * operation synchronously in this thread's context.
1823 */
1824 if ((args = kmem_alloc(sizeof (*args), KM_NOSLEEP)) == NULL)
1825 goto noasync;
1826
1827 args->a_next = NULL;
1828 #ifdef DEBUG
1829 args->a_queuer = curthread;
1830 #endif
2527 rp = VTOR(vp);
2528 mutex_enter(&rp->r_statelock);
2529 while (rp->r_flags & RTRUNCATE)
2530 cv_wait(&rp->r_cv, &rp->r_statelock);
2531 rp->r_flags |= RTRUNCATE;
2532 if (off == (u_offset_t)0) {
2533 rp->r_flags &= ~RDIRTY;
2534 if (!(rp->r_flags & RSTALE))
2535 rp->r_error = 0;
2536 }
2537 rp->r_truncaddr = off;
2538 mutex_exit(&rp->r_statelock);
2539 (void) pvn_vplist_dirty(vp, off, rp->r_putapage,
2540 B_INVAL | B_TRUNC, cr);
2541 mutex_enter(&rp->r_statelock);
2542 rp->r_flags &= ~RTRUNCATE;
2543 cv_broadcast(&rp->r_cv);
2544 mutex_exit(&rp->r_statelock);
2545 }
2546
2547 static int nfs_write_error_to_cons_only = 0;
2548 #define MSG(x) (nfs_write_error_to_cons_only ? (x) : (x) + 1)
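/*
 * The write-error format strings below carry a leading '^', which
 * cmn_err(9F) interprets as "send to the console only". With
 * nfs_write_error_to_cons_only set, MSG() passes the format through
 * unchanged and the message stays on the console; otherwise it skips
 * the '^' (x + 1), letting the message go to both the console and the
 * system log.
 */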
2549
2550 /*
2551 * Print a file handle
2552 */
2553 void
2554 nfs_printfhandle(nfs_fhandle *fhp)
2555 {
2556 int *ip;
2557 char *buf;
2558 size_t bufsize;
2559 char *cp;
2560
2561 /*
2562 * 13 == "(file handle:"
2563 * maximum of NFS_FHANDLE / sizeof (*ip) elements in fh_buf times
2564 * 1 == ' '
2565 * 8 == maximum strlen of "%x"
2566 * 3 == ")\n\0"
2567 */
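/*
 * Taken together, the sizing above presumably works out to
 *
 *	bufsize = 13 + (NFS_FHANDLE / sizeof (*ip)) * (1 + 8) + 3;
 *
 * (a hedged reconstruction; the actual allocation is not shown here):
 * the fixed prefix, at most 9 characters of " %x" output per word of
 * the file handle, and the trailing ")\n\0".
 */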
2577 for (ip = (int *)fhp->fh_buf;
2578 ip < (int *)&fhp->fh_buf[fhp->fh_len];
2579 ip++) {
2580 (void) sprintf(cp, " %x", *ip);
2581 while (*cp != '\0')
2582 cp++;
2583 }
2584 (void) strcpy(cp, ")\n");
2585
2586 zcmn_err(getzoneid(), CE_CONT, MSG("^%s"), buf);
2587
2588 kmem_free(buf, bufsize);
2589 }
2590
2591 /*
2592 * Notify the system administrator that an NFS write error has
2593 * occurred.
2594 */
2595
2596 /* seconds between ENOSPC/EDQUOT messages */
2597 clock_t nfs_write_error_interval = 5;
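/*
 * The interval above is in seconds, while ddi_get_lbolt() below returns
 * clock ticks; the rate-limiting check (not shown here) presumably scales
 * the interval by hz before comparing it with the time of the last message.
 */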
2598
2599 void
2600 nfs_write_error(vnode_t *vp, int error, cred_t *cr)
2601 {
2602 mntinfo_t *mi;
2603 clock_t now;
2604
2605 mi = VTOMI(vp);
2606 /*
2607 * In the case of a forced unmount or zone shutdown, do not print any
2608 * messages, since doing so can flood the console with error messages.
2609 */
2610 if (FS_OR_ZONE_GONE(mi->mi_vfsp))
2611 return;
2612
2613 /*
2614 * No use in flooding the console with ENOSPC
2615 * messages from the same file system.
2616 */
2617 now = ddi_get_lbolt();
2811 ASSERT(nfs_clntup == B_FALSE);
2812 #endif
2813
2814 error = nfs_subrinit();
2815 if (error)
2816 return (error);
2817
2818 error = nfs_vfsinit();
2819 if (error) {
2820 /*
2821 * Clean up the work done by nfs_subrinit()
2822 */
2823 nfs_subrfini();
2824 return (error);
2825 }
2826 zone_key_create(&mi_list_key, nfs_mi_init, nfs_mi_shutdown,
2827 nfs_mi_destroy);
2828
2829 nfs4_clnt_init();
2830
2831 #ifdef DEBUG
2832 nfs_clntup = B_TRUE;
2833 #endif
2834
2835 return (0);
2836 }
2837
2838 /*
2839 * This routine is called only if the NFS client has been initialized but
2840 * the module failed to install. It cleans up the previously
2841 * allocated/initialized state.
2842 */
2843 void
2844 nfs_clntfini(void)
2845 {
2846 (void) zone_key_delete(mi_list_key);
2847 nfs_subrfini();
2848 nfs_vfsfini();
2849 nfs4_clnt_fini();
2850 }
2851
2852 /*
2853 * nfs_lockrelease:
2854 *
2855 * Release any locks on the given vnode that are held by the current
2856 * process.
2857 */
2858 void
2859 nfs_lockrelease(vnode_t *vp, int flag, offset_t offset, cred_t *cr)
2860 {
2861 flock64_t ld;
2862 struct shrlock shr;
2863 char *buf;
2864 int remote_lock_possible;
2865 int ret;
2866
2867 ASSERT((uintptr_t)vp > KERNELBASE);
2868
2869 /*
1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 1986, 2010, Oracle and/or its affiliates. All rights reserved.
24 */
25
26 /*
27 * Copyright (c) 1983,1984,1985,1986,1987,1988,1989 AT&T.
28 * All rights reserved.
29 */
30
31 /*
32 * Copyright 2018 Nexenta Systems, Inc.
33 */
34
35 #include <sys/param.h>
36 #include <sys/types.h>
37 #include <sys/systm.h>
38 #include <sys/thread.h>
39 #include <sys/t_lock.h>
40 #include <sys/time.h>
41 #include <sys/vnode.h>
42 #include <sys/vfs.h>
43 #include <sys/errno.h>
44 #include <sys/buf.h>
45 #include <sys/stat.h>
46 #include <sys/cred.h>
47 #include <sys/kmem.h>
48 #include <sys/debug.h>
49 #include <sys/dnlc.h>
50 #include <sys/vmsystm.h>
51 #include <sys/flock.h>
52 #include <sys/share.h>
53 #include <sys/cmn_err.h>
54 #include <sys/tiuser.h>
55 #include <sys/sysmacros.h>
56 #include <sys/callb.h>
57 #include <sys/acl.h>
58 #include <sys/kstat.h>
59 #include <sys/signal.h>
60 #include <sys/list.h>
61 #include <sys/zone.h>
62
63 #include <rpc/types.h>
64 #include <rpc/xdr.h>
65 #include <rpc/auth.h>
66 #include <rpc/clnt.h>
67
68 #include <nfs/nfs.h>
69 #include <nfs/nfs_clnt.h>
70 #include <nfs/nfs_cmd.h>
71
72 #include <nfs/rnode.h>
73 #include <nfs/nfs_acl.h>
74 #include <nfs/lm.h>
75
76 #include <vm/hat.h>
77 #include <vm/as.h>
78 #include <vm/page.h>
79 #include <vm/pvn.h>
80 #include <vm/seg.h>
81 #include <vm/seg_map.h>
82 #include <vm/seg_vn.h>
83
84 static void nfs3_attr_cache(vnode_t *, vattr_t *, vattr_t *, hrtime_t,
85 cred_t *);
86 static int nfs_getattr_cache(vnode_t *, struct vattr *);
87 static int nfs_remove_locking_id(vnode_t *, int, char *, char *, int *);
88
89 struct mi_globals {
90 kmutex_t mig_lock; /* lock protecting mig_list */
1172 case NF3SOCK:
1173 case NF3FIFO:
1174 default:
1175 vap->va_rdev = 0;
1176 vap->va_blksize = MAXBSIZE;
1177 vap->va_nblocks = 0;
1178 break;
1179 }
1180 vap->va_seq = 0;
1181 return (0);
1182 }
1183
1184 /*
1185 * Asynchronous I/O parameters. nfs_async_threads is the high-water mark
1186 * for the demand-based allocation of async threads per mount.
1187 * nfs_async_timeout is the amount of time an async thread will live after
1188 * it becomes idle, unless new I/O requests arrive before it exits.
1189 * See nfs_async_putpage and nfs_async_start.
1190 */
1191
1192 volatile int nfs_async_timeout = -1; /* uninitialized */
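/*
 * The -1 above is a sentinel meaning "not yet set"; the async worker
 * startup path presumably replaces it with a built-in default idle
 * timeout. As a hedged sketch (an assumption, not taken from this file),
 * a module-global tunable like this is normally overridden from
 * /etc/system, assuming the variable lives in the "nfs" module and that
 * the value is expressed in clock ticks:
 *
 *	set nfs:nfs_async_timeout=6000
 */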
1193
1194 static void nfs_async_start(struct vfs *);
1195 static void nfs_async_pgops_start(struct vfs *);
1196 static void nfs_async_common_start(struct vfs *, int);
1197
1198 static void
1199 free_async_args(struct nfs_async_reqs *args)
1200 {
1201 rnode_t *rp;
1202
1203 if (args->a_io != NFS_INACTIVE) {
1204 rp = VTOR(args->a_vp);
1205 mutex_enter(&rp->r_statelock);
1206 rp->r_count--;
1207 if (args->a_io == NFS_PUTAPAGE ||
1208 args->a_io == NFS_PAGEIO)
1209 rp->r_awcount--;
1210 cv_broadcast(&rp->r_cv);
1211 mutex_exit(&rp->r_statelock);
1212 VN_RELE(args->a_vp);
1345 /*
1346 * Signal (and wait for) the async manager thread to clean up and go away.
1347 */
1348 void
1349 nfs_async_manager_stop(vfs_t *vfsp)
1350 {
1351 mntinfo_t *mi = VFTOMI(vfsp);
1352
1353 mutex_enter(&mi->mi_async_lock);
1354 mutex_enter(&mi->mi_lock);
1355 mi->mi_flags |= MI_ASYNC_MGR_STOP;
1356 mutex_exit(&mi->mi_lock);
1357 cv_broadcast(&mi->mi_async_reqs_cv);
1358 while (mi->mi_manager_thread != NULL)
1359 cv_wait(&mi->mi_async_cv, &mi->mi_async_lock);
1360 mutex_exit(&mi->mi_async_lock);
1361 }
1362
1363 int
1364 nfs_async_readahead(vnode_t *vp, u_offset_t blkoff, caddr_t addr,
1365 struct seg *seg, cred_t *cr, void (*readahead)(vnode_t *, u_offset_t,
1366 caddr_t, struct seg *, cred_t *))
1367 {
1368 rnode_t *rp;
1369 mntinfo_t *mi;
1370 struct nfs_async_reqs *args;
1371
1372 rp = VTOR(vp);
1373 ASSERT(rp->r_freef == NULL);
1374
1375 mi = VTOMI(vp);
1376
1377 /*
1378 * If addr falls in a different segment, don't bother doing readahead.
1379 */
1380 if (addr >= seg->s_base + seg->s_size)
1381 return (-1);
1382
1383 /*
1384 * If we can't allocate a request structure, punt on the readahead.
1385 */
1386 if ((args = kmem_alloc(sizeof (*args), KM_NOSLEEP)) == NULL)
1445
1446 mi->mi_async_req_count++;
1447 ASSERT(mi->mi_async_req_count != 0);
1448 cv_signal(&mi->mi_async_reqs_cv);
1449 mutex_exit(&mi->mi_async_lock);
1450 return (0);
1451
1452 noasync:
1453 mutex_enter(&rp->r_statelock);
1454 rp->r_count--;
1455 cv_broadcast(&rp->r_cv);
1456 mutex_exit(&rp->r_statelock);
1457 VN_RELE(vp);
1458 crfree(cr);
1459 kmem_free(args, sizeof (*args));
1460 return (-1);
1461 }
1462
1463 int
1464 nfs_async_putapage(vnode_t *vp, page_t *pp, u_offset_t off, size_t len,
1465 int flags, cred_t *cr, int (*putapage)(vnode_t *, page_t *, u_offset_t,
1466 size_t, int, cred_t *))
1467 {
1468 rnode_t *rp;
1469 mntinfo_t *mi;
1470 struct nfs_async_reqs *args;
1471
1472 ASSERT(flags & B_ASYNC);
1473 ASSERT(vp->v_vfsp != NULL);
1474
1475 rp = VTOR(vp);
1476 ASSERT(rp->r_count > 0);
1477
1478 mi = VTOMI(vp);
1479
1480 /*
1481 * If we can't allocate a request structure, do the putpage
1482 * operation synchronously in this thread's context.
1483 */
1484 if ((args = kmem_alloc(sizeof (*args), KM_NOSLEEP)) == NULL)
1485 goto noasync;
1486
1797 mutex_enter(&rp->r_statelock);
1798 ASSERT(rdc->flags & RDDIR);
1799 rdc->flags &= ~RDDIR;
1800 rdc->flags |= RDDIRREQ;
1801 /*
1802 * Check whether RDDIRWAIT is set. If it is, wake up the
1803 * thread sleeping in cv_wait_sig(). The awakened thread
1804 * will reset the flag to RDDIR and will continue with the
1805 * readdir operation.
1806 */
1807 if (rdc->flags & RDDIRWAIT) {
1808 rdc->flags &= ~RDDIRWAIT;
1809 cv_broadcast(&rdc->cv);
1810 }
1811 mutex_exit(&rp->r_statelock);
1812 rddir_cache_rele(rdc);
1813 }
1814
1815 void
1816 nfs_async_commit(vnode_t *vp, page_t *plist, offset3 offset, count3 count,
1817 cred_t *cr, void (*commit)(vnode_t *, page_t *, offset3, count3, cred_t *))
1818 {
1819 rnode_t *rp;
1820 mntinfo_t *mi;
1821 struct nfs_async_reqs *args;
1822 page_t *pp;
1823
1824 rp = VTOR(vp);
1825 mi = VTOMI(vp);
1826
1827 /*
1828 * If we can't allocate a request structure, do the commit
1829 * operation synchronously in this thread's context.
1830 */
1831 if ((args = kmem_alloc(sizeof (*args), KM_NOSLEEP)) == NULL)
1832 goto noasync;
1833
1834 args->a_next = NULL;
1835 #ifdef DEBUG
1836 args->a_queuer = curthread;
1837 #endif
2534 rp = VTOR(vp);
2535 mutex_enter(&rp->r_statelock);
2536 while (rp->r_flags & RTRUNCATE)
2537 cv_wait(&rp->r_cv, &rp->r_statelock);
2538 rp->r_flags |= RTRUNCATE;
2539 if (off == (u_offset_t)0) {
2540 rp->r_flags &= ~RDIRTY;
2541 if (!(rp->r_flags & RSTALE))
2542 rp->r_error = 0;
2543 }
2544 rp->r_truncaddr = off;
2545 mutex_exit(&rp->r_statelock);
2546 (void) pvn_vplist_dirty(vp, off, rp->r_putapage,
2547 B_INVAL | B_TRUNC, cr);
2548 mutex_enter(&rp->r_statelock);
2549 rp->r_flags &= ~RTRUNCATE;
2550 cv_broadcast(&rp->r_cv);
2551 mutex_exit(&rp->r_statelock);
2552 }
2553
2554 volatile int nfs_write_error_to_cons_only = 0;
2555 #define MSG(x) (nfs_write_error_to_cons_only ? (x) : (x) + 1)
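/*
 * The write-error format strings below carry a leading '^', which
 * cmn_err(9F) interprets as "send to the console only". With
 * nfs_write_error_to_cons_only set, MSG() passes the format through
 * unchanged and the message stays on the console; otherwise it skips
 * the '^' (x + 1), letting the message go to both the console and the
 * system log.
 */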
2556
2557 /*
2558 * Print a file handle
2559 */
2560 void
2561 nfs_printfhandle(nfs_fhandle *fhp)
2562 {
2563 int *ip;
2564 char *buf;
2565 size_t bufsize;
2566 char *cp;
2567
2568 /*
2569 * 13 == "(file handle:"
2570 * maximum of NFS_FHANDLE / sizeof (*ip) elements in fh_buf times
2571 * 1 == ' '
2572 * 8 == maximum strlen of "%x"
2573 * 3 == ")\n\0"
2574 */
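/*
 * Taken together, the sizing above presumably works out to
 *
 *	bufsize = 13 + (NFS_FHANDLE / sizeof (*ip)) * (1 + 8) + 3;
 *
 * (a hedged reconstruction; the actual allocation is not shown here):
 * the fixed prefix, at most 9 characters of " %x" output per word of
 * the file handle, and the trailing ")\n\0".
 */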
2584 for (ip = (int *)fhp->fh_buf;
2585 ip < (int *)&fhp->fh_buf[fhp->fh_len];
2586 ip++) {
2587 (void) sprintf(cp, " %x", *ip);
2588 while (*cp != '\0')
2589 cp++;
2590 }
2591 (void) strcpy(cp, ")\n");
2592
2593 zcmn_err(getzoneid(), CE_CONT, MSG("^%s"), buf);
2594
2595 kmem_free(buf, bufsize);
2596 }
2597
2598 /*
2599 * Notify the system administrator that an NFS write error has
2600 * occurred.
2601 */
2602
2603 /* seconds between ENOSPC/EDQUOT messages */
2604 volatile clock_t nfs_write_error_interval = 5;
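/*
 * The interval above is in seconds, while ddi_get_lbolt() below returns
 * clock ticks; the rate-limiting check (not shown here) presumably scales
 * the interval by hz before comparing it with the time of the last message.
 */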
2605
2606 void
2607 nfs_write_error(vnode_t *vp, int error, cred_t *cr)
2608 {
2609 mntinfo_t *mi;
2610 clock_t now;
2611
2612 mi = VTOMI(vp);
2613 /*
2614 * In the case of a forced unmount or zone shutdown, do not print any
2615 * messages, since doing so can flood the console with error messages.
2616 */
2617 if (FS_OR_ZONE_GONE(mi->mi_vfsp))
2618 return;
2619
2620 /*
2621 * No use in flooding the console with ENOSPC
2622 * messages from the same file system.
2623 */
2624 now = ddi_get_lbolt();
2818 ASSERT(nfs_clntup == B_FALSE);
2819 #endif
2820
2821 error = nfs_subrinit();
2822 if (error)
2823 return (error);
2824
2825 error = nfs_vfsinit();
2826 if (error) {
2827 /*
2828 * Clean up the work done by nfs_subrinit()
2829 */
2830 nfs_subrfini();
2831 return (error);
2832 }
2833 zone_key_create(&mi_list_key, nfs_mi_init, nfs_mi_shutdown,
2834 nfs_mi_destroy);
2835
2836 nfs4_clnt_init();
2837
2838 nfscmd_init();
2839
2840 #ifdef DEBUG
2841 nfs_clntup = B_TRUE;
2842 #endif
2843
2844 return (0);
2845 }
2846
2847 /*
2848 * This routine is called only if the NFS client has been initialized but
2849 * the module failed to install. It cleans up the previously
2850 * allocated/initialized state.
2851 */
2852 void
2853 nfs_clntfini(void)
2854 {
2855 (void) zone_key_delete(mi_list_key);
2856 nfs_subrfini();
2857 nfs_vfsfini();
2858 nfs4_clnt_fini();
2859 nfscmd_fini();
2860 }
2861
2862 /*
2863 * nfs_lockrelease:
2864 *
2865 * Release any locks on the given vnode that are held by the current
2866 * process.
2867 */
2868 void
2869 nfs_lockrelease(vnode_t *vp, int flag, offset_t offset, cred_t *cr)
2870 {
2871 flock64_t ld;
2872 struct shrlock shr;
2873 char *buf;
2874 int remote_lock_possible;
2875 int ret;
2876
2877 ASSERT((uintptr_t)vp > KERNELBASE);
2878
2879 /*