/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 1986, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2017 by Delphix. All rights reserved.
 */

/*
 * Copyright (c) 1983,1984,1985,1986,1987,1988,1989 AT&T.
 * All Rights Reserved
 */

#include <sys/param.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/thread.h>
#include <sys/t_lock.h>
#include <sys/time.h>
#include <sys/vnode.h>
#include <sys/vfs.h>
#include <sys/errno.h>
#include <sys/buf.h>
#include <sys/stat.h>
#include <sys/cred.h>
#include <sys/kmem.h>
#include <sys/debug.h>
#include <sys/dnlc.h>
#include <sys/vmsystm.h>
#include <sys/flock.h>
#include <sys/share.h>
#include <sys/cmn_err.h>
#include <sys/tiuser.h>
#include <sys/sysmacros.h>
#include <sys/callb.h>
#include <sys/acl.h>
#include <sys/kstat.h>
#include <sys/signal.h>
#include <sys/disp.h>
#include <sys/atomic.h>
#include <sys/list.h>
#include <sys/sdt.h>

#include <rpc/types.h>
#include <rpc/xdr.h>
#include <rpc/auth.h>
#include <rpc/clnt.h>

#include <nfs/nfs.h>
#include <nfs/nfs_clnt.h>
#include <nfs/nfs_acl.h>

#include <nfs/nfs4.h>
#include <nfs/rnode4.h>
#include <nfs/nfs4_clnt.h>

#include <vm/hat.h>
#include <vm/as.h>
#include <vm/page.h>
#include <vm/pvn.h>
#include <vm/seg.h>
#include <vm/seg_map.h>
#include <vm/seg_vn.h>

#include <sys/ddi.h>

/*
 * Arguments to page-flush thread.
 */
typedef struct {
	vnode_t *vp;
	cred_t *cr;
} pgflush_t;

#ifdef DEBUG
int nfs4_client_lease_debug;
int nfs4_sharedfh_debug;
int nfs4_fname_debug;

/* temporary: panic if v_type is inconsistent with r_attr va_type */
int nfs4_vtype_debug;

uint_t nfs4_tsd_key;
#endif

static time_t nfs4_client_resumed = 0;
static callb_id_t cid = 0;

static int	nfs4renew(nfs4_server_t *);
static void	nfs4_attrcache_va(vnode_t *, nfs4_ga_res_t *, int);
static void	nfs4_pgflush_thread(pgflush_t *);

static boolean_t nfs4_client_cpr_callb(void *, int);

struct mi4_globals {
	kmutex_t	mig_lock;	/* lock protecting mig_list */
	list_t		mig_list;	/* list of NFS v4 mounts in zone */
	boolean_t	mig_destructor_called;
};

static zone_key_t mi4_list_key;

/*
 * Attributes caching:
 *
 * Attributes are cached in the rnode in struct vattr form.
 * There is a time associated with the cached attributes (r_time_attr_inval)
 * which tells whether the attributes are valid. The time is initialized
 * to the difference between current time and the modify time of the vnode
 * when new attributes are cached. This allows the attributes for
 * files that have changed recently to be timed out sooner than for files
 * that have not changed for a long time. There are minimum and maximum
 * timeout values that can be set per mount point.
 */
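
/*
 * Illustrative example (the values are hypothetical): with acregmin = 3s
 * and acregmax = 60s, a file whose last detected change was 10s ago is
 * cached for 10s; one that changed 1s ago gets the 3s floor, and one
 * unchanged for five minutes gets the 60s cap.  In effect:
 *
 *	delta = now - r_time_attr_saved;
 *	delta = MAX(acregmin, MIN(delta, acregmax));
 *
 * See nfs4_attrcache_va() below for the actual computation.
 */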

/*
 * If a cache purge is in progress, wait for it to finish.
 *
 * The current thread must not be in the middle of an
 * nfs4_start_op/nfs4_end_op region.  Otherwise, there could be a deadlock
 * between this thread, a recovery thread, and the page flush thread.
 */
int
nfs4_waitfor_purge_complete(vnode_t *vp)
{
	rnode4_t *rp;
	k_sigset_t smask;

	rp = VTOR4(vp);
	if ((rp->r_serial != NULL && rp->r_serial != curthread) ||
	    ((rp->r_flags & R4PGFLUSH) && rp->r_pgflush != curthread)) {
		mutex_enter(&rp->r_statelock);
		sigintr(&smask, VTOMI4(vp)->mi_flags & MI4_INT);
		while ((rp->r_serial != NULL && rp->r_serial != curthread) ||
		    ((rp->r_flags & R4PGFLUSH) &&
		    rp->r_pgflush != curthread)) {
			if (!cv_wait_sig(&rp->r_cv, &rp->r_statelock)) {
				sigunintr(&smask);
				mutex_exit(&rp->r_statelock);
				return (EINTR);
			}
		}
		sigunintr(&smask);
		mutex_exit(&rp->r_statelock);
	}
	return (0);
}

/*
 * Validate caches by checking cached attributes. If they have timed out,
 * then get new attributes from the server.  As a side effect, cache
 * invalidation is done if the attributes have changed.
 *
 * If the attributes have not timed out and if there is a cache
 * invalidation being done by some other thread, then wait until that
 * thread has completed the cache invalidation.
 */
int
nfs4_validate_caches(vnode_t *vp, cred_t *cr)
{
	int error;
	nfs4_ga_res_t gar;

	if (ATTRCACHE4_VALID(vp)) {
		error = nfs4_waitfor_purge_complete(vp);
		if (error)
			return (error);
		return (0);
	}

	return (nfs4_getattr_otw(vp, &gar, cr, 0));
}

/*
 * Fill in attributes from the cache.
 * If the cached attributes are valid, return 0 to indicate that no error
 * occurred; otherwise return 1 to indicate that an error occurred.
 */
static int
nfs4_getattr_cache(vnode_t *vp, struct vattr *vap)
{
	rnode4_t *rp;

	rp = VTOR4(vp);
	mutex_enter(&rp->r_statelock);
	mutex_enter(&rp->r_statev4_lock);
	if (ATTRCACHE4_VALID(vp)) {
		mutex_exit(&rp->r_statev4_lock);
		/*
		 * Cached attributes are valid
		 */
		*vap = rp->r_attr;
		mutex_exit(&rp->r_statelock);
		return (0);
	}
	mutex_exit(&rp->r_statev4_lock);
	mutex_exit(&rp->r_statelock);
	return (1);
}


/*
 * If the returned error is ESTALE, flush all caches.  The
 * nfs4_purge_caches() call is synchronous because all of the pages were
 * already invalidated by the nfs4_invalidate_pages() call.
 */
void
nfs4_purge_stale_fh(int errno, vnode_t *vp, cred_t *cr)
{
	struct rnode4 *rp = VTOR4(vp);

	/* Ensure that the ..._end_op() call has been done */
	ASSERT(tsd_get(nfs4_tsd_key) == NULL);

	if (errno != ESTALE)
		return;

	mutex_enter(&rp->r_statelock);
	rp->r_flags |= R4STALE;
	if (!rp->r_error)
		rp->r_error = errno;
	mutex_exit(&rp->r_statelock);
	if (nfs4_has_pages(vp))
		nfs4_invalidate_pages(vp, (u_offset_t)0, cr);
	nfs4_purge_caches(vp, NFS4_PURGE_DNLC, cr, FALSE);
}

/*
 * Purge all of the various NFS `data' caches.  If "asyncpg" is TRUE, the
 * page purge is done asynchronously.
 */
void
nfs4_purge_caches(vnode_t *vp, int purge_dnlc, cred_t *cr, int asyncpg)
{
	rnode4_t *rp;
	char *contents;
	vnode_t *xattr;
	int size;
	int pgflush;		/* are we the page flush thread? */

	/*
	 * Purge the DNLC for any entries which refer to this file.
	 */
	if (vp->v_count > 1 &&
	    (vp->v_type == VDIR || purge_dnlc == NFS4_PURGE_DNLC))
		dnlc_purge_vp(vp);

	/*
	 * Clear any readdir state bits and purge the readlink response cache.
	 */
	rp = VTOR4(vp);
	mutex_enter(&rp->r_statelock);
	rp->r_flags &= ~R4LOOKUP;
	contents = rp->r_symlink.contents;
	size = rp->r_symlink.size;
	rp->r_symlink.contents = NULL;

	xattr = rp->r_xattr_dir;
	rp->r_xattr_dir = NULL;

	/*
	 * Purge pathconf cache too.
	 */
	rp->r_pathconf.pc4_xattr_valid = 0;
	rp->r_pathconf.pc4_cache_valid = 0;

	pgflush = (curthread == rp->r_pgflush);
	mutex_exit(&rp->r_statelock);

	if (contents != NULL) {
		kmem_free((void *)contents, size);
	}

	if (xattr != NULL)
		VN_RELE(xattr);

	/*
	 * Flush the page cache.  If the current thread is the page flush
	 * thread, don't initiate a new page flush.  There's no need for
	 * it, and doing it correctly is hard.
	 */
	if (nfs4_has_pages(vp) && !pgflush) {
		if (!asyncpg) {
			(void) nfs4_waitfor_purge_complete(vp);
			nfs4_flush_pages(vp, cr);
		} else {
			pgflush_t *args;

			/*
			 * We don't hold r_statelock while creating the
			 * thread, in case the call blocks.  So we use a
			 * flag to indicate that a page flush thread is
			 * active.
			 */
			mutex_enter(&rp->r_statelock);
			if (rp->r_flags & R4PGFLUSH) {
				mutex_exit(&rp->r_statelock);
			} else {
				rp->r_flags |= R4PGFLUSH;
				mutex_exit(&rp->r_statelock);

				args = kmem_alloc(sizeof (pgflush_t),
				    KM_SLEEP);
				args->vp = vp;
				VN_HOLD(args->vp);
				args->cr = cr;
				crhold(args->cr);
				(void) zthread_create(NULL, 0,
				    nfs4_pgflush_thread, args, 0,
				    minclsyspri);
			}
		}
	}

	/*
	 * Flush the readdir response cache.
	 */
	nfs4_purge_rddir_cache(vp);
}

/*
 * Invalidate all pages for the given file, after writing back the dirty
 * ones.
 */

void
nfs4_flush_pages(vnode_t *vp, cred_t *cr)
{
	int error;
	rnode4_t *rp = VTOR4(vp);

	error = VOP_PUTPAGE(vp, (u_offset_t)0, 0, B_INVAL, cr, NULL);
	if (error == ENOSPC || error == EDQUOT) {
		mutex_enter(&rp->r_statelock);
		if (!rp->r_error)
			rp->r_error = error;
		mutex_exit(&rp->r_statelock);
	}
}

/*
 * Page flush thread.
 */

static void
nfs4_pgflush_thread(pgflush_t *args)
{
	rnode4_t *rp = VTOR4(args->vp);

	/* remember which thread we are, so we don't deadlock ourselves */
	mutex_enter(&rp->r_statelock);
	ASSERT(rp->r_pgflush == NULL);
	rp->r_pgflush = curthread;
	mutex_exit(&rp->r_statelock);

	nfs4_flush_pages(args->vp, args->cr);

	mutex_enter(&rp->r_statelock);
	rp->r_pgflush = NULL;
	rp->r_flags &= ~R4PGFLUSH;
	cv_broadcast(&rp->r_cv);
	mutex_exit(&rp->r_statelock);

	VN_RELE(args->vp);
	crfree(args->cr);
	kmem_free(args, sizeof (pgflush_t));
	zthread_exit();
}

/*
 * Purge the readdir cache of all entries which are not currently
 * being filled.
 */
void
nfs4_purge_rddir_cache(vnode_t *vp)
{
	rnode4_t *rp;

	rp = VTOR4(vp);

	mutex_enter(&rp->r_statelock);
	rp->r_direof = NULL;
	rp->r_flags &= ~R4LOOKUP;
	rp->r_flags |= R4READDIRWATTR;
	rddir4_cache_purge(rp);
	mutex_exit(&rp->r_statelock);
}

/*
 * Set attributes cache for given vnode using virtual attributes.  There is
 * no cache validation, but if the attributes are deemed to be stale, they
 * are ignored.  This corresponds to nfs3_attrcache().
 *
 * Set the timeout value on the attribute cache and fill it
 * with the passed in attributes.
 */
void
nfs4_attrcache_noinval(vnode_t *vp, nfs4_ga_res_t *garp, hrtime_t t)
{
	rnode4_t *rp = VTOR4(vp);

	mutex_enter(&rp->r_statelock);
	if (rp->r_time_attr_saved <= t)
		nfs4_attrcache_va(vp, garp, FALSE);
	mutex_exit(&rp->r_statelock);
}

/*
 * Use the passed in virtual attributes to check to see whether the
 * data and metadata caches are valid, cache the new attributes, and
 * then do the cache invalidation if required.
 *
 * The cache validation and caching of the new attributes is done
 * atomically via the use of the mutex, r_statelock.  If required,
 * the cache invalidation is done atomically w.r.t. the cache
 * validation and caching of the attributes via the pseudo lock,
 * r_serial.
 *
 * This routine is used to do cache validation and attributes caching
 * for operations with a single set of post operation attributes.
 */
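
/*
 * A note on the r_serial pseudo lock (a sketch of the protocol used
 * below, not an additional mechanism): a thread claims it by storing
 * curthread into rp->r_serial while holding r_statelock, and releases
 * it by clearing the field and doing a cv_broadcast(&rp->r_cv).  Other
 * threads "block" on it by looping in cv_wait_sig() rather than by
 * acquiring a real lock.
 */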

void
nfs4_attr_cache(vnode_t *vp, nfs4_ga_res_t *garp,
    hrtime_t t, cred_t *cr, int async,
    change_info4 *cinfo)
{
	rnode4_t *rp;
	int mtime_changed = 0;
	int ctime_changed = 0;
	vsecattr_t *vsp;
	int was_serial, set_time_cache_inval, recov;
	vattr_t *vap = &garp->n4g_va;
	mntinfo4_t *mi = VTOMI4(vp);
	len_t preattr_rsize;
	boolean_t writemodify_set = B_FALSE;
	boolean_t cachepurge_set = B_FALSE;

	ASSERT(mi->mi_vfsp->vfs_dev == garp->n4g_va.va_fsid);

	/* Is curthread the recovery thread? */
	mutex_enter(&mi->mi_lock);
	recov = (VTOMI4(vp)->mi_recovthread == curthread);
	mutex_exit(&mi->mi_lock);

	rp = VTOR4(vp);
	mutex_enter(&rp->r_statelock);
	was_serial = (rp->r_serial == curthread);
	if (rp->r_serial && !was_serial) {
		klwp_t *lwp = ttolwp(curthread);

		/*
		 * If we're the recovery thread, then purge current attrs
		 * and bail out to avoid potential deadlock between another
		 * thread caching attrs (r_serial thread), recov thread,
		 * and an async writer thread.
		 */
		if (recov) {
			PURGE_ATTRCACHE4_LOCKED(rp);
			mutex_exit(&rp->r_statelock);
			return;
		}

		if (lwp != NULL)
			lwp->lwp_nostop++;
		while (rp->r_serial != NULL) {
			if (!cv_wait_sig(&rp->r_cv, &rp->r_statelock)) {
				mutex_exit(&rp->r_statelock);
				if (lwp != NULL)
					lwp->lwp_nostop--;
				return;
			}
		}
		if (lwp != NULL)
			lwp->lwp_nostop--;
	}

	/*
	 * If there is a page flush thread, the current thread needs to
	 * bail out, to prevent a possible deadlock between the current
	 * thread (which might be in a start_op/end_op region), the
	 * recovery thread, and the page flush thread.  Expire the
	 * attribute cache, so that any attributes the current thread was
	 * going to set are not lost.
	 */
	if ((rp->r_flags & R4PGFLUSH) && rp->r_pgflush != curthread) {
		PURGE_ATTRCACHE4_LOCKED(rp);
		mutex_exit(&rp->r_statelock);
		return;
	}

	if (rp->r_time_attr_saved > t) {
		/*
		 * Attributes have been cached since these attributes were
		 * probably made.  If the cached attributes are inconsistent
		 * with the new ones, mark the cache invalid.  Otherwise,
		 * don't act on the new attributes.
		 */
		if (!CACHE4_VALID(rp, vap->va_mtime, vap->va_size))
			PURGE_ATTRCACHE4_LOCKED(rp);
		mutex_exit(&rp->r_statelock);
		return;
	}
	set_time_cache_inval = 0;
	if (cinfo) {
		/*
		 * Only directory modifying callers pass non-NULL cinfo.
		 */
		ASSERT(vp->v_type == VDIR);
		/*
		 * If the cache timeout either doesn't exist or hasn't
		 * expired, and the dir didn't change on the server before
		 * the dirmod op, and the dir didn't change after the dirmod
		 * op but before the getattr, then there's a chance that the
		 * client's cached data for this object is current (not
		 * stale).  No immediate cache flush is required.
		 */
		if ((! rp->r_time_cache_inval || t < rp->r_time_cache_inval) &&
		    cinfo->before == rp->r_change &&
		    (garp->n4g_change_valid &&
		    cinfo->after == garp->n4g_change)) {

			/*
			 * If atomic isn't set, then the before/after info
			 * cannot be blindly trusted.  For this case, we tell
			 * nfs4_attrcache_va to cache the attrs but also
			 * establish an absolute maximum cache timeout.  When
			 * the timeout is reached, caches will be flushed.
			 */
			if (! cinfo->atomic)
				set_time_cache_inval = 1;
		} else {

			/*
			 * We're not sure exactly what changed, but we know
			 * what to do: flush all caches for the dir and
			 * remove the attr timeout.
			 *
			 * a) timeout expired.  flush all caches.
			 * b) r_change != cinfo.before.  flush all caches.
			 * c) r_change == cinfo.before, but cinfo.after !=
			 *    post-op getattr(change).  flush all caches.
			 * d) post-op getattr(change) not provided by server.
			 *    flush all caches.
			 */
			mtime_changed = 1;
			ctime_changed = 1;
			rp->r_time_cache_inval = 0;
		}
	} else {
		/*
		 * The write thread, after writing data to the file on the
		 * remote server, will always set R4WRITEMODIFIED to indicate
		 * that the file on the remote server was modified with a
		 * WRITE operation, and will have marked the attribute cache
		 * as timed out.  If R4WRITEMODIFIED is set, then do not
		 * check for mtime and ctime change.
		 */
		if (!(rp->r_flags & R4WRITEMODIFIED)) {
			if (!CACHE4_VALID(rp, vap->va_mtime, vap->va_size))
				mtime_changed = 1;

			if (rp->r_attr.va_ctime.tv_sec !=
			    vap->va_ctime.tv_sec ||
			    rp->r_attr.va_ctime.tv_nsec !=
			    vap->va_ctime.tv_nsec)
				ctime_changed = 1;

			/*
			 * If the change attribute was not provided by the
			 * server or it differs, then flush all caches.
			 */
			if (!garp->n4g_change_valid ||
			    rp->r_change != garp->n4g_change) {
				mtime_changed = 1;
				ctime_changed = 1;
			}
		} else {
			writemodify_set = B_TRUE;
		}
	}

	preattr_rsize = rp->r_size;

	nfs4_attrcache_va(vp, garp, set_time_cache_inval);

	/*
	 * If we have updated the file size in nfs4_attrcache_va, then as
	 * soon as we drop r_statelock we will be in transition, purging
	 * all of our caches and updating them.  It is possible for another
	 * thread to pick up the new file size and read in zeroed data.
	 * Stall other threads until the cache purge is complete.
	 */
	if ((!cinfo) && (rp->r_size != preattr_rsize)) {
		/*
		 * If R4WRITEMODIFIED was set and we have updated the file
		 * size, the file size returned by the server is not
		 * necessarily the result of this client's WRITE.  We need
		 * to purge all caches.
		 */
		if (writemodify_set)
			mtime_changed = 1;

		if (mtime_changed && !(rp->r_flags & R4INCACHEPURGE)) {
			rp->r_flags |= R4INCACHEPURGE;
			cachepurge_set = B_TRUE;
		}
	}

	if (!mtime_changed && !ctime_changed) {
		mutex_exit(&rp->r_statelock);
		return;
	}

	rp->r_serial = curthread;

	mutex_exit(&rp->r_statelock);

	/*
	 * If we're the recov thread, then force async nfs4_purge_caches
	 * to avoid potential deadlock.
	 */
	if (mtime_changed)
		nfs4_purge_caches(vp, NFS4_NOPURGE_DNLC, cr, recov ? 1 : async);

	if ((rp->r_flags & R4INCACHEPURGE) && cachepurge_set) {
		mutex_enter(&rp->r_statelock);
		rp->r_flags &= ~R4INCACHEPURGE;
		cv_broadcast(&rp->r_cv);
		mutex_exit(&rp->r_statelock);
		cachepurge_set = B_FALSE;
	}

	if (ctime_changed) {
		(void) nfs4_access_purge_rp(rp);
		if (rp->r_secattr != NULL) {
			mutex_enter(&rp->r_statelock);
			vsp = rp->r_secattr;
			rp->r_secattr = NULL;
			mutex_exit(&rp->r_statelock);
			if (vsp != NULL)
				nfs4_acl_free_cache(vsp);
		}
	}

	if (!was_serial) {
		mutex_enter(&rp->r_statelock);
		rp->r_serial = NULL;
		cv_broadcast(&rp->r_cv);
		mutex_exit(&rp->r_statelock);
	}
}

/*
 * Set attributes cache for given vnode using virtual attributes.
 *
 * Set the timeout value on the attribute cache and fill it
 * with the passed in attributes.
 *
 * The caller must be holding r_statelock.
 */
static void
nfs4_attrcache_va(vnode_t *vp, nfs4_ga_res_t *garp, int set_cache_timeout)
{
	rnode4_t *rp;
	mntinfo4_t *mi;
	hrtime_t delta;
	hrtime_t now;
	vattr_t *vap = &garp->n4g_va;

	rp = VTOR4(vp);

	ASSERT(MUTEX_HELD(&rp->r_statelock));
	ASSERT(vap->va_mask == AT_ALL);

	/* Switch to master before checking v_flag */
	if (IS_SHADOW(vp, rp))
		vp = RTOV4(rp);

	now = gethrtime();

	mi = VTOMI4(vp);

	/*
	 * Only establish a new cache timeout (if requested).  Never
	 * extend a timeout.  Never clear a timeout.  Clearing a timeout
	 * is done by nfs4_update_dircaches (an ancestor in our call chain).
	 */
	if (set_cache_timeout && ! rp->r_time_cache_inval)
		rp->r_time_cache_inval = now + mi->mi_acdirmax;

	/*
	 * Delta is the number of nanoseconds that we will
	 * cache the attributes of the file.  It is based on
	 * the number of nanoseconds since the last time that
	 * we detected a change.  The assumption is that files
	 * that changed recently are likely to change again.
	 * Minimum and maximum values, for regular files and
	 * for directories, are enforced though.
	 *
	 * Using the time since the last change was detected
	 * eliminates direct comparison or calculation
	 * using mixed client and server times.  NFS does
	 * not make any assumptions regarding the client
	 * and server clocks being synchronized.
	 */
	if (vap->va_mtime.tv_sec != rp->r_attr.va_mtime.tv_sec ||
	    vap->va_mtime.tv_nsec != rp->r_attr.va_mtime.tv_nsec ||
	    vap->va_size != rp->r_attr.va_size) {
		rp->r_time_attr_saved = now;
	}

	if ((mi->mi_flags & MI4_NOAC) || (vp->v_flag & VNOCACHE))
		delta = 0;
	else {
		delta = now - rp->r_time_attr_saved;
		if (vp->v_type == VDIR) {
			if (delta < mi->mi_acdirmin)
				delta = mi->mi_acdirmin;
			else if (delta > mi->mi_acdirmax)
				delta = mi->mi_acdirmax;
		} else {
			if (delta < mi->mi_acregmin)
				delta = mi->mi_acregmin;
			else if (delta > mi->mi_acregmax)
				delta = mi->mi_acregmax;
		}
	}
	rp->r_time_attr_inval = now + delta;

	rp->r_attr = *vap;
	if (garp->n4g_change_valid)
		rp->r_change = garp->n4g_change;

	/*
	 * The attributes that were returned may be valid and can
	 * be used, but they may not be allowed to be cached.
	 * Reset the timers to cause immediate invalidation and
	 * clear r_change so that no VERIFY operations will succeed.
	 */
	if (garp->n4g_attrwhy == NFS4_GETATTR_NOCACHE_OK) {
		rp->r_time_attr_inval = now;
		rp->r_time_attr_saved = now;
		rp->r_change = 0;
	}

	/*
	 * If mounted_on_fileid was returned AND the object is a stub,
	 * then set the object's va_nodeid to the mounted-over fid
	 * returned by the server.
	 *
	 * If mounted_on_fileid was not provided/supported, then
	 * just set it to 0 for now.  Eventually it would be
	 * better to set it to a hashed version of FH.  This
	 * would probably be good enough to provide a unique
	 * fid/d_ino within a dir.
	 *
	 * We don't need to carry mounted_on_fileid in the
	 * rnode as long as the client never requests fileid
	 * without also requesting mounted_on_fileid.  For
	 * now, it stays.
	 */
	if (garp->n4g_mon_fid_valid) {
		rp->r_mntd_fid = garp->n4g_mon_fid;

		if (RP_ISSTUB(rp))
			rp->r_attr.va_nodeid = rp->r_mntd_fid;
	}

	/*
	 * Check to see if there are valid pathconf bits to
	 * cache in the rnode.
	 */
	if (garp->n4g_ext_res) {
		if (garp->n4g_ext_res->n4g_pc4.pc4_cache_valid) {
			rp->r_pathconf = garp->n4g_ext_res->n4g_pc4;
		} else {
			if (garp->n4g_ext_res->n4g_pc4.pc4_xattr_valid) {
				rp->r_pathconf.pc4_xattr_valid = TRUE;
				rp->r_pathconf.pc4_xattr_exists =
				    garp->n4g_ext_res->n4g_pc4.pc4_xattr_exists;
			}
		}
	}
	/*
	 * Update the size of the file if there is no cached data or if
	 * the cached data is clean and there is no data being written
	 * out.
	 */
	if (rp->r_size != vap->va_size &&
	    (!vn_has_cached_data(vp) ||
	    (!(rp->r_flags & R4DIRTY) && rp->r_count == 0))) {
		rp->r_size = vap->va_size;
	}
	nfs_setswaplike(vp, vap);
	rp->r_flags &= ~R4WRITEMODIFIED;
}

/*
 * Get attributes over-the-wire and update attributes cache
 * if no error occurred in the over-the-wire operation.
 * Return 0 if successful, otherwise error.
 */
int
nfs4_getattr_otw(vnode_t *vp, nfs4_ga_res_t *garp, cred_t *cr, int get_acl)
{
	mntinfo4_t *mi = VTOMI4(vp);
	hrtime_t t;
	nfs4_recov_state_t recov_state;
	nfs4_error_t e = { 0, NFS4_OK, RPC_SUCCESS };

	recov_state.rs_flags = 0;
	recov_state.rs_num_retry_despite_err = 0;

	/* Save the original mount point security flavor */
	(void) save_mnt_secinfo(mi->mi_curr_serv);

recov_retry:

	if ((e.error = nfs4_start_fop(mi, vp, NULL, OH_GETATTR,
	    &recov_state, NULL))) {
		(void) check_mnt_secinfo(mi->mi_curr_serv, vp);
		return (e.error);
	}

	t = gethrtime();

	nfs4_getattr_otw_norecovery(vp, garp, &e, cr, get_acl);

	if (nfs4_needs_recovery(&e, FALSE, vp->v_vfsp)) {
		if (nfs4_start_recovery(&e, VTOMI4(vp), vp, NULL, NULL,
		    NULL, OP_GETATTR, NULL, NULL, NULL) == FALSE) {
			nfs4_end_fop(VTOMI4(vp), vp, NULL, OH_GETATTR,
			    &recov_state, 1);
			goto recov_retry;
		}
	}

	nfs4_end_fop(VTOMI4(vp), vp, NULL, OH_GETATTR, &recov_state, 0);

	if (!e.error) {
		if (e.stat == NFS4_OK) {
			nfs4_attr_cache(vp, garp, t, cr, FALSE, NULL);
		} else {
			e.error = geterrno4(e.stat);

			nfs4_purge_stale_fh(e.error, vp, cr);
		}
	}

	/*
	 * If we did a getattr on a node that is a stub for a crossed
	 * mount point, keep the original secinfo flavor for
	 * the current file system, not the crossed one.
	 */
	(void) check_mnt_secinfo(mi->mi_curr_serv, vp);

	return (e.error);
}

/*
 * Generate a compound to get attributes over-the-wire.
 */
void
nfs4_getattr_otw_norecovery(vnode_t *vp, nfs4_ga_res_t *garp,
    nfs4_error_t *ep, cred_t *cr, int get_acl)
{
	COMPOUND4args_clnt args;
	COMPOUND4res_clnt res;
	int doqueue;
	rnode4_t *rp = VTOR4(vp);
	nfs_argop4 argop[2];

	args.ctag = TAG_GETATTR;

	args.array_len = 2;
	args.array = argop;

	/* putfh */
	argop[0].argop = OP_CPUTFH;
	argop[0].nfs_argop4_u.opcputfh.sfh = rp->r_fh;

	/* getattr */
	/*
	 * Unlike NFS versions 2 and 3, where getattr returns all the
	 * attributes, NFS version 4 returns only the ones explicitly
	 * asked for.  This creates problems, as some system functions
	 * (e.g. cache check) require certain attributes, and if the
	 * cached node lacks some attributes such as uid/gid, it can
	 * affect system utilities (e.g. "ls") that rely on the
	 * information being there.  This can lead to anything from
	 * system crashes to corrupted information processed by user
	 * apps.  So to ensure that all bases are covered, request at
	 * least the AT_ALL attribute mask.
	 */
	argop[1].argop = OP_GETATTR;
	argop[1].nfs_argop4_u.opgetattr.attr_request = NFS4_VATTR_MASK;
	if (get_acl)
		argop[1].nfs_argop4_u.opgetattr.attr_request |= FATTR4_ACL_MASK;
	argop[1].nfs_argop4_u.opgetattr.mi = VTOMI4(vp);

	doqueue = 1;

	rfs4call(VTOMI4(vp), &args, &res, cr, &doqueue, 0, ep);

	if (ep->error)
		return;

	if (res.status != NFS4_OK) {
		xdr_free(xdr_COMPOUND4res_clnt, (caddr_t)&res);
		return;
	}

	*garp = res.array[1].nfs_resop4_u.opgetattr.ga_res;

	xdr_free(xdr_COMPOUND4res_clnt, (caddr_t)&res);
}

/*
 * Return either cached or remote attributes.  If we get remote
 * attributes, use them to check and invalidate the caches, then cache
 * the new attributes.
 */
int
nfs4getattr(vnode_t *vp, vattr_t *vap, cred_t *cr)
{
	int error;
	rnode4_t *rp;
	nfs4_ga_res_t gar;

	ASSERT(nfs4_consistent_type(vp));

	/*
	 * If we've got cached attributes, we're done, otherwise go
	 * to the server to get attributes, which will update the cache
	 * in the process.  Either way, use the cached attributes for
	 * the caller's vattr_t.
	 *
	 * Note that we ignore the gar set by the OTW call: the attr caching
	 * code may make adjustments when storing to the rnode, and we want
	 * to see those changes here.
	 */
	rp = VTOR4(vp);
	error = 0;
	mutex_enter(&rp->r_statelock);
	if (!ATTRCACHE4_VALID(vp)) {
		mutex_exit(&rp->r_statelock);
		error = nfs4_getattr_otw(vp, &gar, cr, 0);
		mutex_enter(&rp->r_statelock);
	}

	if (!error)
		*vap = rp->r_attr;

	/* Return the client's view of file size */
	vap->va_size = rp->r_size;

	mutex_exit(&rp->r_statelock);

	ASSERT(nfs4_consistent_type(vp));

	return (error);
}

int
nfs4_attr_otw(vnode_t *vp, nfs4_tag_type_t tag_type,
    nfs4_ga_res_t *garp, bitmap4 reqbitmap, cred_t *cr)
{
	COMPOUND4args_clnt args;
	COMPOUND4res_clnt res;
	int doqueue;
	nfs_argop4 argop[2];
	mntinfo4_t *mi = VTOMI4(vp);
	bool_t needrecov = FALSE;
	nfs4_recov_state_t recov_state;
	nfs4_error_t e = { 0, NFS4_OK, RPC_SUCCESS };
	nfs4_ga_ext_res_t *gerp;

	recov_state.rs_flags = 0;
	recov_state.rs_num_retry_despite_err = 0;

recov_retry:
	args.ctag = tag_type;

	args.array_len = 2;
	args.array = argop;

	e.error = nfs4_start_fop(mi, vp, NULL, OH_GETATTR, &recov_state, NULL);
	if (e.error)
		return (e.error);

	/* putfh */
	argop[0].argop = OP_CPUTFH;
	argop[0].nfs_argop4_u.opcputfh.sfh = VTOR4(vp)->r_fh;

	/* getattr */
	argop[1].argop = OP_GETATTR;
	argop[1].nfs_argop4_u.opgetattr.attr_request = reqbitmap;
	argop[1].nfs_argop4_u.opgetattr.mi = mi;

	doqueue = 1;

	NFS4_DEBUG(nfs4_client_call_debug, (CE_NOTE,
	    "nfs4_attr_otw: %s call, rp %s", needrecov ? "recov" : "first",
	    rnode4info(VTOR4(vp))));

	rfs4call(mi, &args, &res, cr, &doqueue, 0, &e);

	needrecov = nfs4_needs_recovery(&e, FALSE, vp->v_vfsp);
	if (!needrecov && e.error) {
		nfs4_end_fop(VTOMI4(vp), vp, NULL, OH_GETATTR, &recov_state,
		    needrecov);
		return (e.error);
	}

	if (needrecov) {
		bool_t abort;

		NFS4_DEBUG(nfs4_client_recov_debug, (CE_NOTE,
		    "nfs4_attr_otw: initiating recovery\n"));

		abort = nfs4_start_recovery(&e, VTOMI4(vp), vp, NULL, NULL,
		    NULL, OP_GETATTR, NULL, NULL, NULL);
		nfs4_end_fop(VTOMI4(vp), vp, NULL, OH_GETATTR, &recov_state,
		    needrecov);
		if (!e.error) {
			xdr_free(xdr_COMPOUND4res_clnt, (caddr_t)&res);
			e.error = geterrno4(res.status);
		}
		if (abort == FALSE)
			goto recov_retry;
		return (e.error);
	}

	if (res.status) {
		e.error = geterrno4(res.status);
	} else {
		gerp = garp->n4g_ext_res;
		bcopy(&res.array[1].nfs_resop4_u.opgetattr.ga_res,
		    garp, sizeof (nfs4_ga_res_t));
		garp->n4g_ext_res = gerp;
		if (garp->n4g_ext_res &&
		    res.array[1].nfs_resop4_u.opgetattr.ga_res.n4g_ext_res)
			bcopy(res.array[1].nfs_resop4_u.opgetattr.
			    ga_res.n4g_ext_res,
			    garp->n4g_ext_res, sizeof (nfs4_ga_ext_res_t));
	}
	xdr_free(xdr_COMPOUND4res_clnt, (caddr_t)&res);
	nfs4_end_fop(VTOMI4(vp), vp, NULL, OH_GETATTR, &recov_state,
	    needrecov);
	return (e.error);
}

/*
 * Asynchronous I/O parameters.  nfs_async_threads is the high-water mark
 * for the demand-based allocation of async threads per-mount.  The
 * nfs_async_timeout is the amount of time a thread will live after it
 * becomes idle, unless new I/O requests are received before the thread
 * dies.  See nfs4_async_putpage and nfs4_async_start.
 */
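
/*
 * For example, an idle worker waits up to nfs_async_timeout ticks in
 * cv_reltimedwait(); if the wait times out and no request is queued,
 * the worker exits (see nfs4_async_common_start()).
 */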

static void	nfs4_async_start(struct vfs *);
static void	nfs4_async_pgops_start(struct vfs *);
static void	nfs4_async_common_start(struct vfs *, int);

static void
free_async_args4(struct nfs4_async_reqs *args)
{
	rnode4_t *rp;

	if (args->a_io != NFS4_INACTIVE) {
		rp = VTOR4(args->a_vp);
		mutex_enter(&rp->r_statelock);
		rp->r_count--;
		if (args->a_io == NFS4_PUTAPAGE ||
		    args->a_io == NFS4_PAGEIO)
			rp->r_awcount--;
		cv_broadcast(&rp->r_cv);
		mutex_exit(&rp->r_statelock);
		VN_RELE(args->a_vp);
	}
	crfree(args->a_cred);
	kmem_free(args, sizeof (*args));
}

/*
 * Cross-zone thread creation and NFS access is disallowed, yet fsflush() and
 * pageout(), running in the global zone, have legitimate reasons to do
 * VOP_PUTPAGE(B_ASYNC) on other zones' NFS mounts.  We avoid the problem by
 * use of a per-mount "asynchronous requests manager thread" which is
 * signaled by the various asynchronous work routines when there is
 * asynchronous work to be done.  It is responsible for creating new
 * worker threads if necessary, and notifying existing worker threads
 * that there is work to be done.
 *
 * In other words, it will "take the specifications from the customers and
 * give them to the engineers."
 *
 * Worker threads die off of their own accord if they are no longer
 * needed.
 *
 * This thread is killed when the zone is going away or the filesystem
 * is being unmounted.
 */
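
/*
 * Sketch of the flow: enqueuers bump mi_async_req_count and signal
 * mi_async_reqs_cv; the manager below creates or wakes a worker for
 * each request and decrements the count; workers then pull requests
 * off the per-type queues in nfs4_async_common_start().
 */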
void
nfs4_async_manager(vfs_t *vfsp)
{
	callb_cpr_t cprinfo;
	mntinfo4_t *mi;
	uint_t max_threads;

	mi = VFTOMI4(vfsp);

	CALLB_CPR_INIT(&cprinfo, &mi->mi_async_lock, callb_generic_cpr,
	    "nfs4_async_manager");

	mutex_enter(&mi->mi_async_lock);
	/*
	 * We want to stash the max number of threads that this mount was
	 * allowed so we can use it later when the variable is set to zero as
	 * part of the zone/mount going away.
	 *
	 * We want to be able to create at least one thread to handle
	 * asynchronous inactive calls.
	 */
	max_threads = MAX(mi->mi_max_threads, 1);
	/*
	 * We don't want to wait for mi_max_threads to go to zero, since that
	 * happens as part of a failed unmount, but this thread should only
	 * exit when the mount is really going away.
	 *
	 * Once MI4_ASYNC_MGR_STOP is set, no more async operations will be
	 * attempted: the various _async_*() functions know to do things
	 * inline if mi_max_threads == 0.  Henceforth we just drain out the
	 * outstanding requests.
	 *
	 * Note that we still create zthreads even if we notice the zone is
	 * shutting down (MI4_ASYNC_MGR_STOP is set); this may cause the zone
	 * shutdown sequence to take slightly longer in some cases, but
	 * doesn't violate the protocol, as all threads will exit as soon as
	 * they're done processing the remaining requests.
	 */
	for (;;) {
		while (mi->mi_async_req_count > 0) {
			/*
			 * Paranoia: If the mount started out having
			 * (mi->mi_max_threads == 0), and the value was
			 * later changed (via a debugger or somesuch),
			 * we could be confused since we will think we
			 * can't create any threads, and the calling
			 * code (which looks at the current value of
			 * mi->mi_max_threads, now non-zero) thinks we
			 * can.
			 *
			 * So, because we're paranoid, we create threads
			 * up to the maximum of the original and the
			 * current value.  This means that future
			 * (debugger-induced) alterations of
			 * mi->mi_max_threads are ignored for our
			 * purposes, but who told them they could change
			 * random values on a live kernel anyhow?
			 */
			if (mi->mi_threads[NFS4_ASYNC_QUEUE] <
			    MAX(mi->mi_max_threads, max_threads)) {
				mi->mi_threads[NFS4_ASYNC_QUEUE]++;
				mutex_exit(&mi->mi_async_lock);
				MI4_HOLD(mi);
				VFS_HOLD(vfsp);	/* hold for new thread */
				(void) zthread_create(NULL, 0, nfs4_async_start,
				    vfsp, 0, minclsyspri);
				mutex_enter(&mi->mi_async_lock);
			} else if (mi->mi_threads[NFS4_ASYNC_PGOPS_QUEUE] <
			    NUM_ASYNC_PGOPS_THREADS) {
				mi->mi_threads[NFS4_ASYNC_PGOPS_QUEUE]++;
				mutex_exit(&mi->mi_async_lock);
				MI4_HOLD(mi);
				VFS_HOLD(vfsp);	/* hold for new thread */
				(void) zthread_create(NULL, 0,
				    nfs4_async_pgops_start, vfsp, 0,
				    minclsyspri);
				mutex_enter(&mi->mi_async_lock);
			}
			NFS4_WAKE_ASYNC_WORKER(mi->mi_async_work_cv);
			ASSERT(mi->mi_async_req_count != 0);
			mi->mi_async_req_count--;
		}

		mutex_enter(&mi->mi_lock);
		if (mi->mi_flags & MI4_ASYNC_MGR_STOP) {
			mutex_exit(&mi->mi_lock);
			break;
		}
		mutex_exit(&mi->mi_lock);

		CALLB_CPR_SAFE_BEGIN(&cprinfo);
		cv_wait(&mi->mi_async_reqs_cv, &mi->mi_async_lock);
		CALLB_CPR_SAFE_END(&cprinfo, &mi->mi_async_lock);
	}

	NFS4_DEBUG(nfs4_client_zone_debug, (CE_NOTE,
	    "nfs4_async_manager exiting for vfs %p\n", (void *)mi->mi_vfsp));
	/*
	 * Let everyone know we're done.
	 */
	mi->mi_manager_thread = NULL;
	/*
	 * Wake up the inactive thread.
	 */
	cv_broadcast(&mi->mi_inact_req_cv);
	/*
	 * Wake up anyone sitting in nfs4_async_manager_stop()
	 */
	cv_broadcast(&mi->mi_async_cv);
	/*
	 * There is no explicit call to mutex_exit(&mi->mi_async_lock)
	 * since CALLB_CPR_EXIT is actually responsible for releasing
	 * 'mi_async_lock'.
	 */
	CALLB_CPR_EXIT(&cprinfo);
	VFS_RELE(vfsp);	/* release thread's hold */
	MI4_RELE(mi);
	zthread_exit();
}

/*
 * Signal (and wait for) the async manager thread to clean up and go away.
 */
void
nfs4_async_manager_stop(vfs_t *vfsp)
{
	mntinfo4_t *mi = VFTOMI4(vfsp);

	mutex_enter(&mi->mi_async_lock);
	mutex_enter(&mi->mi_lock);
	mi->mi_flags |= MI4_ASYNC_MGR_STOP;
	mutex_exit(&mi->mi_lock);
	cv_broadcast(&mi->mi_async_reqs_cv);
	/*
	 * Wait for the async manager thread to die.
	 */
	while (mi->mi_manager_thread != NULL)
		cv_wait(&mi->mi_async_cv, &mi->mi_async_lock);
	mutex_exit(&mi->mi_async_lock);
}

int
nfs4_async_readahead(vnode_t *vp, u_offset_t blkoff, caddr_t addr,
    struct seg *seg, cred_t *cr, void (*readahead)(vnode_t *,
    u_offset_t, caddr_t, struct seg *, cred_t *))
{
	rnode4_t *rp;
	mntinfo4_t *mi;
	struct nfs4_async_reqs *args;

	rp = VTOR4(vp);
	ASSERT(rp->r_freef == NULL);

	mi = VTOMI4(vp);

	/*
	 * If addr falls in a different segment, don't bother doing readahead.
	 */
	if (addr >= seg->s_base + seg->s_size)
		return (-1);

	/*
	 * If we can't allocate a request structure, punt on the readahead.
	 */
	if ((args = kmem_alloc(sizeof (*args), KM_NOSLEEP)) == NULL)
		return (-1);

	/*
	 * If a lock operation is pending, don't initiate any new
	 * readaheads.  Otherwise, bump r_count to indicate the new
	 * asynchronous I/O.
	 */
	if (!nfs_rw_tryenter(&rp->r_lkserlock, RW_READER)) {
		kmem_free(args, sizeof (*args));
		return (-1);
	}
	mutex_enter(&rp->r_statelock);
	rp->r_count++;
	mutex_exit(&rp->r_statelock);
	nfs_rw_exit(&rp->r_lkserlock);

	args->a_next = NULL;
#ifdef DEBUG
	args->a_queuer = curthread;
#endif
	VN_HOLD(vp);
	args->a_vp = vp;
	ASSERT(cr != NULL);
	crhold(cr);
	args->a_cred = cr;
	args->a_io = NFS4_READ_AHEAD;
	args->a_nfs4_readahead = readahead;
	args->a_nfs4_blkoff = blkoff;
	args->a_nfs4_seg = seg;
	args->a_nfs4_addr = addr;

	mutex_enter(&mi->mi_async_lock);

	/*
	 * If asyncio has been disabled, don't bother readahead.
	 */
	if (mi->mi_max_threads == 0) {
		mutex_exit(&mi->mi_async_lock);
		goto noasync;
	}

	/*
	 * Link request structure into the async list and
	 * wakeup async thread to do the i/o.
	 */
	if (mi->mi_async_reqs[NFS4_READ_AHEAD] == NULL) {
		mi->mi_async_reqs[NFS4_READ_AHEAD] = args;
		mi->mi_async_tail[NFS4_READ_AHEAD] = args;
	} else {
		mi->mi_async_tail[NFS4_READ_AHEAD]->a_next = args;
		mi->mi_async_tail[NFS4_READ_AHEAD] = args;
	}

	if (mi->mi_io_kstats) {
		mutex_enter(&mi->mi_lock);
		kstat_waitq_enter(KSTAT_IO_PTR(mi->mi_io_kstats));
		mutex_exit(&mi->mi_lock);
	}

	mi->mi_async_req_count++;
	ASSERT(mi->mi_async_req_count != 0);
	cv_signal(&mi->mi_async_reqs_cv);
	mutex_exit(&mi->mi_async_lock);
	return (0);

noasync:
	mutex_enter(&rp->r_statelock);
	rp->r_count--;
	cv_broadcast(&rp->r_cv);
	mutex_exit(&rp->r_statelock);
	VN_RELE(vp);
	crfree(cr);
	kmem_free(args, sizeof (*args));
	return (-1);
}

static void
nfs4_async_start(struct vfs *vfsp)
{
	nfs4_async_common_start(vfsp, NFS4_ASYNC_QUEUE);
}

static void
nfs4_async_pgops_start(struct vfs *vfsp)
{
	nfs4_async_common_start(vfsp, NFS4_ASYNC_PGOPS_QUEUE);
}

/*
 * The async queues for each mounted file system are arranged as a
 * set of queues, one for each async i/o type.  Requests are taken
 * from the queues in a round-robin fashion.  A number of consecutive
 * requests are taken from each queue before moving on to the next
 * queue.  This functionality may allow the NFS server to do write
 * clustering, even if the client is mixing writes and reads, because
 * it will take multiple write requests from the queue before
 * processing any of the other async i/o types.
 *
 * XXX The nfs4_async_common_start thread is unsafe in light of the
 * present model defined by cpr to suspend the system.  Specifically,
 * over-the-wire calls are cpr-unsafe.  The thread should be
 * reevaluated in case of future updates to the cpr model.
 */
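
/*
 * Illustrative example: if mi_async_init_clusters were 4, a worker
 * would take up to four consecutive requests from the current queue
 * (mi_async_clusters[] counts down per request) before advancing to
 * the next non-empty queue.
 */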
static void
nfs4_async_common_start(struct vfs *vfsp, int async_queue)
{
	struct nfs4_async_reqs *args;
	mntinfo4_t *mi = VFTOMI4(vfsp);
	clock_t time_left = 1;
	callb_cpr_t cprinfo;
	int i;
	extern int nfs_async_timeout;
	int async_types;
	kcondvar_t *async_work_cv;

	if (async_queue == NFS4_ASYNC_QUEUE) {
		async_types = NFS4_ASYNC_TYPES;
		async_work_cv = &mi->mi_async_work_cv[NFS4_ASYNC_QUEUE];
	} else {
		async_types = NFS4_ASYNC_PGOPS_TYPES;
		async_work_cv = &mi->mi_async_work_cv[NFS4_ASYNC_PGOPS_QUEUE];
	}

	/*
	 * Dynamic initialization of nfs_async_timeout to allow nfs to be
	 * built in an implementation independent manner.
	 */
	if (nfs_async_timeout == -1)
		nfs_async_timeout = NFS_ASYNC_TIMEOUT;

	CALLB_CPR_INIT(&cprinfo, &mi->mi_async_lock, callb_generic_cpr, "nas");

	mutex_enter(&mi->mi_async_lock);
	for (;;) {
		/*
		 * Find the next queue containing an entry.  We start
		 * at the current queue pointer and then round robin
		 * through all of them until we either find a non-empty
		 * queue or have looked through all of them.
		 */
		for (i = 0; i < async_types; i++) {
			args = *mi->mi_async_curr[async_queue];
			if (args != NULL)
				break;
			mi->mi_async_curr[async_queue]++;
			if (mi->mi_async_curr[async_queue] ==
			    &mi->mi_async_reqs[async_types]) {
				mi->mi_async_curr[async_queue] =
				    &mi->mi_async_reqs[0];
			}
		}
		/*
		 * If we didn't find an entry, then block until woken up
		 * again and then look through the queues again.
		 */
		if (args == NULL) {
			/*
			 * Exiting is considered to be safe for CPR as well
			 */
			CALLB_CPR_SAFE_BEGIN(&cprinfo);

			/*
			 * Wake up the thread waiting to unmount the file
			 * system, but only if all async threads are inactive.
			 *
			 * If we've timed out and there's nothing to do,
			 * then get rid of this thread.
			 */
			if (mi->mi_max_threads == 0 || time_left <= 0) {
				--mi->mi_threads[async_queue];

				if (mi->mi_threads[NFS4_ASYNC_QUEUE] == 0 &&
				    mi->mi_threads[NFS4_ASYNC_PGOPS_QUEUE] == 0)
					cv_signal(&mi->mi_async_cv);
				CALLB_CPR_EXIT(&cprinfo);
				VFS_RELE(vfsp);	/* release thread's hold */
				MI4_RELE(mi);
				zthread_exit();
				/* NOTREACHED */
			}
			time_left = cv_reltimedwait(async_work_cv,
			    &mi->mi_async_lock, nfs_async_timeout,
			    TR_CLOCK_TICK);

			CALLB_CPR_SAFE_END(&cprinfo, &mi->mi_async_lock);

			continue;
		} else {
			time_left = 1;
		}

		/*
		 * Remove the request from the async queue and then
		 * update the current async request queue pointer.  If
		 * the current queue is empty or we have removed enough
		 * consecutive entries from it, then reset the counter
		 * for this queue and then move the current pointer to
		 * the next queue.
		 */
		*mi->mi_async_curr[async_queue] = args->a_next;
		if (*mi->mi_async_curr[async_queue] == NULL ||
		    --mi->mi_async_clusters[args->a_io] == 0) {
			mi->mi_async_clusters[args->a_io] =
			    mi->mi_async_init_clusters;
			mi->mi_async_curr[async_queue]++;
			if (mi->mi_async_curr[async_queue] ==
			    &mi->mi_async_reqs[async_types]) {
				mi->mi_async_curr[async_queue] =
				    &mi->mi_async_reqs[0];
			}
		}

		if (args->a_io != NFS4_INACTIVE && mi->mi_io_kstats) {
			mutex_enter(&mi->mi_lock);
			kstat_waitq_exit(KSTAT_IO_PTR(mi->mi_io_kstats));
			mutex_exit(&mi->mi_lock);
		}

		mutex_exit(&mi->mi_async_lock);

		/*
		 * Obtain arguments from the async request structure.
		 */
		if (args->a_io == NFS4_READ_AHEAD && mi->mi_max_threads > 0) {
			(*args->a_nfs4_readahead)(args->a_vp,
			    args->a_nfs4_blkoff, args->a_nfs4_addr,
			    args->a_nfs4_seg, args->a_cred);
		} else if (args->a_io == NFS4_PUTAPAGE) {
			(void) (*args->a_nfs4_putapage)(args->a_vp,
			    args->a_nfs4_pp, args->a_nfs4_off,
			    args->a_nfs4_len, args->a_nfs4_flags,
			    args->a_cred);
		} else if (args->a_io == NFS4_PAGEIO) {
			(void) (*args->a_nfs4_pageio)(args->a_vp,
			    args->a_nfs4_pp, args->a_nfs4_off,
			    args->a_nfs4_len, args->a_nfs4_flags,
			    args->a_cred);
		} else if (args->a_io == NFS4_READDIR) {
			(void) ((*args->a_nfs4_readdir)(args->a_vp,
			    args->a_nfs4_rdc, args->a_cred));
		} else if (args->a_io == NFS4_COMMIT) {
			(*args->a_nfs4_commit)(args->a_vp, args->a_nfs4_plist,
			    args->a_nfs4_offset, args->a_nfs4_count,
			    args->a_cred);
		} else if (args->a_io == NFS4_INACTIVE) {
			nfs4_inactive_otw(args->a_vp, args->a_cred);
		}

		/*
		 * Now, release the vnode and free the credentials
		 * structure.
		 */
		free_async_args4(args);
		/*
		 * Reacquire the mutex because it will be needed above.
		 */
		mutex_enter(&mi->mi_async_lock);
	}
}

/*
 * nfs4_inactive_thread - look for vnodes that need over-the-wire calls as
 * part of VOP_INACTIVE.
 */

void
nfs4_inactive_thread(mntinfo4_t *mi)
{
	struct nfs4_async_reqs *args;
	callb_cpr_t cprinfo;
	vfs_t *vfsp = mi->mi_vfsp;

	CALLB_CPR_INIT(&cprinfo, &mi->mi_async_lock, callb_generic_cpr,
	    "nfs4_inactive_thread");

	for (;;) {
		mutex_enter(&mi->mi_async_lock);
		args = mi->mi_async_reqs[NFS4_INACTIVE];
		if (args == NULL) {
			mutex_enter(&mi->mi_lock);
			/*
			 * We don't want to exit until the async manager is
			 * done with its work; hence the check for
			 * mi_manager_thread being NULL.
			 *
			 * The async manager thread will cv_broadcast() on
			 * mi_inact_req_cv when it's done, at which point we'll
			 * wake up and exit.
			 */
			if (mi->mi_manager_thread == NULL)
				goto die;
			mi->mi_flags |= MI4_INACTIVE_IDLE;
			mutex_exit(&mi->mi_lock);
			cv_signal(&mi->mi_async_cv);
			CALLB_CPR_SAFE_BEGIN(&cprinfo);
			cv_wait(&mi->mi_inact_req_cv, &mi->mi_async_lock);
			CALLB_CPR_SAFE_END(&cprinfo, &mi->mi_async_lock);
			mutex_exit(&mi->mi_async_lock);
		} else {
			mutex_enter(&mi->mi_lock);
			mi->mi_flags &= ~MI4_INACTIVE_IDLE;
			mutex_exit(&mi->mi_lock);
			mi->mi_async_reqs[NFS4_INACTIVE] = args->a_next;
			mutex_exit(&mi->mi_async_lock);
			nfs4_inactive_otw(args->a_vp, args->a_cred);
			crfree(args->a_cred);
			kmem_free(args, sizeof (*args));
		}
	}
die:
	mutex_exit(&mi->mi_lock);
	mi->mi_inactive_thread = NULL;
	cv_signal(&mi->mi_async_cv);

	/*
	 * There is no explicit call to mutex_exit(&mi->mi_async_lock) since
	 * CALLB_CPR_EXIT is actually responsible for releasing
	 * 'mi_async_lock'.
	 */
	CALLB_CPR_EXIT(&cprinfo);

	NFS4_DEBUG(nfs4_client_zone_debug, (CE_NOTE,
	    "nfs4_inactive_thread exiting for vfs %p\n", (void *)vfsp));

	MI4_RELE(mi);
	zthread_exit();
	/* NOTREACHED */
}

/*
 * nfs4_async_stop:
 * Wait for all outstanding putpage operations and the inactive thread to
 * complete; nfs4_async_stop_sig() without interruptibility.
 */
void
nfs4_async_stop(struct vfs *vfsp)
{
	mntinfo4_t *mi = VFTOMI4(vfsp);

	/*
	 * Wait for all outstanding async operations to complete and for
	 * worker threads to exit.
	 */
	mutex_enter(&mi->mi_async_lock);
	mi->mi_max_threads = 0;
	NFS4_WAKEALL_ASYNC_WORKERS(mi->mi_async_work_cv);
	while (mi->mi_threads[NFS4_ASYNC_QUEUE] != 0 ||
	    mi->mi_threads[NFS4_ASYNC_PGOPS_QUEUE] != 0)
		cv_wait(&mi->mi_async_cv, &mi->mi_async_lock);

	/*
	 * Wait for the inactive thread to finish doing what it's doing.  It
	 * won't exit until the last reference to the vfs_t goes away.
	 */
	if (mi->mi_inactive_thread != NULL) {
		mutex_enter(&mi->mi_lock);
		while (!(mi->mi_flags & MI4_INACTIVE_IDLE) ||
		    (mi->mi_async_reqs[NFS4_INACTIVE] != NULL)) {
			mutex_exit(&mi->mi_lock);
			cv_wait(&mi->mi_async_cv, &mi->mi_async_lock);
			mutex_enter(&mi->mi_lock);
		}
		mutex_exit(&mi->mi_lock);
	}
	mutex_exit(&mi->mi_async_lock);
}

/*
 * nfs4_async_stop_sig:
 * Wait for all outstanding putpage operations and the inactive thread to
 * complete.  If a signal is delivered we will abort and return non-zero;
 * otherwise return 0.  Since this routine is called from nfs4_unmount, we
 * need to make it interruptible.
 */
int
nfs4_async_stop_sig(struct vfs *vfsp)
{
	mntinfo4_t *mi = VFTOMI4(vfsp);
	ushort_t omax;
	bool_t intr = FALSE;

	/*
	 * Wait for all outstanding putpage operations to complete and for
	 * worker threads to exit.
	 */
	mutex_enter(&mi->mi_async_lock);
	omax = mi->mi_max_threads;
	mi->mi_max_threads = 0;
	NFS4_WAKEALL_ASYNC_WORKERS(mi->mi_async_work_cv);
	while (mi->mi_threads[NFS4_ASYNC_QUEUE] != 0 ||
	    mi->mi_threads[NFS4_ASYNC_PGOPS_QUEUE] != 0) {
		if (!cv_wait_sig(&mi->mi_async_cv, &mi->mi_async_lock)) {
			intr = TRUE;
			goto interrupted;
		}
	}

	/*
	 * Wait for the inactive thread to finish doing what it's doing.  It
	 * won't exit until the last reference to the vfs_t goes away.
	 */
	if (mi->mi_inactive_thread != NULL) {
		mutex_enter(&mi->mi_lock);
		while (!(mi->mi_flags & MI4_INACTIVE_IDLE) ||
		    (mi->mi_async_reqs[NFS4_INACTIVE] != NULL)) {
			mutex_exit(&mi->mi_lock);
			if (!cv_wait_sig(&mi->mi_async_cv,
			    &mi->mi_async_lock)) {
				intr = TRUE;
				goto interrupted;
			}
			mutex_enter(&mi->mi_lock);
		}
		mutex_exit(&mi->mi_lock);
	}
interrupted:
	if (intr)
		mi->mi_max_threads = omax;
	mutex_exit(&mi->mi_async_lock);

	return (intr);
}

int
nfs4_async_putapage(vnode_t *vp, page_t *pp, u_offset_t off, size_t len,
    int flags, cred_t *cr, int (*putapage)(vnode_t *, page_t *,
    u_offset_t, size_t, int, cred_t *))
{
	rnode4_t *rp;
	mntinfo4_t *mi;
	struct nfs4_async_reqs *args;

	ASSERT(flags & B_ASYNC);
	ASSERT(vp->v_vfsp != NULL);

	rp = VTOR4(vp);
	ASSERT(rp->r_count > 0);

	mi = VTOMI4(vp);

	/*
	 * If we can't allocate a request structure, do the putpage
	 * operation synchronously in this thread's context.
	 */
	if ((args = kmem_alloc(sizeof (*args), KM_NOSLEEP)) == NULL)
		goto noasync;

	args->a_next = NULL;
#ifdef DEBUG
	args->a_queuer = curthread;
#endif
	VN_HOLD(vp);
	args->a_vp = vp;
	ASSERT(cr != NULL);
	crhold(cr);
	args->a_cred = cr;
	args->a_io = NFS4_PUTAPAGE;
	args->a_nfs4_putapage = putapage;
	args->a_nfs4_pp = pp;
	args->a_nfs4_off = off;
	args->a_nfs4_len = (uint_t)len;
	args->a_nfs4_flags = flags;

	mutex_enter(&mi->mi_async_lock);

	/*
	 * If asyncio has been disabled, then make a synchronous request.
	 * This check is done a second time in case async io was disabled
	 * while this thread was blocked waiting for memory pressure to
	 * reduce or for the queue to drain.
	 */
	if (mi->mi_max_threads == 0) {
		mutex_exit(&mi->mi_async_lock);

		VN_RELE(vp);
		crfree(cr);
		kmem_free(args, sizeof (*args));
		goto noasync;
	}

	/*
	 * Link request structure into the async list and
	 * wakeup async thread to do the i/o.
	 */
	if (mi->mi_async_reqs[NFS4_PUTAPAGE] == NULL) {
		mi->mi_async_reqs[NFS4_PUTAPAGE] = args;
		mi->mi_async_tail[NFS4_PUTAPAGE] = args;
	} else {
		mi->mi_async_tail[NFS4_PUTAPAGE]->a_next = args;
		mi->mi_async_tail[NFS4_PUTAPAGE] = args;
	}

	mutex_enter(&rp->r_statelock);
	rp->r_count++;
	rp->r_awcount++;
	mutex_exit(&rp->r_statelock);

	if (mi->mi_io_kstats) {
		mutex_enter(&mi->mi_lock);
		kstat_waitq_enter(KSTAT_IO_PTR(mi->mi_io_kstats));
		mutex_exit(&mi->mi_lock);
	}

	mi->mi_async_req_count++;
	ASSERT(mi->mi_async_req_count != 0);
	cv_signal(&mi->mi_async_reqs_cv);
	mutex_exit(&mi->mi_async_lock);
	return (0);

noasync:

	if (curproc == proc_pageout || curproc == proc_fsflush) {
		/*
		 * If we get here in the context of pageout/fsflush, or we
		 * have run out of memory, or we're attempting to unmount,
		 * we refuse to do a sync write, because this may hang
		 * pageout/fsflush and the machine.  In this case, we just
		 * re-mark the page as dirty and punt on the page.
		 *
		 * Make sure B_FORCE isn't set.  We can re-mark the
		 * pages as dirty and unlock the pages in one swoop by
		 * passing in B_ERROR to pvn_write_done().  However,
		 * we should make sure B_FORCE isn't set - we don't
		 * want the page tossed before it gets written out.
		 */
		if (flags & B_FORCE)
			flags &= ~(B_INVAL | B_FORCE);
		pvn_write_done(pp, flags | B_ERROR);
		return (0);
	}

	if (nfs_zone() != mi->mi_zone) {
		/*
		 * So this was a cross-zone sync putpage.
		 *
		 * We pass in B_ERROR to pvn_write_done() to re-mark the pages
		 * as dirty and unlock them.
		 *
		 * We don't want to clear B_FORCE here as the caller presumably
		 * knows what they're doing if they set it.
		 */
		pvn_write_done(pp, flags | B_ERROR);
		return (EPERM);
	}
	return ((*putapage)(vp, pp, off, len, flags, cr));
}

int
nfs4_async_pageio(vnode_t *vp, page_t *pp, u_offset_t io_off, size_t io_len,
    int flags, cred_t *cr, int (*pageio)(vnode_t *, page_t *, u_offset_t,
    size_t, int, cred_t *))
{
	rnode4_t *rp;
	mntinfo4_t *mi;
	struct nfs4_async_reqs *args;

	ASSERT(flags & B_ASYNC);
	ASSERT(vp->v_vfsp != NULL);

	rp = VTOR4(vp);
	ASSERT(rp->r_count > 0);

	mi = VTOMI4(vp);

	/*
	 * If we can't allocate a request structure, do the pageio
	 * request synchronously in this thread's context.
	 */
	if ((args = kmem_alloc(sizeof (*args), KM_NOSLEEP)) == NULL)
		goto noasync;

	args->a_next = NULL;
#ifdef DEBUG
	args->a_queuer = curthread;
#endif
	VN_HOLD(vp);
	args->a_vp = vp;
	ASSERT(cr != NULL);
	crhold(cr);
	args->a_cred = cr;
	args->a_io = NFS4_PAGEIO;
	args->a_nfs4_pageio = pageio;
	args->a_nfs4_pp = pp;
	args->a_nfs4_off = io_off;
	args->a_nfs4_len = (uint_t)io_len;
	args->a_nfs4_flags = flags;

	mutex_enter(&mi->mi_async_lock);

	/*
	 * If asyncio has been disabled, then make a synchronous request.
	 * This check is done a second time in case async io was disabled
	 * while this thread was blocked waiting for memory pressure to
	 * reduce or for the queue to drain.
	 */
1880 if (mi->mi_max_threads == 0) {
1881 mutex_exit(&mi->mi_async_lock);
1882
1883 VN_RELE(vp);
1884 crfree(cr);
1885 kmem_free(args, sizeof (*args));
1886 goto noasync;
1887 }
1888
1889 /*
1890 * Link request structure into the async list and
1891 * wakeup async thread to do the i/o.
1892 */
1893 if (mi->mi_async_reqs[NFS4_PAGEIO] == NULL) {
1894 mi->mi_async_reqs[NFS4_PAGEIO] = args;
1895 mi->mi_async_tail[NFS4_PAGEIO] = args;
1896 } else {
1897 mi->mi_async_tail[NFS4_PAGEIO]->a_next = args;
1898 mi->mi_async_tail[NFS4_PAGEIO] = args;
1899 }
1900
1901 mutex_enter(&rp->r_statelock);
1902 rp->r_count++;
1903 rp->r_awcount++;
1904 mutex_exit(&rp->r_statelock);
1905
1906 if (mi->mi_io_kstats) {
1907 mutex_enter(&mi->mi_lock);
1908 kstat_waitq_enter(KSTAT_IO_PTR(mi->mi_io_kstats));
1909 mutex_exit(&mi->mi_lock);
1910 }
1911
1912 mi->mi_async_req_count++;
1913 ASSERT(mi->mi_async_req_count != 0);
1914 cv_signal(&mi->mi_async_reqs_cv);
1915 mutex_exit(&mi->mi_async_lock);
1916 return (0);
1917
1918 noasync:
	/*
	 * If we can't do it async: for reads we do nothing (except
	 * clean up the page list), and for writes we do the i/o
	 * synchronously, except for proc_pageout/proc_fsflush as
	 * described below.
	 */
1924 if (flags & B_READ) {
1925 pvn_read_done(pp, flags | B_ERROR);
1926 return (0);
1927 }
1928
1929 if (curproc == proc_pageout || curproc == proc_fsflush) {
		/*
		 * If we get here in the context of pageout/fsflush,
		 * we refuse to do a sync write, because this may hang
		 * pageout/fsflush (and the machine).  In this case, we
		 * just re-mark the page as dirty and punt on the page.
		 *
		 * We can re-mark the pages as dirty and unlock them in
		 * one swoop by passing in B_ERROR to pvn_write_done().
		 * However, make sure B_FORCE isn't set - we don't want
		 * the page tossed before it gets written out.
		 */
1942 if (flags & B_FORCE)
1943 flags &= ~(B_INVAL | B_FORCE);
1944 pvn_write_done(pp, flags | B_ERROR);
1945 return (0);
1946 }
1947
1948 if (nfs_zone() != mi->mi_zone) {
1949 /*
1950 * So this was a cross-zone sync pageio. We pass in B_ERROR
1951 * to pvn_write_done() to re-mark the pages as dirty and unlock
1952 * them.
1953 *
1954 * We don't want to clear B_FORCE here as the caller presumably
1955 * knows what they're doing if they set it.
1956 */
1957 pvn_write_done(pp, flags | B_ERROR);
1958 return (EPERM);
1959 }
1960 return ((*pageio)(vp, pp, io_off, io_len, flags, cr));
1961 }
1962
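/*
 * Queue an asynchronous readdir for this vnode.  There is no synchronous
 * fallback: if the request cannot be queued, the rddir4_cache entry is
 * marked as still needing to be filled (RDDIRREQ) and released, and the
 * readdir is simply skipped.
 */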
1963 void
1964 nfs4_async_readdir(vnode_t *vp, rddir4_cache *rdc, cred_t *cr,
1965 int (*readdir)(vnode_t *, rddir4_cache *, cred_t *))
1966 {
1967 rnode4_t *rp;
1968 mntinfo4_t *mi;
1969 struct nfs4_async_reqs *args;
1970
1971 rp = VTOR4(vp);
1972 ASSERT(rp->r_freef == NULL);
1973
1974 mi = VTOMI4(vp);
1975
1976 /*
1977 * If we can't allocate a request structure, skip the readdir.
1978 */
1979 if ((args = kmem_alloc(sizeof (*args), KM_NOSLEEP)) == NULL)
1980 goto noasync;
1981
1982 args->a_next = NULL;
1983 #ifdef DEBUG
1984 args->a_queuer = curthread;
1985 #endif
1986 VN_HOLD(vp);
1987 args->a_vp = vp;
1988 ASSERT(cr != NULL);
1989 crhold(cr);
1990 args->a_cred = cr;
1991 args->a_io = NFS4_READDIR;
1992 args->a_nfs4_readdir = readdir;
1993 args->a_nfs4_rdc = rdc;
1994
1995 mutex_enter(&mi->mi_async_lock);
1996
1997 /*
1998 * If asyncio has been disabled, then skip this request
1999 */
2000 if (mi->mi_max_threads == 0) {
2001 mutex_exit(&mi->mi_async_lock);
2002
2003 VN_RELE(vp);
2004 crfree(cr);
2005 kmem_free(args, sizeof (*args));
2006 goto noasync;
2007 }
2008
2009 /*
2010 * Link request structure into the async list and
2011 * wakeup async thread to do the i/o.
2012 */
2013 if (mi->mi_async_reqs[NFS4_READDIR] == NULL) {
2014 mi->mi_async_reqs[NFS4_READDIR] = args;
2015 mi->mi_async_tail[NFS4_READDIR] = args;
2016 } else {
2017 mi->mi_async_tail[NFS4_READDIR]->a_next = args;
2018 mi->mi_async_tail[NFS4_READDIR] = args;
2019 }
2020
2021 mutex_enter(&rp->r_statelock);
2022 rp->r_count++;
2023 mutex_exit(&rp->r_statelock);
2024
2025 if (mi->mi_io_kstats) {
2026 mutex_enter(&mi->mi_lock);
2027 kstat_waitq_enter(KSTAT_IO_PTR(mi->mi_io_kstats));
2028 mutex_exit(&mi->mi_lock);
2029 }
2030
2031 mi->mi_async_req_count++;
2032 ASSERT(mi->mi_async_req_count != 0);
2033 cv_signal(&mi->mi_async_reqs_cv);
2034 mutex_exit(&mi->mi_async_lock);
2035 return;
2036
2037 noasync:
2038 mutex_enter(&rp->r_statelock);
2039 rdc->entries = NULL;
2040 /*
2041 * Indicate that no one is trying to fill this entry and
2042 * it still needs to be filled.
2043 */
2044 rdc->flags &= ~RDDIR;
2045 rdc->flags |= RDDIRREQ;
2046 rddir4_cache_rele(rp, rdc);
2047 mutex_exit(&rp->r_statelock);
2048 }
2049
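/*
 * Queue an asynchronous COMMIT for the pages in plist.  If the request
 * cannot be queued, the commit is done synchronously in the caller's
 * context, except for pageout/fsflush and cross-zone callers, where the
 * pages are simply marked C_COMMIT and unlocked for a later commit.
 */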
2050 void
2051 nfs4_async_commit(vnode_t *vp, page_t *plist, offset3 offset, count3 count,
2052 cred_t *cr, void (*commit)(vnode_t *, page_t *, offset3, count3,
2053 cred_t *))
2054 {
2055 rnode4_t *rp;
2056 mntinfo4_t *mi;
2057 struct nfs4_async_reqs *args;
2058 page_t *pp;
2059
2060 rp = VTOR4(vp);
2061 mi = VTOMI4(vp);
2062
2063 /*
2064 * If we can't allocate a request structure, do the commit
2065 * operation synchronously in this thread's context.
2066 */
2067 if ((args = kmem_alloc(sizeof (*args), KM_NOSLEEP)) == NULL)
2068 goto noasync;
2069
2070 args->a_next = NULL;
2071 #ifdef DEBUG
2072 args->a_queuer = curthread;
2073 #endif
2074 VN_HOLD(vp);
2075 args->a_vp = vp;
2076 ASSERT(cr != NULL);
2077 crhold(cr);
2078 args->a_cred = cr;
2079 args->a_io = NFS4_COMMIT;
2080 args->a_nfs4_commit = commit;
2081 args->a_nfs4_plist = plist;
2082 args->a_nfs4_offset = offset;
2083 args->a_nfs4_count = count;
2084
2085 mutex_enter(&mi->mi_async_lock);
2086
2087 /*
2088 * If asyncio has been disabled, then make a synchronous request.
	 * This check is done a second time in case async i/o was disabled
2090 * while this thread was blocked waiting for memory pressure to
2091 * reduce or for the queue to drain.
2092 */
2093 if (mi->mi_max_threads == 0) {
2094 mutex_exit(&mi->mi_async_lock);
2095
2096 VN_RELE(vp);
2097 crfree(cr);
2098 kmem_free(args, sizeof (*args));
2099 goto noasync;
2100 }
2101
2102 /*
2103 * Link request structure into the async list and
2104 * wakeup async thread to do the i/o.
2105 */
2106 if (mi->mi_async_reqs[NFS4_COMMIT] == NULL) {
2107 mi->mi_async_reqs[NFS4_COMMIT] = args;
2108 mi->mi_async_tail[NFS4_COMMIT] = args;
2109 } else {
2110 mi->mi_async_tail[NFS4_COMMIT]->a_next = args;
2111 mi->mi_async_tail[NFS4_COMMIT] = args;
2112 }
2113
2114 mutex_enter(&rp->r_statelock);
2115 rp->r_count++;
2116 mutex_exit(&rp->r_statelock);
2117
2118 if (mi->mi_io_kstats) {
2119 mutex_enter(&mi->mi_lock);
2120 kstat_waitq_enter(KSTAT_IO_PTR(mi->mi_io_kstats));
2121 mutex_exit(&mi->mi_lock);
2122 }
2123
2124 mi->mi_async_req_count++;
2125 ASSERT(mi->mi_async_req_count != 0);
2126 cv_signal(&mi->mi_async_reqs_cv);
2127 mutex_exit(&mi->mi_async_lock);
2128 return;
2129
2130 noasync:
2131 if (curproc == proc_pageout || curproc == proc_fsflush ||
2132 nfs_zone() != mi->mi_zone) {
2133 while (plist != NULL) {
2134 pp = plist;
2135 page_sub(&plist, pp);
2136 pp->p_fsdata = C_COMMIT;
2137 page_unlock(pp);
2138 }
2139 return;
2140 }
2141 (*commit)(vp, plist, offset, count, cr);
2142 }
2143
2144 /*
2145 * nfs4_async_inactive - hand off a VOP_INACTIVE call to a thread. The
2146 * reference to the vnode is handed over to the thread; the caller should
2147 * no longer refer to the vnode.
2148 *
2149 * Unlike most of the async routines, this handoff is needed for
2150 * correctness reasons, not just performance. So doing operations in the
2151 * context of the current thread is not an option.
2152 */
2153 void
2154 nfs4_async_inactive(vnode_t *vp, cred_t *cr)
2155 {
2156 mntinfo4_t *mi;
2157 struct nfs4_async_reqs *args;
2158 boolean_t signal_inactive_thread = B_FALSE;
2159
2160 mi = VTOMI4(vp);
2161
2162 args = kmem_alloc(sizeof (*args), KM_SLEEP);
2163 args->a_next = NULL;
2164 #ifdef DEBUG
2165 args->a_queuer = curthread;
2166 #endif
2167 args->a_vp = vp;
2168 ASSERT(cr != NULL);
2169 crhold(cr);
2170 args->a_cred = cr;
2171 args->a_io = NFS4_INACTIVE;
2172
2173 /*
2174 * Note that we don't check mi->mi_max_threads here, since we
2175 * *need* to get rid of this vnode regardless of whether someone
2176 * set nfs4_max_threads to zero in /etc/system.
2177 *
2178 * The manager thread knows about this and is willing to create
2179 * at least one thread to accommodate us.
2180 */
2181 mutex_enter(&mi->mi_async_lock);
2182 if (mi->mi_inactive_thread == NULL) {
2183 rnode4_t *rp;
2184 vnode_t *unldvp = NULL;
2185 char *unlname;
2186 cred_t *unlcred;
2187
2188 mutex_exit(&mi->mi_async_lock);
2189 /*
2190 * We just need to free up the memory associated with the
2191 * vnode, which can be safely done from within the current
2192 * context.
2193 */
2194 crfree(cr); /* drop our reference */
2195 kmem_free(args, sizeof (*args));
2196 rp = VTOR4(vp);
2197 mutex_enter(&rp->r_statelock);
2198 if (rp->r_unldvp != NULL) {
2199 unldvp = rp->r_unldvp;
2200 rp->r_unldvp = NULL;
2201 unlname = rp->r_unlname;
2202 rp->r_unlname = NULL;
2203 unlcred = rp->r_unlcred;
2204 rp->r_unlcred = NULL;
2205 }
2206 mutex_exit(&rp->r_statelock);
2207 /*
2208 * No need to explicitly throw away any cached pages. The
2209 * eventual r4inactive() will attempt a synchronous
2210 * VOP_PUTPAGE() which will immediately fail since the request
2211 * is coming from the wrong zone, and then will proceed to call
2212 * nfs4_invalidate_pages() which will clean things up for us.
2213 *
2214 * Throw away the delegation here so rp4_addfree()'s attempt to
2215 * return any existing delegations becomes a no-op.
2216 */
2217 if (rp->r_deleg_type != OPEN_DELEGATE_NONE) {
2218 (void) nfs_rw_enter_sig(&mi->mi_recovlock, RW_READER,
2219 FALSE);
2220 (void) nfs4delegreturn(rp, NFS4_DR_DISCARD);
2221 nfs_rw_exit(&mi->mi_recovlock);
2222 }
2223 nfs4_clear_open_streams(rp);
2224
2225 rp4_addfree(rp, cr);
2226 if (unldvp != NULL) {
2227 kmem_free(unlname, MAXNAMELEN);
2228 VN_RELE(unldvp);
2229 crfree(unlcred);
2230 }
2231 return;
2232 }
2233
2234 if (mi->mi_manager_thread == NULL) {
2235 /*
2236 * We want to talk to the inactive thread.
2237 */
2238 signal_inactive_thread = B_TRUE;
2239 }
2240
2241 /*
2242 * Enqueue the vnode and wake up either the special thread (empty
2243 * list) or an async thread.
2244 */
2245 if (mi->mi_async_reqs[NFS4_INACTIVE] == NULL) {
2246 mi->mi_async_reqs[NFS4_INACTIVE] = args;
2247 mi->mi_async_tail[NFS4_INACTIVE] = args;
2248 signal_inactive_thread = B_TRUE;
2249 } else {
2250 mi->mi_async_tail[NFS4_INACTIVE]->a_next = args;
2251 mi->mi_async_tail[NFS4_INACTIVE] = args;
2252 }
2253 if (signal_inactive_thread) {
2254 cv_signal(&mi->mi_inact_req_cv);
2255 } else {
2256 mi->mi_async_req_count++;
2257 ASSERT(mi->mi_async_req_count != 0);
2258 cv_signal(&mi->mi_async_reqs_cv);
2259 }
2260
2261 mutex_exit(&mi->mi_async_lock);
2262 }
2263
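/*
 * Copy tcount bytes of uio data into the file's pages mapped at base,
 * moving at most PAGESIZE bytes at a time.  Pages are created rather
 * than faulted in when the write covers a whole page or writes to or
 * beyond the current end of file, and r_size is updated as the copy
 * proceeds.
 */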
2264 int
2265 writerp4(rnode4_t *rp, caddr_t base, int tcount, struct uio *uio, int pgcreated)
2266 {
2267 int pagecreate;
2268 int n;
2269 int saved_n;
2270 caddr_t saved_base;
2271 u_offset_t offset;
2272 int error;
2273 int sm_error;
2274 vnode_t *vp = RTOV(rp);
2275
2276 ASSERT(tcount <= MAXBSIZE && tcount <= uio->uio_resid);
2277 ASSERT(nfs_rw_lock_held(&rp->r_rwlock, RW_WRITER));
2278 if (!vpm_enable) {
2279 ASSERT(((uintptr_t)base & MAXBOFFSET) + tcount <= MAXBSIZE);
2280 }
2281
2282 /*
2283 * Move bytes in at most PAGESIZE chunks. We must avoid
2284 * spanning pages in uiomove() because page faults may cause
2285 * the cache to be invalidated out from under us. The r_size is not
2286 * updated until after the uiomove. If we push the last page of a
2287 * file before r_size is correct, we will lose the data written past
2288 * the current (and invalid) r_size.
2289 */
2290 do {
2291 offset = uio->uio_loffset;
2292 pagecreate = 0;
2293
2294 /*
2295 * n is the number of bytes required to satisfy the request
2296 * or the number of bytes to fill out the page.
2297 */
2298 n = (int)MIN((PAGESIZE - (offset & PAGEOFFSET)), tcount);
2299
2300 /*
2301 * Check to see if we can skip reading in the page
2302 * and just allocate the memory. We can do this
2303 * if we are going to rewrite the entire mapping
2304 * or if we are going to write to or beyond the current
2305 * end of file from the beginning of the mapping.
2306 *
2307 * The read of r_size is now protected by r_statelock.
2308 */
2309 mutex_enter(&rp->r_statelock);
2310 /*
2311 * When pgcreated is nonzero the caller has already done
2312 * a segmap_getmapflt with forcefault 0 and S_WRITE. With
2313 * segkpm this means we already have at least one page
2314 * created and mapped at base.
2315 */
2316 pagecreate = pgcreated ||
2317 ((offset & PAGEOFFSET) == 0 &&
2318 (n == PAGESIZE || ((offset + n) >= rp->r_size)));
2319
2320 mutex_exit(&rp->r_statelock);
2321
2322 if (!vpm_enable && pagecreate) {
2323 /*
2324 * The last argument tells segmap_pagecreate() to
2325 * always lock the page, as opposed to sometimes
2326 * returning with the page locked. This way we avoid a
2327 * fault on the ensuing uiomove(), but also
2328 * more importantly (to fix bug 1094402) we can
2329 * call segmap_fault() to unlock the page in all
2330 * cases. An alternative would be to modify
2331 * segmap_pagecreate() to tell us when it is
2332 * locking a page, but that's a fairly major
2333 * interface change.
2334 */
2335 if (pgcreated == 0)
2336 (void) segmap_pagecreate(segkmap, base,
2337 (uint_t)n, 1);
2338 saved_base = base;
2339 saved_n = n;
2340 }
2341
		/*
		 * The number of bytes of data in the last page cannot
		 * be accurately determined while the page is being
		 * uiomove'd to and the size of the file is being
		 * updated.  Thus, inform threads which need to know
		 * accurately how much data is in the last page of the
		 * file.  They will not do the i/o immediately, but
		 * will arrange for the i/o to happen later when this
		 * modify operation has finished.
		 */
2352 ASSERT(!(rp->r_flags & R4MODINPROGRESS));
2353 mutex_enter(&rp->r_statelock);
2354 rp->r_flags |= R4MODINPROGRESS;
2355 rp->r_modaddr = (offset & MAXBMASK);
2356 mutex_exit(&rp->r_statelock);
2357
2358 if (vpm_enable) {
			/*
			 * Copy data.  If new pages are created, part of
			 * the page that is not written will be
			 * initialized with zeros.
			 */
2364 error = vpm_data_copy(vp, offset, n, uio,
2365 !pagecreate, NULL, 0, S_WRITE);
2366 } else {
2367 error = uiomove(base, n, UIO_WRITE, uio);
2368 }
2369
2370 /*
2371 * r_size is the maximum number of
2372 * bytes known to be in the file.
2373 * Make sure it is at least as high as the
2374 * first unwritten byte pointed to by uio_loffset.
2375 */
2376 mutex_enter(&rp->r_statelock);
2377 if (rp->r_size < uio->uio_loffset)
2378 rp->r_size = uio->uio_loffset;
2379 rp->r_flags &= ~R4MODINPROGRESS;
2380 rp->r_flags |= R4DIRTY;
2381 mutex_exit(&rp->r_statelock);
2382
2383 /* n = # of bytes written */
2384 n = (int)(uio->uio_loffset - offset);
2385
2386 if (!vpm_enable) {
2387 base += n;
2388 }
2389
2390 tcount -= n;
		/*
		 * If we created pages w/o initializing them completely,
		 * we need to zero the part that wasn't set up.
		 * This happens in most EOF write cases and if
		 * we had some sort of error during the uiomove.
		 */
2397 if (!vpm_enable && pagecreate) {
2398 if ((uio->uio_loffset & PAGEOFFSET) || n == 0)
2399 (void) kzero(base, PAGESIZE - n);
2400
2401 if (pgcreated) {
2402 /*
2403 * Caller is responsible for this page,
2404 * it was not created in this loop.
2405 */
2406 pgcreated = 0;
2407 } else {
2408 /*
2409 * For bug 1094402: segmap_pagecreate locks
2410 * page. Unlock it. This also unlocks the
2411 * pages allocated by page_create_va() in
2412 * segmap_pagecreate().
2413 */
2414 sm_error = segmap_fault(kas.a_hat, segkmap,
2415 saved_base, saved_n,
2416 F_SOFTUNLOCK, S_WRITE);
2417 if (error == 0)
2418 error = sm_error;
2419 }
2420 }
2421 } while (tcount > 0 && error == 0);
2422
2423 return (error);
2424 }
2425
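/*
 * Flush dirty pages for the given vnode.  If len is zero, every dirty
 * page from off to the end of the file is pushed via pvn_vplist_dirty();
 * otherwise the range [off, off + len) is walked a page at a time and
 * each dirty page is handed to the rnode's r_putapage routine.
 */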
2426 int
2427 nfs4_putpages(vnode_t *vp, u_offset_t off, size_t len, int flags, cred_t *cr)
2428 {
2429 rnode4_t *rp;
2430 page_t *pp;
2431 u_offset_t eoff;
2432 u_offset_t io_off;
2433 size_t io_len;
2434 int error;
	int rdirty = 0;
2436 int err;
2437
2438 rp = VTOR4(vp);
2439 ASSERT(rp->r_count > 0);
2440
2441 if (!nfs4_has_pages(vp))
2442 return (0);
2443
2444 ASSERT(vp->v_type != VCHR);
2445
2446 /*
2447 * If R4OUTOFSPACE is set, then all writes turn into B_INVAL
2448 * writes. B_FORCE is set to force the VM system to actually
2449 * invalidate the pages, even if the i/o failed. The pages
2450 * need to get invalidated because they can't be written out
2451 * because there isn't any space left on either the server's
2452 * file system or in the user's disk quota. The B_FREE bit
2453 * is cleared to avoid confusion as to whether this is a
2454 * request to place the page on the freelist or to destroy
2455 * it.
2456 */
2457 if ((rp->r_flags & R4OUTOFSPACE) ||
2458 (vp->v_vfsp->vfs_flag & VFS_UNMOUNTED))
2459 flags = (flags & ~B_FREE) | B_INVAL | B_FORCE;
2460
2461 if (len == 0) {
2462 /*
2463 * If doing a full file synchronous operation, then clear
2464 * the R4DIRTY bit. If a page gets dirtied while the flush
2465 * is happening, then R4DIRTY will get set again. The
2466 * R4DIRTY bit must get cleared before the flush so that
2467 * we don't lose this information.
2468 *
		 * If there are no full file async write operations
		 * pending and the R4DIRTY bit is set, clear it.
2471 */
2472 if (off == (u_offset_t)0 &&
2473 !(flags & B_ASYNC) &&
2474 (rp->r_flags & R4DIRTY)) {
2475 mutex_enter(&rp->r_statelock);
2476 rdirty = (rp->r_flags & R4DIRTY);
2477 rp->r_flags &= ~R4DIRTY;
2478 mutex_exit(&rp->r_statelock);
2479 } else if (flags & B_ASYNC && off == (u_offset_t)0) {
2480 mutex_enter(&rp->r_statelock);
2481 if (rp->r_flags & R4DIRTY && rp->r_awcount == 0) {
2482 rdirty = (rp->r_flags & R4DIRTY);
2483 rp->r_flags &= ~R4DIRTY;
2484 }
2485 mutex_exit(&rp->r_statelock);
2486 } else
2487 rdirty = 0;
2488
2489 /*
2490 * Search the entire vp list for pages >= off, and flush
2491 * the dirty pages.
2492 */
2493 error = pvn_vplist_dirty(vp, off, rp->r_putapage,
2494 flags, cr);
2495
2496 /*
2497 * If an error occurred and the file was marked as dirty
2498 * before and we aren't forcibly invalidating pages, then
2499 * reset the R4DIRTY flag.
2500 */
2501 if (error && rdirty &&
2502 (flags & (B_INVAL | B_FORCE)) != (B_INVAL | B_FORCE)) {
2503 mutex_enter(&rp->r_statelock);
2504 rp->r_flags |= R4DIRTY;
2505 mutex_exit(&rp->r_statelock);
2506 }
2507 } else {
2508 /*
2509 * Do a range from [off...off + len) looking for pages
2510 * to deal with.
2511 */
2512 error = 0;
2513 io_len = 0;
2514 eoff = off + len;
2515 mutex_enter(&rp->r_statelock);
2516 for (io_off = off; io_off < eoff && io_off < rp->r_size;
2517 io_off += io_len) {
2518 mutex_exit(&rp->r_statelock);
			/*
			 * If we are not invalidating, synchronously
			 * freeing, or writing pages, use
			 * page_lookup_nowait() to prevent reclaiming
			 * them from the free list.
			 */
2525 if ((flags & B_INVAL) || !(flags & B_ASYNC)) {
2526 pp = page_lookup(vp, io_off,
2527 (flags & (B_INVAL | B_FREE)) ?
2528 SE_EXCL : SE_SHARED);
2529 } else {
2530 pp = page_lookup_nowait(vp, io_off,
2531 (flags & B_FREE) ? SE_EXCL : SE_SHARED);
2532 }
2533
2534 if (pp == NULL || !pvn_getdirty(pp, flags))
2535 io_len = PAGESIZE;
2536 else {
2537 err = (*rp->r_putapage)(vp, pp, &io_off,
2538 &io_len, flags, cr);
2539 if (!error)
2540 error = err;
2541 /*
2542 * "io_off" and "io_len" are returned as
2543 * the range of pages we actually wrote.
2544 * This allows us to skip ahead more quickly
2545 * since several pages may've been dealt
2546 * with by this iteration of the loop.
2547 */
2548 }
2549 mutex_enter(&rp->r_statelock);
2550 }
2551 mutex_exit(&rp->r_statelock);
2552 }
2553
2554 return (error);
2555 }
2556
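/*
 * Invalidate the cached pages for the given vnode from offset off to the
 * end of the file.  The R4TRUNCATE flag serializes concurrent
 * invalidations; a full-file invalidation (off == 0) also clears R4DIRTY
 * and any cached error, unless the rnode is marked stale.
 */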
2557 void
2558 nfs4_invalidate_pages(vnode_t *vp, u_offset_t off, cred_t *cr)
2559 {
2560 rnode4_t *rp;
2561
2562 rp = VTOR4(vp);
2563 if (IS_SHADOW(vp, rp))
2564 vp = RTOV4(rp);
2565 mutex_enter(&rp->r_statelock);
2566 while (rp->r_flags & R4TRUNCATE)
2567 cv_wait(&rp->r_cv, &rp->r_statelock);
2568 rp->r_flags |= R4TRUNCATE;
2569 if (off == (u_offset_t)0) {
2570 rp->r_flags &= ~R4DIRTY;
2571 if (!(rp->r_flags & R4STALE))
2572 rp->r_error = 0;
2573 }
2574 rp->r_truncaddr = off;
2575 mutex_exit(&rp->r_statelock);
2576 (void) pvn_vplist_dirty(vp, off, rp->r_putapage,
2577 B_INVAL | B_TRUNC, cr);
2578 mutex_enter(&rp->r_statelock);
2579 rp->r_flags &= ~R4TRUNCATE;
2580 cv_broadcast(&rp->r_cv);
2581 mutex_exit(&rp->r_statelock);
2582 }
2583
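/*
 * kstat update routine for the read-only "mntinfo" kstat: snapshot the
 * current mount parameters (protocol, version, security flavor, transfer
 * sizes, attribute cache timeouts, etc.) into the mntinfo_kstat
 * structure.
 */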
2584 static int
2585 nfs4_mnt_kstat_update(kstat_t *ksp, int rw)
2586 {
2587 mntinfo4_t *mi;
2588 struct mntinfo_kstat *mik;
2589 vfs_t *vfsp;
2590
2591 /* this is a read-only kstat. Bail out on a write */
2592 if (rw == KSTAT_WRITE)
		return (EACCES);

2596 /*
2597 * We don't want to wait here as kstat_chain_lock could be held by
2598 * dounmount(). dounmount() takes vfs_reflock before the chain lock
2599 * and thus could lead to a deadlock.
2600 */
2601 vfsp = (struct vfs *)ksp->ks_private;
2602
2603 mi = VFTOMI4(vfsp);
2604 mik = (struct mntinfo_kstat *)ksp->ks_data;
2605
2606 (void) strcpy(mik->mik_proto, mi->mi_curr_serv->sv_knconf->knc_proto);
2607
2608 mik->mik_vers = (uint32_t)mi->mi_vers;
2609 mik->mik_flags = mi->mi_flags;
2610 /*
2611 * The sv_secdata holds the flavor the client specifies.
2612 * If the client uses default and a security negotiation
2613 * occurs, sv_currsec will point to the current flavor
2614 * selected from the server flavor list.
2615 * sv_currsec is NULL if no security negotiation takes place.
2616 */
2617 mik->mik_secmod = mi->mi_curr_serv->sv_currsec ?
2618 mi->mi_curr_serv->sv_currsec->secmod :
2619 mi->mi_curr_serv->sv_secdata->secmod;
2620 mik->mik_curread = (uint32_t)mi->mi_curread;
2621 mik->mik_curwrite = (uint32_t)mi->mi_curwrite;
2622 mik->mik_retrans = mi->mi_retrans;
2623 mik->mik_timeo = mi->mi_timeo;
2624 mik->mik_acregmin = HR2SEC(mi->mi_acregmin);
2625 mik->mik_acregmax = HR2SEC(mi->mi_acregmax);
2626 mik->mik_acdirmin = HR2SEC(mi->mi_acdirmin);
2627 mik->mik_acdirmax = HR2SEC(mi->mi_acdirmax);
2628 mik->mik_noresponse = (uint32_t)mi->mi_noresponse;
2629 mik->mik_failover = (uint32_t)mi->mi_failover;
2630 mik->mik_remap = (uint32_t)mi->mi_remap;
2631
2632 (void) strcpy(mik->mik_curserver, mi->mi_curr_serv->sv_hostname);
2633
2634 return (0);
2635 }
2636
2637 void
2638 nfs4_mnt_kstat_init(struct vfs *vfsp)
2639 {
2640 mntinfo4_t *mi = VFTOMI4(vfsp);
2641
2642 /*
2643 * PSARC 2001/697 Contract Private Interface
2644 * All nfs kstats are under SunMC contract
2645 * Please refer to the PSARC listed above and contact
2646 * SunMC before making any changes!
2647 *
2648 * Changes must be reviewed by Solaris File Sharing
2649 * Changes must be communicated to contract-2001-697@sun.com
2650 *
2651 */
2652
2653 mi->mi_io_kstats = kstat_create_zone("nfs", getminor(vfsp->vfs_dev),
2654 NULL, "nfs", KSTAT_TYPE_IO, 1, 0, mi->mi_zone->zone_id);
2655 if (mi->mi_io_kstats) {
2656 if (mi->mi_zone->zone_id != GLOBAL_ZONEID)
2657 kstat_zone_add(mi->mi_io_kstats, GLOBAL_ZONEID);
2658 mi->mi_io_kstats->ks_lock = &mi->mi_lock;
2659 kstat_install(mi->mi_io_kstats);
2660 }
2661
2662 if ((mi->mi_ro_kstats = kstat_create_zone("nfs",
2663 getminor(vfsp->vfs_dev), "mntinfo", "misc", KSTAT_TYPE_RAW,
2664 sizeof (struct mntinfo_kstat), 0, mi->mi_zone->zone_id)) != NULL) {
2665 if (mi->mi_zone->zone_id != GLOBAL_ZONEID)
2666 kstat_zone_add(mi->mi_ro_kstats, GLOBAL_ZONEID);
2667 mi->mi_ro_kstats->ks_update = nfs4_mnt_kstat_update;
2668 mi->mi_ro_kstats->ks_private = (void *)vfsp;
2669 kstat_install(mi->mi_ro_kstats);
2670 }
2671
2672 nfs4_mnt_recov_kstat_init(vfsp);
2673 }
2674
2675 void
2676 nfs4_write_error(vnode_t *vp, int error, cred_t *cr)
2677 {
2678 mntinfo4_t *mi;
2679 clock_t now = ddi_get_lbolt();
2680
2681 mi = VTOMI4(vp);
2682 /*
2683 * In case of forced unmount, do not print any messages
2684 * since it can flood the console with error messages.
2685 */
2686 if (mi->mi_vfsp->vfs_flag & VFS_UNMOUNTED)
2687 return;
2688
2689 /*
	 * If the mount point is dead (not recoverable), do not
	 * print error messages that can flood the console.
2692 */
2693 if (mi->mi_flags & MI4_RECOV_FAIL)
2694 return;
2695
2696 /*
2697 * No use in flooding the console with ENOSPC
2698 * messages from the same file system.
2699 */
2700 if ((error != ENOSPC && error != EDQUOT) ||
2701 now - mi->mi_printftime > 0) {
2702 zoneid_t zoneid = mi->mi_zone->zone_id;
2703
2704 #ifdef DEBUG
2705 nfs_perror(error, "NFS%ld write error on host %s: %m.\n",
2706 mi->mi_vers, VTOR4(vp)->r_server->sv_hostname, NULL);
2707 #else
2708 nfs_perror(error, "NFS write error on host %s: %m.\n",
2709 VTOR4(vp)->r_server->sv_hostname, NULL);
2710 #endif
2711 if (error == ENOSPC || error == EDQUOT) {
2712 zcmn_err(zoneid, CE_CONT,
2713 "^File: userid=%d, groupid=%d\n",
2714 crgetuid(cr), crgetgid(cr));
2715 if (crgetuid(curthread->t_cred) != crgetuid(cr) ||
2716 crgetgid(curthread->t_cred) != crgetgid(cr)) {
2717 zcmn_err(zoneid, CE_CONT,
2718 "^User: userid=%d, groupid=%d\n",
2719 crgetuid(curthread->t_cred),
2720 crgetgid(curthread->t_cred));
2721 }
2722 mi->mi_printftime = now +
2723 nfs_write_error_interval * hz;
2724 }
2725 sfh4_printfhandle(VTOR4(vp)->r_fh);
2726 #ifdef DEBUG
2727 if (error == EACCES) {
2728 zcmn_err(zoneid, CE_CONT,
2729 "nfs_bio: cred is%s kcred\n",
2730 cr == kcred ? "" : " not");
2731 }
2732 #endif
2733 }
2734 }
2735
2736 /*
2737 * Return non-zero if the given file can be safely memory mapped. Locks
2738 * are safe if whole-file (length and offset are both zero).
2739 */
2740
2741 #define SAFE_LOCK(flk) ((flk).l_start == 0 && (flk).l_len == 0)
2742
2743 static int
2744 nfs4_safemap(const vnode_t *vp)
2745 {
2746 locklist_t *llp, *next_llp;
2747 int safe = 1;
2748 rnode4_t *rp = VTOR4(vp);
2749
2750 ASSERT(nfs_rw_lock_held(&rp->r_lkserlock, RW_WRITER));
2751
2752 NFS4_DEBUG(nfs4_client_map_debug, (CE_NOTE, "nfs4_safemap: "
2753 "vp = %p", (void *)vp));
2754
2755 /*
2756 * Review all the locks for the vnode, both ones that have been
2757 * acquired and ones that are pending. We assume that
2758 * flk_active_locks_for_vp() has merged any locks that can be
2759 * merged (so that if a process has the entire file locked, it is
2760 * represented as a single lock).
2761 *
2762 * Note that we can't bail out of the loop if we find a non-safe
2763 * lock, because we have to free all the elements in the llp list.
2764 * We might be able to speed up this code slightly by not looking
2765 * at each lock's l_start and l_len fields once we've found a
2766 * non-safe lock.
2767 */
2768
2769 llp = flk_active_locks_for_vp(vp);
2770 while (llp) {
2771 NFS4_DEBUG(nfs4_client_map_debug, (CE_NOTE,
2772 "nfs4_safemap: active lock (%" PRId64 ", %" PRId64 ")",
2773 llp->ll_flock.l_start, llp->ll_flock.l_len));
2774 if (!SAFE_LOCK(llp->ll_flock)) {
2775 safe = 0;
2776 NFS4_DEBUG(nfs4_client_map_debug, (CE_NOTE,
2777 "nfs4_safemap: unsafe active lock (%" PRId64
2778 ", %" PRId64 ")", llp->ll_flock.l_start,
2779 llp->ll_flock.l_len));
2780 }
2781 next_llp = llp->ll_next;
2782 VN_RELE(llp->ll_vp);
2783 kmem_free(llp, sizeof (*llp));
2784 llp = next_llp;
2785 }
2786
2787 NFS4_DEBUG(nfs4_client_map_debug, (CE_NOTE, "nfs4_safemap: %s",
2788 safe ? "safe" : "unsafe"));
2789 return (safe);
2790 }
2791
2792 /*
2793 * Return whether there is a lost LOCK or LOCKU queued up for the given
2794 * file that would make an mmap request unsafe. cf. nfs4_safemap().
2795 */
2796
2797 bool_t
2798 nfs4_map_lost_lock_conflict(vnode_t *vp)
2799 {
2800 bool_t conflict = FALSE;
2801 nfs4_lost_rqst_t *lrp;
2802 mntinfo4_t *mi = VTOMI4(vp);
2803
2804 mutex_enter(&mi->mi_lock);
2805 for (lrp = list_head(&mi->mi_lost_state); lrp != NULL;
2806 lrp = list_next(&mi->mi_lost_state, lrp)) {
2807 if (lrp->lr_op != OP_LOCK && lrp->lr_op != OP_LOCKU)
2808 continue;
2809 ASSERT(lrp->lr_vp != NULL);
2810 if (!VOP_CMP(lrp->lr_vp, vp, NULL))
2811 continue; /* different file */
2812 if (!SAFE_LOCK(*lrp->lr_flk)) {
2813 conflict = TRUE;
2814 break;
2815 }
2816 }
2817
2818 mutex_exit(&mi->mi_lock);
2819 return (conflict);
2820 }
2821
/*
 * nfs4_lockcompletion:
 *
 * If the vnode has a lock that makes it unsafe to cache the file, mark it
 * as non-cacheable (set the VNOCACHE bit).
 */
2828
2829 void
2830 nfs4_lockcompletion(vnode_t *vp, int cmd)
2831 {
2832 rnode4_t *rp = VTOR4(vp);
2833
2834 ASSERT(nfs_rw_lock_held(&rp->r_lkserlock, RW_WRITER));
2835 ASSERT(!IS_SHADOW(vp, rp));
2836
2837 if (cmd == F_SETLK || cmd == F_SETLKW) {
2838
2839 if (!nfs4_safemap(vp)) {
2840 mutex_enter(&vp->v_lock);
2841 vp->v_flag |= VNOCACHE;
2842 mutex_exit(&vp->v_lock);
2843 } else {
2844 mutex_enter(&vp->v_lock);
2845 vp->v_flag &= ~VNOCACHE;
2846 mutex_exit(&vp->v_lock);
2847 }
2848 }
2849 /*
2850 * The cached attributes of the file are stale after acquiring
2851 * the lock on the file. They were updated when the file was
2852 * opened, but not updated when the lock was acquired. Therefore the
2853 * cached attributes are invalidated after the lock is obtained.
2854 */
2855 PURGE_ATTRCACHE4(vp);
2856 }
2857
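/*
 * zone_key_create() callback to allocate and initialize the per-zone
 * list of NFSv4 mounts.
 */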
2858 /* ARGSUSED */
2859 static void *
2860 nfs4_mi_init(zoneid_t zoneid)
2861 {
2862 struct mi4_globals *mig;
2863
2864 mig = kmem_alloc(sizeof (*mig), KM_SLEEP);
2865 mutex_init(&mig->mig_lock, NULL, MUTEX_DEFAULT, NULL);
2866 list_create(&mig->mig_list, sizeof (mntinfo4_t),
2867 offsetof(mntinfo4_t, mi_zone_node));
2868 mig->mig_destructor_called = B_FALSE;
2869 return (mig);
2870 }
2871
2872 /*
2873 * Callback routine to tell all NFSv4 mounts in the zone to start tearing down
2874 * state and killing off threads.
2875 */
2876 /* ARGSUSED */
2877 static void
2878 nfs4_mi_shutdown(zoneid_t zoneid, void *data)
2879 {
2880 struct mi4_globals *mig = data;
2881 mntinfo4_t *mi;
2882 nfs4_server_t *np;
2883
2884 NFS4_DEBUG(nfs4_client_zone_debug, (CE_NOTE,
2885 "nfs4_mi_shutdown zone %d\n", zoneid));
2886 ASSERT(mig != NULL);
2887 for (;;) {
2888 mutex_enter(&mig->mig_lock);
2889 mi = list_head(&mig->mig_list);
2890 if (mi == NULL) {
2891 mutex_exit(&mig->mig_lock);
2892 break;
2893 }
2894
2895 NFS4_DEBUG(nfs4_client_zone_debug, (CE_NOTE,
2896 "nfs4_mi_shutdown stopping vfs %p\n", (void *)mi->mi_vfsp));
2897 /*
2898 * purge the DNLC for this filesystem
2899 */
2900 (void) dnlc_purge_vfsp(mi->mi_vfsp, 0);
2901 /*
2902 * Tell existing async worker threads to exit.
2903 */
2904 mutex_enter(&mi->mi_async_lock);
2905 mi->mi_max_threads = 0;
2906 NFS4_WAKEALL_ASYNC_WORKERS(mi->mi_async_work_cv);
2907 /*
2908 * Set the appropriate flags, signal and wait for both the
2909 * async manager and the inactive thread to exit when they're
2910 * done with their current work.
2911 */
2912 mutex_enter(&mi->mi_lock);
2913 mi->mi_flags |= (MI4_ASYNC_MGR_STOP|MI4_DEAD);
2914 mutex_exit(&mi->mi_lock);
2915 mutex_exit(&mi->mi_async_lock);
2916 if (mi->mi_manager_thread) {
2917 nfs4_async_manager_stop(mi->mi_vfsp);
2918 }
2919 if (mi->mi_inactive_thread) {
2920 mutex_enter(&mi->mi_async_lock);
2921 cv_signal(&mi->mi_inact_req_cv);
2922 /*
2923 * Wait for the inactive thread to exit.
2924 */
2925 while (mi->mi_inactive_thread != NULL) {
2926 cv_wait(&mi->mi_async_cv, &mi->mi_async_lock);
2927 }
2928 mutex_exit(&mi->mi_async_lock);
2929 }
		/*
		 * Wait for the recovery thread to complete; it signals
		 * when it is done using the "mi" structure and is
		 * about to exit.
		 */
2935 mutex_enter(&mi->mi_lock);
2936 while (mi->mi_in_recovery > 0)
2937 cv_wait(&mi->mi_cv_in_recov, &mi->mi_lock);
2938 mutex_exit(&mi->mi_lock);
		/*
		 * This mi is done; remove it from the list.  We're
		 * finished once the list is empty.
		 */
2943 list_remove(&mig->mig_list, mi);
2944 mutex_exit(&mig->mig_lock);
2945 zone_rele_ref(&mi->mi_zone_ref, ZONE_REF_NFSV4);
2946
		/*
		 * Release the holds on the vfs and mi that were taken
		 * to prevent a race with zone shutdown.  These are the
		 * holds from nfs4_mi_zonelist_add().
		 */
2951 VFS_RELE(mi->mi_vfsp);
2952 MI4_RELE(mi);
2953 }
2954 /*
2955 * Tell each renew thread in the zone to exit
2956 */
2957 mutex_enter(&nfs4_server_lst_lock);
2958 for (np = nfs4_server_lst.forw; np != &nfs4_server_lst; np = np->forw) {
2959 mutex_enter(&np->s_lock);
2960 if (np->zoneid == zoneid) {
2961 /*
2962 * We add another hold onto the nfs4_server_t
			 * because this will make sure that the nfs4_server_t
2964 * stays around until nfs4_callback_fini_zone destroys
2965 * the zone. This way, the renew thread can
2966 * unconditionally release its holds on the
2967 * nfs4_server_t.
2968 */
2969 np->s_refcnt++;
2970 nfs4_mark_srv_dead(np);
2971 }
2972 mutex_exit(&np->s_lock);
2973 }
2974 mutex_exit(&nfs4_server_lst_lock);
2975 }
2976
2977 static void
2978 nfs4_mi_free_globals(struct mi4_globals *mig)
2979 {
2980 list_destroy(&mig->mig_list); /* makes sure the list is empty */
2981 mutex_destroy(&mig->mig_lock);
2982 kmem_free(mig, sizeof (*mig));
2983 }
2984
2985 /* ARGSUSED */
2986 static void
2987 nfs4_mi_destroy(zoneid_t zoneid, void *data)
2988 {
2989 struct mi4_globals *mig = data;
2990
2991 NFS4_DEBUG(nfs4_client_zone_debug, (CE_NOTE,
2992 "nfs4_mi_destroy zone %d\n", zoneid));
2993 ASSERT(mig != NULL);
2994 mutex_enter(&mig->mig_lock);
2995 if (list_head(&mig->mig_list) != NULL) {
2996 /* Still waiting for VFS_FREEVFS() */
2997 mig->mig_destructor_called = B_TRUE;
2998 mutex_exit(&mig->mig_lock);
2999 return;
3000 }
3001 nfs4_mi_free_globals(mig);
3002 }
3003
3004 /*
3005 * Add an NFS mount to the per-zone list of NFS mounts.
3006 */
3007 void
3008 nfs4_mi_zonelist_add(mntinfo4_t *mi)
3009 {
3010 struct mi4_globals *mig;
3011
3012 mig = zone_getspecific(mi4_list_key, mi->mi_zone);
3013 mutex_enter(&mig->mig_lock);
3014 list_insert_head(&mig->mig_list, mi);
	/*
	 * Holds added to eliminate a race with zone shutdown - these
	 * will be released in nfs4_mi_shutdown().
	 */
3019 MI4_HOLD(mi);
3020 VFS_HOLD(mi->mi_vfsp);
3021 mutex_exit(&mig->mig_lock);
3022 }
3023
3024 /*
3025 * Remove an NFS mount from the per-zone list of NFS mounts.
3026 */
3027 int
3028 nfs4_mi_zonelist_remove(mntinfo4_t *mi)
3029 {
3030 struct mi4_globals *mig;
3031 int ret = 0;
3032
3033 mig = zone_getspecific(mi4_list_key, mi->mi_zone);
3034 mutex_enter(&mig->mig_lock);
3035 mutex_enter(&mi->mi_lock);
3036 /* if this mi is marked dead, then the zone already released it */
3037 if (!(mi->mi_flags & MI4_DEAD)) {
3038 list_remove(&mig->mig_list, mi);
3039 mutex_exit(&mi->mi_lock);
3040
3041 /* release the holds put on in zonelist_add(). */
3042 VFS_RELE(mi->mi_vfsp);
3043 MI4_RELE(mi);
3044 ret = 1;
3045 } else {
3046 mutex_exit(&mi->mi_lock);
3047 }
3048
3049 /*
3050 * We can be called asynchronously by VFS_FREEVFS() after the zone
3051 * shutdown/destroy callbacks have executed; if so, clean up the zone's
3052 * mi globals.
3053 */
3054 if (list_head(&mig->mig_list) == NULL &&
3055 mig->mig_destructor_called == B_TRUE) {
3056 nfs4_mi_free_globals(mig);
3057 return (ret);
3058 }
3059 mutex_exit(&mig->mig_lock);
3060 return (ret);
3061 }
3062
3063 void
3064 nfs_free_mi4(mntinfo4_t *mi)
3065 {
3066 nfs4_open_owner_t *foop;
3067 nfs4_oo_hash_bucket_t *bucketp;
3068 nfs4_debug_msg_t *msgp;
3069 int i;
3070 servinfo4_t *svp;
3071
	/*
	 * Code introduced here should be carefully evaluated to make
	 * sure none of the freed resources are accessed either directly
	 * or indirectly after freeing them.  For example, don't add
	 * calls to NFS4_DEBUG that use mntinfo4_t structure members
	 * after they have been freed, or other routines that call back
	 * into NFS and access freed mntinfo4_t structure members.
	 */
3080 mutex_enter(&mi->mi_lock);
3081 ASSERT(mi->mi_recovthread == NULL);
3082 ASSERT(mi->mi_flags & MI4_ASYNC_MGR_STOP);
3083 mutex_exit(&mi->mi_lock);
3084 mutex_enter(&mi->mi_async_lock);
3085 ASSERT(mi->mi_threads[NFS4_ASYNC_QUEUE] == 0 &&
3086 mi->mi_threads[NFS4_ASYNC_PGOPS_QUEUE] == 0);
3087 ASSERT(mi->mi_manager_thread == NULL);
3088 mutex_exit(&mi->mi_async_lock);
3089 if (mi->mi_io_kstats) {
3090 kstat_delete(mi->mi_io_kstats);
3091 mi->mi_io_kstats = NULL;
3092 }
3093 if (mi->mi_ro_kstats) {
3094 kstat_delete(mi->mi_ro_kstats);
3095 mi->mi_ro_kstats = NULL;
3096 }
3097 if (mi->mi_recov_ksp) {
3098 kstat_delete(mi->mi_recov_ksp);
3099 mi->mi_recov_ksp = NULL;
3100 }
3101 mutex_enter(&mi->mi_msg_list_lock);
3102 while (msgp = list_head(&mi->mi_msg_list)) {
3103 list_remove(&mi->mi_msg_list, msgp);
3104 nfs4_free_msg(msgp);
3105 }
3106 mutex_exit(&mi->mi_msg_list_lock);
3107 list_destroy(&mi->mi_msg_list);
3108 if (mi->mi_fname != NULL)
3109 fn_rele(&mi->mi_fname);
3110 if (mi->mi_rootfh != NULL)
3111 sfh4_rele(&mi->mi_rootfh);
3112 if (mi->mi_srvparentfh != NULL)
3113 sfh4_rele(&mi->mi_srvparentfh);
3114 svp = mi->mi_servers;
3115 sv4_free(svp);
3116 mutex_destroy(&mi->mi_lock);
3117 mutex_destroy(&mi->mi_async_lock);
3118 mutex_destroy(&mi->mi_msg_list_lock);
3119 nfs_rw_destroy(&mi->mi_recovlock);
3120 nfs_rw_destroy(&mi->mi_rename_lock);
3121 nfs_rw_destroy(&mi->mi_fh_lock);
3122 cv_destroy(&mi->mi_failover_cv);
3123 cv_destroy(&mi->mi_async_reqs_cv);
3124 cv_destroy(&mi->mi_async_work_cv[NFS4_ASYNC_QUEUE]);
3125 cv_destroy(&mi->mi_async_work_cv[NFS4_ASYNC_PGOPS_QUEUE]);
3126 cv_destroy(&mi->mi_async_cv);
3127 cv_destroy(&mi->mi_inact_req_cv);
3128 /*
3129 * Destroy the oo hash lists and mutexes for the cred hash table.
3130 */
3131 for (i = 0; i < NFS4_NUM_OO_BUCKETS; i++) {
3132 bucketp = &(mi->mi_oo_list[i]);
3133 /* Destroy any remaining open owners on the list */
3134 foop = list_head(&bucketp->b_oo_hash_list);
3135 while (foop != NULL) {
3136 list_remove(&bucketp->b_oo_hash_list, foop);
3137 nfs4_destroy_open_owner(foop);
3138 foop = list_head(&bucketp->b_oo_hash_list);
3139 }
3140 list_destroy(&bucketp->b_oo_hash_list);
3141 mutex_destroy(&bucketp->b_lock);
3142 }
3143 /*
3144 * Empty and destroy the freed open owner list.
3145 */
3146 foop = list_head(&mi->mi_foo_list);
3147 while (foop != NULL) {
3148 list_remove(&mi->mi_foo_list, foop);
3149 nfs4_destroy_open_owner(foop);
3150 foop = list_head(&mi->mi_foo_list);
3151 }
3152 list_destroy(&mi->mi_foo_list);
3153 list_destroy(&mi->mi_bseqid_list);
3154 list_destroy(&mi->mi_lost_state);
3155 avl_destroy(&mi->mi_filehandles);
3156 kmem_free(mi, sizeof (*mi));
3157 }
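
/*
 * Manage the reference count on a mntinfo4_t; the structure is freed by
 * nfs_free_mi4() when the last reference is released.
 */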
3158 void
3159 mi_hold(mntinfo4_t *mi)
3160 {
3161 atomic_inc_32(&mi->mi_count);
3162 ASSERT(mi->mi_count != 0);
3163 }
3164
3165 void
3166 mi_rele(mntinfo4_t *mi)
3167 {
3168 ASSERT(mi->mi_count != 0);
3169 if (atomic_dec_32_nv(&mi->mi_count) == 0) {
3170 nfs_free_mi4(mi);
3171 }
3172 }
3173
3174 vnode_t nfs4_xattr_notsupp_vnode;
3175
3176 void
3177 nfs4_clnt_init(void)
3178 {
3179 nfs4_vnops_init();
3180 (void) nfs4_rnode_init();
3181 (void) nfs4_shadow_init();
3182 (void) nfs4_acache_init();
3183 (void) nfs4_subr_init();
3184 nfs4_acl_init();
3185 nfs_idmap_init();
3186 nfs4_callback_init();
3187 nfs4_secinfo_init();
3188 #ifdef DEBUG
3189 tsd_create(&nfs4_tsd_key, NULL);
3190 #endif
3191
3192 /*
3193 * Add a CPR callback so that we can update client
3194 * lease after a suspend and resume.
3195 */
3196 cid = callb_add(nfs4_client_cpr_callb, 0, CB_CL_CPR_RPC, "nfs4");
3197
3198 zone_key_create(&mi4_list_key, nfs4_mi_init, nfs4_mi_shutdown,
3199 nfs4_mi_destroy);
3200
3201 /*
3202 * Initialize the reference count of the notsupp xattr cache vnode to 1
3203 * so that it never goes away (VOP_INACTIVE isn't called on it).
3204 */
3205 vn_reinit(&nfs4_xattr_notsupp_vnode);
3206 }
3207
3208 void
3209 nfs4_clnt_fini(void)
3210 {
3211 (void) zone_key_delete(mi4_list_key);
3212 nfs4_vnops_fini();
3213 (void) nfs4_rnode_fini();
3214 (void) nfs4_shadow_fini();
3215 (void) nfs4_acache_fini();
3216 (void) nfs4_subr_fini();
3217 nfs_idmap_fini();
3218 nfs4_callback_fini();
3219 nfs4_secinfo_fini();
3220 #ifdef DEBUG
3221 tsd_destroy(&nfs4_tsd_key);
3222 #endif
3223 if (cid)
3224 (void) callb_delete(cid);
3225 }
3226
3227 /*ARGSUSED*/
3228 static boolean_t
3229 nfs4_client_cpr_callb(void *arg, int code)
3230 {
3231 /*
3232 * We get called for Suspend and Resume events.
3233 * For the suspend case we simply don't care!
3234 */
3235 if (code == CB_CODE_CPR_CHKPT) {
3236 return (B_TRUE);
3237 }
3238
3239 /*
3240 * When we get to here we are in the process of
3241 * resuming the system from a previous suspend.
3242 */
3243 nfs4_client_resumed = gethrestime_sec();
3244 return (B_TRUE);
3245 }
3246
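/*
 * Per-server thread that keeps the client's lease from expiring.  While
 * the server holds state for us, it sleeps for roughly half the lease
 * period and issues a RENEW whenever no other over-the-wire operation
 * has implicitly renewed the lease in the meantime.
 */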
3247 void
3248 nfs4_renew_lease_thread(nfs4_server_t *sp)
3249 {
3250 int error = 0;
3251 time_t tmp_last_renewal_time, tmp_time, tmp_now_time, kip_secs;
3252 clock_t tick_delay = 0;
3253 clock_t time_left = 0;
3254 callb_cpr_t cpr_info;
3255 kmutex_t cpr_lock;
3256
3257 NFS4_DEBUG(nfs4_client_lease_debug, (CE_NOTE,
3258 "nfs4_renew_lease_thread: acting on sp 0x%p", (void*)sp));
3259 mutex_init(&cpr_lock, NULL, MUTEX_DEFAULT, NULL);
3260 CALLB_CPR_INIT(&cpr_info, &cpr_lock, callb_generic_cpr, "nfsv4Lease");
3261
3262 mutex_enter(&sp->s_lock);
3263 /* sp->s_lease_time is set via a GETATTR */
3264 sp->last_renewal_time = gethrestime_sec();
3265 sp->lease_valid = NFS4_LEASE_UNINITIALIZED;
3266 ASSERT(sp->s_refcnt >= 1);
3267
3268 for (;;) {
3269 if (!sp->state_ref_count ||
3270 sp->lease_valid != NFS4_LEASE_VALID) {
3271
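			/*
			 * Nothing to renew: sleep for roughly half the
			 * lease period (less three times the estimated
			 * propagation delay, but at least one second)
			 * before checking again.
			 */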
3272 kip_secs = MAX((sp->s_lease_time >> 1) -
3273 (3 * sp->propagation_delay.tv_sec), 1);
3274
3275 tick_delay = SEC_TO_TICK(kip_secs);
3276
3277 NFS4_DEBUG(nfs4_client_lease_debug, (CE_NOTE,
3278 "nfs4_renew_lease_thread: no renew : thread "
3279 "wait %ld secs", kip_secs));
3280
3281 NFS4_DEBUG(nfs4_client_lease_debug, (CE_NOTE,
3282 "nfs4_renew_lease_thread: no renew : "
3283 "state_ref_count %d, lease_valid %d",
3284 sp->state_ref_count, sp->lease_valid));
3285
3286 mutex_enter(&cpr_lock);
3287 CALLB_CPR_SAFE_BEGIN(&cpr_info);
3288 mutex_exit(&cpr_lock);
3289 time_left = cv_reltimedwait(&sp->cv_thread_exit,
3290 &sp->s_lock, tick_delay, TR_CLOCK_TICK);
3291 mutex_enter(&cpr_lock);
3292 CALLB_CPR_SAFE_END(&cpr_info, &cpr_lock);
3293 mutex_exit(&cpr_lock);
3294
3295 NFS4_DEBUG(nfs4_client_lease_debug, (CE_NOTE,
3296 "nfs4_renew_lease_thread: no renew: "
3297 "time left %ld", time_left));
3298
3299 if (sp->s_thread_exit == NFS4_THREAD_EXIT)
3300 goto die;
3301 continue;
3302 }
3303
3304 tmp_last_renewal_time = sp->last_renewal_time;
3305
3306 tmp_time = gethrestime_sec() - sp->last_renewal_time +
3307 (3 * sp->propagation_delay.tv_sec);
3308
3309 NFS4_DEBUG(nfs4_client_lease_debug, (CE_NOTE,
3310 "nfs4_renew_lease_thread: tmp_time %ld, "
3311 "sp->last_renewal_time %ld", tmp_time,
3312 sp->last_renewal_time));
3313
3314 kip_secs = MAX((sp->s_lease_time >> 1) - tmp_time, 1);
3315
3316 tick_delay = SEC_TO_TICK(kip_secs);
3317
3318 NFS4_DEBUG(nfs4_client_lease_debug, (CE_NOTE,
3319 "nfs4_renew_lease_thread: valid lease: sleep for %ld "
3320 "secs", kip_secs));
3321
3322 mutex_enter(&cpr_lock);
3323 CALLB_CPR_SAFE_BEGIN(&cpr_info);
3324 mutex_exit(&cpr_lock);
3325 time_left = cv_reltimedwait(&sp->cv_thread_exit, &sp->s_lock,
3326 tick_delay, TR_CLOCK_TICK);
3327 mutex_enter(&cpr_lock);
3328 CALLB_CPR_SAFE_END(&cpr_info, &cpr_lock);
3329 mutex_exit(&cpr_lock);
3330
3331 NFS4_DEBUG(nfs4_client_lease_debug, (CE_NOTE,
3332 "nfs4_renew_lease_thread: valid lease: time left %ld :"
3333 "sp last_renewal_time %ld, nfs4_client_resumed %ld, "
3334 "tmp_last_renewal_time %ld", time_left,
3335 sp->last_renewal_time, nfs4_client_resumed,
3336 tmp_last_renewal_time));
3337
3338 if (sp->s_thread_exit == NFS4_THREAD_EXIT)
3339 goto die;
3340
3341 if (tmp_last_renewal_time == sp->last_renewal_time ||
3342 (nfs4_client_resumed != 0 &&
3343 nfs4_client_resumed > sp->last_renewal_time)) {
			/*
			 * Issue the RENEW op, since the lease has not
			 * been renewed while we slept.
			 */
3348 tmp_now_time = gethrestime_sec();
3349 error = nfs4renew(sp);
			/*
			 * Need to re-acquire sp's lock; nfs4renew()
			 * relinquishes it.
			 */
3354 mutex_enter(&sp->s_lock);
3355
3356 /*
3357 * See if someone changed s_thread_exit while we gave
3358 * up s_lock.
3359 */
3360 if (sp->s_thread_exit == NFS4_THREAD_EXIT)
3361 goto die;
3362
3363 if (!error) {
3364 /*
3365 * check to see if we implicitly renewed while
3366 * we waited for a reply for our RENEW call.
3367 */
3368 if (tmp_last_renewal_time ==
3369 sp->last_renewal_time) {
3370 /* no implicit renew came */
3371 sp->last_renewal_time = tmp_now_time;
3372 } else {
3373 NFS4_DEBUG(nfs4_client_lease_debug,
3374 (CE_NOTE, "renew_thread: did "
3375 "implicit renewal before reply "
3376 "from server for RENEW"));
3377 }
3378 } else {
3379 /* figure out error */
3380 NFS4_DEBUG(nfs4_client_lease_debug, (CE_NOTE,
3381 "renew_thread: nfs4renew returned error"
3382 " %d", error));
3383 }
3384
3385 }
3386 }
3387
3388 die:
3389 NFS4_DEBUG(nfs4_client_lease_debug, (CE_NOTE,
3390 "nfs4_renew_lease_thread: thread exiting"));
3391
3392 while (sp->s_otw_call_count != 0) {
3393 NFS4_DEBUG(nfs4_client_lease_debug, (CE_NOTE,
3394 "nfs4_renew_lease_thread: waiting for outstanding "
3395 "otw calls to finish for sp 0x%p, current "
3396 "s_otw_call_count %d", (void *)sp,
3397 sp->s_otw_call_count));
3398 mutex_enter(&cpr_lock);
3399 CALLB_CPR_SAFE_BEGIN(&cpr_info);
3400 mutex_exit(&cpr_lock);
3401 cv_wait(&sp->s_cv_otw_count, &sp->s_lock);
3402 mutex_enter(&cpr_lock);
3403 CALLB_CPR_SAFE_END(&cpr_info, &cpr_lock);
3404 mutex_exit(&cpr_lock);
3405 }
3406 mutex_exit(&sp->s_lock);
3407
3408 nfs4_server_rele(sp); /* free the thread's reference */
3409 nfs4_server_rele(sp); /* free the list's reference */
3410 sp = NULL;
3411
3412 done:
3413 mutex_enter(&cpr_lock);
3414 CALLB_CPR_EXIT(&cpr_info); /* drops cpr_lock */
3415 mutex_destroy(&cpr_lock);
3416
3417 NFS4_DEBUG(nfs4_client_lease_debug, (CE_NOTE,
3418 "nfs4_renew_lease_thread: renew thread exit officially"));
3419
3420 zthread_exit();
3421 /* NOT REACHED */
3422 }
3423
3424 /*
3425 * Send out a RENEW op to the server.
3426 * Assumes sp is locked down.
3427 */
3428 static int
3429 nfs4renew(nfs4_server_t *sp)
3430 {
3431 COMPOUND4args_clnt args;
3432 COMPOUND4res_clnt res;
3433 nfs_argop4 argop[1];
3434 int doqueue = 1;
3435 int rpc_error;
3436 cred_t *cr;
3437 mntinfo4_t *mi;
3438 timespec_t prop_time, after_time;
3439 int needrecov = FALSE;
3440 nfs4_recov_state_t recov_state;
3441 nfs4_error_t e = { 0, NFS4_OK, RPC_SUCCESS };
3442
3443 NFS4_DEBUG(nfs4_client_lease_debug, (CE_NOTE, "nfs4renew"));
3444
3445 recov_state.rs_flags = 0;
3446 recov_state.rs_num_retry_despite_err = 0;
3447
3448 recov_retry:
3449 mi = sp->mntinfo4_list;
3450 VFS_HOLD(mi->mi_vfsp);
3451 mutex_exit(&sp->s_lock);
3452 ASSERT(mi != NULL);
3453
3454 e.error = nfs4_start_op(mi, NULL, NULL, &recov_state);
3455 if (e.error) {
3456 VFS_RELE(mi->mi_vfsp);
3457 return (e.error);
3458 }
3459
3460 /* Check to see if we're dealing with a marked-dead sp */
3461 mutex_enter(&sp->s_lock);
3462 if (sp->s_thread_exit == NFS4_THREAD_EXIT) {
3463 mutex_exit(&sp->s_lock);
3464 nfs4_end_op(mi, NULL, NULL, &recov_state, needrecov);
3465 VFS_RELE(mi->mi_vfsp);
3466 return (0);
3467 }
3468
3469 /* Make sure mi hasn't changed on us */
3470 if (mi != sp->mntinfo4_list) {
3471 /* Must drop sp's lock to avoid a recursive mutex enter */
3472 mutex_exit(&sp->s_lock);
3473 nfs4_end_op(mi, NULL, NULL, &recov_state, needrecov);
3474 VFS_RELE(mi->mi_vfsp);
3475 mutex_enter(&sp->s_lock);
3476 goto recov_retry;
3477 }
3478 mutex_exit(&sp->s_lock);
3479
3480 args.ctag = TAG_RENEW;
3481
3482 args.array_len = 1;
3483 args.array = argop;
3484
3485 argop[0].argop = OP_RENEW;
3486
3487 mutex_enter(&sp->s_lock);
3488 argop[0].nfs_argop4_u.oprenew.clientid = sp->clientid;
3489 cr = sp->s_cred;
3490 crhold(cr);
3491 mutex_exit(&sp->s_lock);
3492
3493 ASSERT(cr != NULL);
3494
3495 /* used to figure out RTT for sp */
3496 gethrestime(&prop_time);
3497
3498 NFS4_DEBUG(nfs4_client_call_debug, (CE_NOTE,
3499 "nfs4renew: %s call, sp 0x%p", needrecov ? "recov" : "first",
3500 (void*)sp));
3501 NFS4_DEBUG(nfs4_client_lease_debug, (CE_NOTE, "before: %ld s %ld ns ",
3502 prop_time.tv_sec, prop_time.tv_nsec));
3503
3504 DTRACE_PROBE2(nfs4__renew__start, nfs4_server_t *, sp,
3505 mntinfo4_t *, mi);
3506
3507 rfs4call(mi, &args, &res, cr, &doqueue, 0, &e);
3508 crfree(cr);
3509
3510 DTRACE_PROBE2(nfs4__renew__end, nfs4_server_t *, sp,
3511 mntinfo4_t *, mi);
3512
3513 gethrestime(&after_time);
3514
3515 mutex_enter(&sp->s_lock);
3516 sp->propagation_delay.tv_sec =
3517 MAX(1, after_time.tv_sec - prop_time.tv_sec);
3518 mutex_exit(&sp->s_lock);
3519
3520 NFS4_DEBUG(nfs4_client_lease_debug, (CE_NOTE, "after : %ld s %ld ns ",
3521 after_time.tv_sec, after_time.tv_nsec));
3522
3523 if (e.error == 0 && res.status == NFS4ERR_CB_PATH_DOWN) {
3524 xdr_free(xdr_COMPOUND4res_clnt, (caddr_t)&res);
3525 nfs4_delegreturn_all(sp);
3526 nfs4_end_op(mi, NULL, NULL, &recov_state, needrecov);
3527 VFS_RELE(mi->mi_vfsp);
3528 /*
3529 * If the server returns CB_PATH_DOWN, it has renewed
3530 * the lease and informed us that the callback path is
3531 * down. Since the lease is renewed, just return 0 and
3532 * let the renew thread proceed as normal.
3533 */
3534 return (0);
3535 }
3536
3537 needrecov = nfs4_needs_recovery(&e, FALSE, mi->mi_vfsp);
3538 if (!needrecov && e.error) {
3539 nfs4_end_op(mi, NULL, NULL, &recov_state, needrecov);
3540 VFS_RELE(mi->mi_vfsp);
3541 return (e.error);
3542 }
3543
3544 rpc_error = e.error;
3545
3546 if (needrecov) {
3547 NFS4_DEBUG(nfs4_client_recov_debug, (CE_NOTE,
3548 "nfs4renew: initiating recovery\n"));
3549
3550 if (nfs4_start_recovery(&e, mi, NULL, NULL, NULL, NULL,
3551 OP_RENEW, NULL, NULL, NULL) == FALSE) {
3552 nfs4_end_op(mi, NULL, NULL, &recov_state, needrecov);
3553 VFS_RELE(mi->mi_vfsp);
3554 if (!e.error)
3555 xdr_free(xdr_COMPOUND4res_clnt, (caddr_t)&res);
3556 mutex_enter(&sp->s_lock);
3557 goto recov_retry;
3558 }
3559 /* fall through for res.status case */
3560 }
3561
3562 if (res.status) {
3563 if (res.status == NFS4ERR_LEASE_MOVED) {
3564 /*EMPTY*/
3565 /*
3566 * XXX need to try every mntinfo4 in sp->mntinfo4_list
3567 * to renew the lease on that server
3568 */
3569 }
3570 e.error = geterrno4(res.status);
3571 }
3572
3573 if (!rpc_error)
3574 xdr_free(xdr_COMPOUND4res_clnt, (caddr_t)&res);
3575
3576 nfs4_end_op(mi, NULL, NULL, &recov_state, needrecov);
3577
3578 VFS_RELE(mi->mi_vfsp);
3579
3580 return (e.error);
3581 }
3582
3583 void
3584 nfs4_inc_state_ref_count(mntinfo4_t *mi)
3585 {
3586 nfs4_server_t *sp;
3587
3588 /* this locks down sp if it is found */
3589 sp = find_nfs4_server(mi);
3590
3591 if (sp != NULL) {
3592 nfs4_inc_state_ref_count_nolock(sp, mi);
3593 mutex_exit(&sp->s_lock);
3594 nfs4_server_rele(sp);
3595 }
3596 }
3597
/*
 * Bump the number of OPEN files (i.e., those with state) so we know if this
 * nfs4_server has any state to maintain a lease for or not.
 *
 * Also, mark the nfs4_server's lease valid if that hasn't been done already.
 */
3604 void
3605 nfs4_inc_state_ref_count_nolock(nfs4_server_t *sp, mntinfo4_t *mi)
3606 {
3607 ASSERT(mutex_owned(&sp->s_lock));
3608
3609 sp->state_ref_count++;
3610 NFS4_DEBUG(nfs4_client_lease_debug, (CE_NOTE,
3611 "nfs4_inc_state_ref_count: state_ref_count now %d",
3612 sp->state_ref_count));
3613
3614 if (sp->lease_valid == NFS4_LEASE_UNINITIALIZED)
3615 sp->lease_valid = NFS4_LEASE_VALID;
3616
	/*
	 * If this call caused the lease to be marked valid and/or
	 * took the state_ref_count from 0 to 1, then restart the
	 * clock on lease renewal.
	 */
3622 if (sp->lease_valid == NFS4_LEASE_VALID && sp->state_ref_count == 1)
3623 sp->last_renewal_time = gethrestime_sec();
3624
3625 /* update the number of open files for mi */
3626 mi->mi_open_files++;
3627 }
3628
3629 void
3630 nfs4_dec_state_ref_count(mntinfo4_t *mi)
3631 {
3632 nfs4_server_t *sp;
3633
3634 /* this locks down sp if it is found */
3635 sp = find_nfs4_server_all(mi, 1);
3636
3637 if (sp != NULL) {
3638 nfs4_dec_state_ref_count_nolock(sp, mi);
3639 mutex_exit(&sp->s_lock);
3640 nfs4_server_rele(sp);
3641 }
3642 }
3643
3644 /*
 * Decrement the number of OPEN files (i.e., those with state) so we know if
3646 * this nfs4_server has any state to maintain a lease for or not.
3647 */
3648 void
3649 nfs4_dec_state_ref_count_nolock(nfs4_server_t *sp, mntinfo4_t *mi)
3650 {
3651 ASSERT(mutex_owned(&sp->s_lock));
3652 ASSERT(sp->state_ref_count != 0);
3653 sp->state_ref_count--;
3654
3655 NFS4_DEBUG(nfs4_client_lease_debug, (CE_NOTE,
3656 "nfs4_dec_state_ref_count: state ref count now %d",
3657 sp->state_ref_count));
3658
3659 mi->mi_open_files--;
3660 NFS4_DEBUG(nfs4_client_lease_debug, (CE_NOTE,
3661 "nfs4_dec_state_ref_count: mi open files %d, v4 flags 0x%x",
3662 mi->mi_open_files, mi->mi_flags));
3663
3664 /* We don't have to hold the mi_lock to test mi_flags */
3665 if (mi->mi_open_files == 0 &&
3666 (mi->mi_flags & MI4_REMOVE_ON_LAST_CLOSE)) {
3667 NFS4_DEBUG(nfs4_client_lease_debug, (CE_NOTE,
3668 "nfs4_dec_state_ref_count: remove mntinfo4 %p since "
3669 "we have closed the last open file", (void*)mi));
3670 nfs4_remove_mi_from_server(mi, sp);
3671 }
3672 }
3673
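/*
 * Return TRUE if the lease for the given nfs4_server_t is known to be
 * valid: it has been marked valid and the lease period has not yet
 * elapsed since the last renewal.
 */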
3674 bool_t
3675 inlease(nfs4_server_t *sp)
3676 {
3677 bool_t result;
3678
3679 ASSERT(mutex_owned(&sp->s_lock));
3680
3681 if (sp->lease_valid == NFS4_LEASE_VALID &&
3682 gethrestime_sec() < sp->last_renewal_time + sp->s_lease_time)
3683 result = TRUE;
3684 else
3685 result = FALSE;
3686
3687 return (result);
}

3691 /*
3692 * Return non-zero if the given nfs4_server_t is going through recovery.
3693 */
3694
3695 int
3696 nfs4_server_in_recovery(nfs4_server_t *sp)
3697 {
3698 return (nfs_rw_lock_held(&sp->s_recovlock, RW_WRITER));
3699 }
3700
3701 /*
3702 * Compare two shared filehandle objects. Returns -1, 0, or +1, if the
3703 * first is less than, equal to, or greater than the second.
3704 */
3705
3706 int
3707 sfh4cmp(const void *p1, const void *p2)
3708 {
3709 const nfs4_sharedfh_t *sfh1 = (const nfs4_sharedfh_t *)p1;
3710 const nfs4_sharedfh_t *sfh2 = (const nfs4_sharedfh_t *)p2;
3711
3712 return (nfs4cmpfh(&sfh1->sfh_fh, &sfh2->sfh_fh));
3713 }
3714
3715 /*
3716 * Create a table for shared filehandle objects.
3717 */
3718
3719 void
3720 sfh4_createtab(avl_tree_t *tab)
3721 {
3722 avl_create(tab, sfh4cmp, sizeof (nfs4_sharedfh_t),
3723 offsetof(nfs4_sharedfh_t, sfh_tree));
3724 }
3725
/*
 * Create a shared filehandle object for the given filehandle, inserting
 * it into the mount's filehandle table; if another thread has already
 * inserted an object for this filehandle, return that one instead.  The
 * caller is responsible for eventually calling sfh4_rele().
 */
3730
3731 nfs4_sharedfh_t *
3732 sfh4_put(const nfs_fh4 *fh, mntinfo4_t *mi, nfs4_sharedfh_t *key)
3733 {
3734 nfs4_sharedfh_t *sfh, *nsfh;
3735 avl_index_t where;
3736 nfs4_sharedfh_t skey;
3737
3738 if (!key) {
3739 skey.sfh_fh = *fh;
3740 key = &skey;
3741 }
3742
3743 nsfh = kmem_alloc(sizeof (nfs4_sharedfh_t), KM_SLEEP);
3744 nsfh->sfh_fh.nfs_fh4_len = fh->nfs_fh4_len;
3745 /*
3746 * We allocate the largest possible filehandle size because it's
3747 * not that big, and it saves us from possibly having to resize the
3748 * buffer later.
3749 */
3750 nsfh->sfh_fh.nfs_fh4_val = kmem_alloc(NFS4_FHSIZE, KM_SLEEP);
3751 bcopy(fh->nfs_fh4_val, nsfh->sfh_fh.nfs_fh4_val, fh->nfs_fh4_len);
3752 mutex_init(&nsfh->sfh_lock, NULL, MUTEX_DEFAULT, NULL);
3753 nsfh->sfh_refcnt = 1;
3754 nsfh->sfh_flags = SFH4_IN_TREE;
3755 nsfh->sfh_mi = mi;
3756 NFS4_DEBUG(nfs4_sharedfh_debug, (CE_NOTE, "sfh4_get: new object (%p)",
3757 (void *)nsfh));
3758
3759 (void) nfs_rw_enter_sig(&mi->mi_fh_lock, RW_WRITER, 0);
3760 sfh = avl_find(&mi->mi_filehandles, key, &where);
3761 if (sfh != NULL) {
3762 mutex_enter(&sfh->sfh_lock);
3763 sfh->sfh_refcnt++;
3764 mutex_exit(&sfh->sfh_lock);
3765 nfs_rw_exit(&mi->mi_fh_lock);
3766 /* free our speculative allocs */
3767 kmem_free(nsfh->sfh_fh.nfs_fh4_val, NFS4_FHSIZE);
3768 kmem_free(nsfh, sizeof (nfs4_sharedfh_t));
3769 return (sfh);
3770 }
3771
3772 avl_insert(&mi->mi_filehandles, nsfh, where);
3773 nfs_rw_exit(&mi->mi_fh_lock);
3774
3775 return (nsfh);
3776 }
3777
3778 /*
3779 * Return a shared filehandle object for the given filehandle. The caller
3780 * is responsible for eventually calling sfh4_rele().
3781 */
3782
3783 nfs4_sharedfh_t *
3784 sfh4_get(const nfs_fh4 *fh, mntinfo4_t *mi)
3785 {
3786 nfs4_sharedfh_t *sfh;
3787 nfs4_sharedfh_t key;
3788
3789 ASSERT(fh->nfs_fh4_len <= NFS4_FHSIZE);
3790
3791 #ifdef DEBUG
3792 if (nfs4_sharedfh_debug) {
3793 nfs4_fhandle_t fhandle;
3794
3795 fhandle.fh_len = fh->nfs_fh4_len;
3796 bcopy(fh->nfs_fh4_val, fhandle.fh_buf, fhandle.fh_len);
3797 zcmn_err(mi->mi_zone->zone_id, CE_NOTE, "sfh4_get:");
3798 nfs4_printfhandle(&fhandle);
3799 }
3800 #endif
3801
3802 /*
3803 * If there's already an object for the given filehandle, bump the
3804 * reference count and return it. Otherwise, create a new object
3805 * and add it to the AVL tree.
3806 */
3807
3808 key.sfh_fh = *fh;
3809
3810 (void) nfs_rw_enter_sig(&mi->mi_fh_lock, RW_READER, 0);
3811 sfh = avl_find(&mi->mi_filehandles, &key, NULL);
3812 if (sfh != NULL) {
3813 mutex_enter(&sfh->sfh_lock);
3814 sfh->sfh_refcnt++;
3815 NFS4_DEBUG(nfs4_sharedfh_debug, (CE_NOTE,
3816 "sfh4_get: found existing %p, new refcnt=%d",
3817 (void *)sfh, sfh->sfh_refcnt));
3818 mutex_exit(&sfh->sfh_lock);
3819 nfs_rw_exit(&mi->mi_fh_lock);
3820 return (sfh);
3821 }
3822 nfs_rw_exit(&mi->mi_fh_lock);
3823
3824 return (sfh4_put(fh, mi, &key));
3825 }

/*
 * Get a reference to the given shared filehandle object.
 */

void
sfh4_hold(nfs4_sharedfh_t *sfh)
{
	ASSERT(sfh->sfh_refcnt > 0);

	mutex_enter(&sfh->sfh_lock);
	sfh->sfh_refcnt++;
	NFS4_DEBUG(nfs4_sharedfh_debug,
	    (CE_NOTE, "sfh4_hold %p, new refcnt=%d",
	    (void *)sfh, sfh->sfh_refcnt));
	mutex_exit(&sfh->sfh_lock);
}

/*
 * Release a reference to the given shared filehandle object and null out
 * the given pointer.
 */

void
sfh4_rele(nfs4_sharedfh_t **sfhpp)
{
	mntinfo4_t *mi;
	nfs4_sharedfh_t *sfh = *sfhpp;

	ASSERT(sfh->sfh_refcnt > 0);

	mutex_enter(&sfh->sfh_lock);
	if (sfh->sfh_refcnt > 1) {
		sfh->sfh_refcnt--;
		NFS4_DEBUG(nfs4_sharedfh_debug, (CE_NOTE,
		    "sfh4_rele %p, new refcnt=%d",
		    (void *)sfh, sfh->sfh_refcnt));
		mutex_exit(&sfh->sfh_lock);
		goto finish;
	}
	mutex_exit(&sfh->sfh_lock);

	/*
	 * Possibly the last reference, so get the lock for the table in
	 * case it's time to remove the object from the table.
	 */
	mi = sfh->sfh_mi;
	(void) nfs_rw_enter_sig(&mi->mi_fh_lock, RW_WRITER, 0);
	mutex_enter(&sfh->sfh_lock);
	sfh->sfh_refcnt--;
	if (sfh->sfh_refcnt > 0) {
		NFS4_DEBUG(nfs4_sharedfh_debug, (CE_NOTE,
		    "sfh4_rele %p, new refcnt=%d",
		    (void *)sfh, sfh->sfh_refcnt));
		mutex_exit(&sfh->sfh_lock);
		nfs_rw_exit(&mi->mi_fh_lock);
		goto finish;
	}

	NFS4_DEBUG(nfs4_sharedfh_debug, (CE_NOTE,
	    "sfh4_rele %p, last ref", (void *)sfh));
	if (sfh->sfh_flags & SFH4_IN_TREE) {
		avl_remove(&mi->mi_filehandles, sfh);
		sfh->sfh_flags &= ~SFH4_IN_TREE;
	}
	mutex_exit(&sfh->sfh_lock);
	nfs_rw_exit(&mi->mi_fh_lock);
	mutex_destroy(&sfh->sfh_lock);
	kmem_free(sfh->sfh_fh.nfs_fh4_val, NFS4_FHSIZE);
	kmem_free(sfh, sizeof (nfs4_sharedfh_t));

finish:
	*sfhpp = NULL;
}

/*
 * Update the filehandle for the given shared filehandle object.
 */

int nfs4_warn_dupfh = 0;	/* if set, always warn about dup fhs below */

void
sfh4_update(nfs4_sharedfh_t *sfh, const nfs_fh4 *newfh)
{
	mntinfo4_t *mi = sfh->sfh_mi;
	nfs4_sharedfh_t *dupsfh;
	avl_index_t where;
	nfs4_sharedfh_t key;

#ifdef DEBUG
	mutex_enter(&sfh->sfh_lock);
	ASSERT(sfh->sfh_refcnt > 0);
	mutex_exit(&sfh->sfh_lock);
#endif
	ASSERT(newfh->nfs_fh4_len <= NFS4_FHSIZE);

	/*
	 * The basic plan is to remove the shared filehandle object from
	 * the table, update it to have the new filehandle, then reinsert
	 * it.
	 */

	(void) nfs_rw_enter_sig(&mi->mi_fh_lock, RW_WRITER, 0);
	mutex_enter(&sfh->sfh_lock);
	if (sfh->sfh_flags & SFH4_IN_TREE) {
		avl_remove(&mi->mi_filehandles, sfh);
		sfh->sfh_flags &= ~SFH4_IN_TREE;
	}
	mutex_exit(&sfh->sfh_lock);
	sfh->sfh_fh.nfs_fh4_len = newfh->nfs_fh4_len;
	bcopy(newfh->nfs_fh4_val, sfh->sfh_fh.nfs_fh4_val,
	    sfh->sfh_fh.nfs_fh4_len);

	/*
	 * XXX If there is already a shared filehandle object with the new
	 * filehandle, we're in trouble, because the rnode code assumes
	 * that there is only one shared filehandle object for a given
	 * filehandle.  So issue a warning (for read-write mounts only)
	 * and don't try to re-insert the given object into the table.
	 * Hopefully the given object will quickly go away and everyone
	 * will use the new object.
	 */
	key.sfh_fh = *newfh;
	dupsfh = avl_find(&mi->mi_filehandles, &key, &where);
	if (dupsfh != NULL) {
		if (!(mi->mi_vfsp->vfs_flag & VFS_RDONLY) || nfs4_warn_dupfh) {
			zcmn_err(mi->mi_zone->zone_id, CE_WARN, "sfh4_update: "
			    "duplicate filehandle detected");
			sfh4_printfhandle(dupsfh);
		}
	} else {
		avl_insert(&mi->mi_filehandles, sfh, where);
		mutex_enter(&sfh->sfh_lock);
		sfh->sfh_flags |= SFH4_IN_TREE;
		mutex_exit(&sfh->sfh_lock);
	}
	nfs_rw_exit(&mi->mi_fh_lock);
}
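
/*
 * Illustrative sketch (assumes "rp" is an rnode4_t whose file was given a
 * new filehandle by the server, e.g. during recovery, and "newfh" is the
 * filehandle from the over-the-wire result):
 *
 *	sfh4_update(rp->r_fh, &newfh);
 *
 * Existing holders of rp->r_fh see the new filehandle immediately; no new
 * object is allocated and no reference counts change.
 */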

/*
 * Copy out the current filehandle for the given shared filehandle object.
 */

void
sfh4_copyval(const nfs4_sharedfh_t *sfh, nfs4_fhandle_t *fhp)
{
	mntinfo4_t *mi = sfh->sfh_mi;

	ASSERT(sfh->sfh_refcnt > 0);

	(void) nfs_rw_enter_sig(&mi->mi_fh_lock, RW_READER, 0);
	fhp->fh_len = sfh->sfh_fh.nfs_fh4_len;
	ASSERT(fhp->fh_len <= NFS4_FHSIZE);
	bcopy(sfh->sfh_fh.nfs_fh4_val, fhp->fh_buf, fhp->fh_len);
	nfs_rw_exit(&mi->mi_fh_lock);
}

/*
 * Print out the filehandle for the given shared filehandle object.
 */

void
sfh4_printfhandle(const nfs4_sharedfh_t *sfh)
{
	nfs4_fhandle_t fhandle;

	sfh4_copyval(sfh, &fhandle);
	nfs4_printfhandle(&fhandle);
}

/*
 * Compare 2 fnames.  Returns -1 if the first is "less" than the second, 0
 * if they're the same, +1 if the first is "greater" than the second.  The
 * caller (or whoever's calling the AVL package) is responsible for
 * handling locking issues.
 */

static int
fncmp(const void *p1, const void *p2)
{
	const nfs4_fname_t *f1 = p1;
	const nfs4_fname_t *f2 = p2;
	int res;

	res = strcmp(f1->fn_name, f2->fn_name);
	/*
	 * The AVL package wants +/-1, not arbitrary positive or negative
	 * integers.
	 */
	if (res > 0)
		res = 1;
	else if (res < 0)
		res = -1;
	return (res);
}

/*
 * Get or create an fname with the given name, as a child of the given
 * fname.  The caller is responsible for eventually releasing the reference
 * (fn_rele()).  parent may be NULL.
 */

nfs4_fname_t *
fn_get(nfs4_fname_t *parent, char *name, nfs4_sharedfh_t *sfh)
{
	nfs4_fname_t key;
	nfs4_fname_t *fnp;
	avl_index_t where;

	key.fn_name = name;

	/*
	 * If there's already an fname registered with the given name, bump
	 * its reference count and return it.  Otherwise, create a new one
	 * and add it to the parent's AVL tree.
	 *
	 * The fname entry we are looking for must match both the name
	 * and the sfh stored in the fname.
	 */
again:
	if (parent != NULL) {
		mutex_enter(&parent->fn_lock);
		fnp = avl_find(&parent->fn_children, &key, &where);
		if (fnp != NULL) {
			/*
			 * Hold fnp now; the hold is released below if
			 * this turns out not to be the fnp we want.
			 */
			fn_hold(fnp);

			if (fnp->fn_sfh == sfh) {
				/*
				 * We have found our entry.  The hold
				 * placed above becomes the caller's
				 * reference; return it.
				 */
				mutex_exit(&parent->fn_lock);
				return (fnp);
			}

			/*
			 * We have found an entry that has a mismatching
			 * fn_sfh.  This could be a stale entry due to a
			 * server-side rename.  Remove this entry and
			 * retry, to make sure no such entries remain.
			 */
			mutex_exit(&parent->fn_lock);
			mutex_enter(&fnp->fn_lock);
			if (fnp->fn_parent == parent) {
				/*
				 * Remove ourselves from parent's
				 * fn_children tree.
				 */
				mutex_enter(&parent->fn_lock);
				avl_remove(&parent->fn_children, fnp);
				mutex_exit(&parent->fn_lock);
				fn_rele(&fnp->fn_parent);
			}
			mutex_exit(&fnp->fn_lock);
			fn_rele(&fnp);
			goto again;
		}
	}

	fnp = kmem_alloc(sizeof (nfs4_fname_t), KM_SLEEP);
	mutex_init(&fnp->fn_lock, NULL, MUTEX_DEFAULT, NULL);
	fnp->fn_parent = parent;
	if (parent != NULL)
		fn_hold(parent);
	fnp->fn_len = strlen(name);
	ASSERT(fnp->fn_len < MAXNAMELEN);
	fnp->fn_name = kmem_alloc(fnp->fn_len + 1, KM_SLEEP);
	(void) strcpy(fnp->fn_name, name);
	fnp->fn_refcnt = 1;

	/*
	 * This hold on sfh is released later, when the final fn_rele()
	 * is done on this fname.
	 */
	sfh4_hold(sfh);
	fnp->fn_sfh = sfh;

	avl_create(&fnp->fn_children, fncmp, sizeof (nfs4_fname_t),
	    offsetof(nfs4_fname_t, fn_tree));
	NFS4_DEBUG(nfs4_fname_debug, (CE_NOTE,
	    "fn_get %p:%s, a new nfs4_fname_t!",
	    (void *)fnp, fnp->fn_name));
	if (parent != NULL) {
		avl_insert(&parent->fn_children, fnp, where);
		mutex_exit(&parent->fn_lock);
	}

	return (fnp);
}
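
/*
 * Illustrative usage sketch (assumes "dirfn" is the directory's fname and
 * "sfh" is the shared filehandle of the entry being looked up; "foo" is a
 * hypothetical name):
 *
 *	nfs4_fname_t *fn;
 *
 *	fn = fn_get(dirfn, "foo", sfh);	// finds or creates the child
 *	// ... stash fn in the rnode, build paths with fn_path(), etc. ...
 *	fn_rele(&fn);			// drops the ref; fn is now NULL
 */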

void
fn_hold(nfs4_fname_t *fnp)
{
	atomic_inc_32(&fnp->fn_refcnt);
	NFS4_DEBUG(nfs4_fname_debug, (CE_NOTE,
	    "fn_hold %p:%s, new refcnt=%d",
	    (void *)fnp, fnp->fn_name, fnp->fn_refcnt));
}

/*
 * Decrement the reference count of the given fname, and destroy it if its
 * reference count goes to zero.  Nulls out the given pointer.
 */

void
fn_rele(nfs4_fname_t **fnpp)
{
	nfs4_fname_t *parent;
	uint32_t newref;
	nfs4_fname_t *fnp;

recur:
	fnp = *fnpp;
	*fnpp = NULL;

	mutex_enter(&fnp->fn_lock);
	parent = fnp->fn_parent;
	if (parent != NULL)
		mutex_enter(&parent->fn_lock);	/* prevent new references */
	newref = atomic_dec_32_nv(&fnp->fn_refcnt);
	if (newref > 0) {
		NFS4_DEBUG(nfs4_fname_debug, (CE_NOTE,
		    "fn_rele %p:%s, new refcnt=%d",
		    (void *)fnp, fnp->fn_name, fnp->fn_refcnt));
		if (parent != NULL)
			mutex_exit(&parent->fn_lock);
		mutex_exit(&fnp->fn_lock);
		return;
	}

	NFS4_DEBUG(nfs4_fname_debug, (CE_NOTE,
	    "fn_rele %p:%s, last reference, deleting...",
	    (void *)fnp, fnp->fn_name));
	if (parent != NULL) {
		avl_remove(&parent->fn_children, fnp);
		mutex_exit(&parent->fn_lock);
	}
	kmem_free(fnp->fn_name, fnp->fn_len + 1);
	sfh4_rele(&fnp->fn_sfh);
	mutex_destroy(&fnp->fn_lock);
	avl_destroy(&fnp->fn_children);
	kmem_free(fnp, sizeof (nfs4_fname_t));
	/*
	 * Recursively fn_rele the parent.
	 * Use goto instead of a recursive call to avoid stack overflow.
	 */
	if (parent != NULL) {
		fnpp = &parent;
		goto recur;
	}
}

/*
 * Returns the single component name of the given fname, in a MAXNAMELEN
 * string buffer, which the caller is responsible for freeing.  Note that
 * the name may become invalid as a result of fn_move().
 */

char *
fn_name(nfs4_fname_t *fnp)
{
	char *name;

	ASSERT(fnp->fn_len < MAXNAMELEN);
	name = kmem_alloc(MAXNAMELEN, KM_SLEEP);
	mutex_enter(&fnp->fn_lock);
	(void) strcpy(name, fnp->fn_name);
	mutex_exit(&fnp->fn_lock);

	return (name);
}
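
/*
 * Illustrative usage sketch: the buffer is always MAXNAMELEN bytes, no
 * matter how short the name, so it must be freed with that size:
 *
 *	char *nm = fn_name(fnp);
 *	// ... use nm ...
 *	kmem_free(nm, MAXNAMELEN);
 */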


/*
 * fn_path_realloc
 *
 * This function, used only by fn_path, constructs
 * a new string that looks like "prepend" + "/" + "current"
 * by allocating a new string and freeing the old one.
 */
static void
fn_path_realloc(char **curses, char *prepend)
{
	int len, curlen = 0;
	char *news;

	if (*curses == NULL) {
		/*
		 * Prime the pump, allocate just the
		 * space for prepend and return that.
		 */
		len = strlen(prepend) + 1;
		news = kmem_alloc(len, KM_SLEEP);
		(void) strncpy(news, prepend, len);
	} else {
		/*
		 * Allocate the space for the new string;
		 * the "+ 1 + 1" covers the "/" separator
		 * and the terminating NUL byte.
		 */
		curlen = strlen(*curses);
		len = curlen + strlen(prepend) + 1 + 1;
		news = kmem_alloc(len, KM_SLEEP);
		(void) strncpy(news, prepend, len);
		(void) strcat(news, "/");
		(void) strcat(news, *curses);
		kmem_free(*curses, curlen + 1);
	}
	*curses = news;
}
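
/*
 * For illustration: if *curses is "b/c" and prepend is "a", the old
 * 4-byte buffer is freed and *curses becomes a freshly allocated
 * 6-byte buffer holding "a/b/c".
 */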

/*
 * Returns the path name (starting from the fs root) for the given fname.
 * The caller is responsible for freeing.  Note that the path may be or
 * become invalid as a result of fn_move().
 */

char *
fn_path(nfs4_fname_t *fnp)
{
	char *path;
	nfs4_fname_t *nextfnp;

	if (fnp == NULL)
		return (NULL);

	path = NULL;

	/* walk up the tree constructing the pathname. */

	fn_hold(fnp);			/* adjust for later rele */
	do {
		mutex_enter(&fnp->fn_lock);
		/*
		 * Add fn_name in front of the current path.
		 */
		fn_path_realloc(&path, fnp->fn_name);
		nextfnp = fnp->fn_parent;
		if (nextfnp != NULL)
			fn_hold(nextfnp);
		mutex_exit(&fnp->fn_lock);
		fn_rele(&fnp);
		fnp = nextfnp;
	} while (fnp != NULL);

	return (path);
}
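
/*
 * Illustrative usage sketch: fn_path_realloc() sizes each buffer exactly,
 * so the caller frees the result based on its final length:
 *
 *	char *path = fn_path(fnp);
 *	if (path != NULL) {
 *		// ... log or compare the path ...
 *		kmem_free(path, strlen(path) + 1);
 *	}
 */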

/*
 * Return a reference to the parent of the given fname, which the caller is
 * responsible for eventually releasing.
 */

nfs4_fname_t *
fn_parent(nfs4_fname_t *fnp)
{
	nfs4_fname_t *parent;

	mutex_enter(&fnp->fn_lock);
	parent = fnp->fn_parent;
	if (parent != NULL)
		fn_hold(parent);
	mutex_exit(&fnp->fn_lock);

	return (parent);
}

/*
 * Update fnp so that its parent is newparent and its name is newname.
 */

void
fn_move(nfs4_fname_t *fnp, nfs4_fname_t *newparent, char *newname)
{
	nfs4_fname_t *parent, *tmpfnp;
	ssize_t newlen;
	nfs4_fname_t key;
	avl_index_t where;

	/*
	 * This assert exists to catch the client trying to rename
	 * a dir to be a child of itself.  This happened at a recent
	 * bakeoff against a 3rd party (broken) server which allowed
	 * the rename to succeed.  If it trips it means that:
	 *	a) the code in nfs4rename that detects this case is broken
	 *	b) the server is broken (since it allowed the bogus rename)
	 *
	 * On non-DEBUG kernels, this case instead causes a recursive
	 * mutex_enter panic below, at mutex_enter(&newparent->fn_lock).
	 */
	ASSERT(fnp != newparent);

	/*
	 * Remove fnp from its current parent, change its name, then add it
	 * to newparent.  It might happen that fnp was replaced by another
	 * nfs4_fname_t with the same fn_name in parent->fn_children.
	 * In such a case, fnp->fn_parent is NULL and we skip the removal
	 * of fnp from its current parent.
	 */
	mutex_enter(&fnp->fn_lock);
	parent = fnp->fn_parent;
	if (parent != NULL) {
		mutex_enter(&parent->fn_lock);
		avl_remove(&parent->fn_children, fnp);
		mutex_exit(&parent->fn_lock);
		fn_rele(&fnp->fn_parent);
	}

	newlen = strlen(newname);
	if (newlen != fnp->fn_len) {
		ASSERT(newlen < MAXNAMELEN);
		kmem_free(fnp->fn_name, fnp->fn_len + 1);
		fnp->fn_name = kmem_alloc(newlen + 1, KM_SLEEP);
		fnp->fn_len = newlen;
	}
	(void) strcpy(fnp->fn_name, newname);

again:
	mutex_enter(&newparent->fn_lock);
	key.fn_name = fnp->fn_name;
	tmpfnp = avl_find(&newparent->fn_children, &key, &where);
	if (tmpfnp != NULL) {
		/*
		 * This could be due to a file that was unlinked while
		 * open, or perhaps the rnode is in the free list.  Remove
		 * it from newparent and let it go away on its own.  The
		 * contorted code is to deal with lock order issues and
		 * race conditions.
		 */
		fn_hold(tmpfnp);
		mutex_exit(&newparent->fn_lock);
		mutex_enter(&tmpfnp->fn_lock);
		if (tmpfnp->fn_parent == newparent) {
			mutex_enter(&newparent->fn_lock);
			avl_remove(&newparent->fn_children, tmpfnp);
			mutex_exit(&newparent->fn_lock);
			fn_rele(&tmpfnp->fn_parent);
		}
		mutex_exit(&tmpfnp->fn_lock);
		fn_rele(&tmpfnp);
		goto again;
	}
	fnp->fn_parent = newparent;
	fn_hold(newparent);
	avl_insert(&newparent->fn_children, fnp, where);
	mutex_exit(&newparent->fn_lock);
	mutex_exit(&fnp->fn_lock);
}
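
/*
 * Illustrative sketch (assumes the client just completed an over-the-wire
 * RENAME into directory "ndirfn", and "fnp" is the fname of the renamed
 * object; "new" is a hypothetical name):
 *
 *	fn_move(fnp, ndirfn, "new");
 *
 * Afterward, fn_path(fnp) reflects the new location; no references held
 * by the caller are consumed.
 */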

#ifdef DEBUG
/*
 * Return non-zero if the type information makes sense for the given vnode.
 * Otherwise panic.  (The check is a no-op unless nfs4_vtype_debug is set.)
 */
int
nfs4_consistent_type(vnode_t *vp)
{
	rnode4_t *rp = VTOR4(vp);

	if (nfs4_vtype_debug && vp->v_type != VNON &&
	    rp->r_attr.va_type != VNON && vp->v_type != rp->r_attr.va_type) {
		cmn_err(CE_PANIC, "vnode %p type mismatch; v_type=%d, "
		    "rnode attr type=%d", (void *)vp, vp->v_type,
		    rp->r_attr.va_type);
	}

	return (1);
}
#endif /* DEBUG */
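
/*
 * Illustrative usage sketch: since nfs4_consistent_type() always returns 1
 * (or panics), callers can wrap it in ASSERT() so the check compiles away
 * entirely on non-DEBUG kernels:
 *
 *	ASSERT(nfs4_consistent_type(vp));
 */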