/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 1990, 2010, Oracle and/or its affiliates. All rights reserved.
 *
 *      Copyright (c) 1983,1984,1985,1986,1987,1988,1989 AT&T.
 *      All rights reserved.
 */

/*
 * Copyright (c) 2014, Joyent, Inc. All rights reserved.
 * Copyright 2015 Nexenta Systems, Inc.  All rights reserved.
 */

#include <sys/param.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/cred.h>
#include <sys/time.h>
#include <sys/vnode.h>
#include <sys/vfs.h>
#include <sys/vfs_opreg.h>
#include <sys/file.h>
#include <sys/filio.h>
#include <sys/uio.h>
#include <sys/buf.h>
#include <sys/mman.h>
#include <sys/pathname.h>
#include <sys/dirent.h>
#include <sys/debug.h>
#include <sys/vmsystm.h>
#include <sys/fcntl.h>
#include <sys/flock.h>
#include <sys/swap.h>
#include <sys/errno.h>
#include <sys/strsubr.h>
#include <sys/sysmacros.h>
#include <sys/kmem.h>
#include <sys/cmn_err.h>
#include <sys/pathconf.h>
#include <sys/utsname.h>
#include <sys/dnlc.h>
#include <sys/acl.h>
#include <sys/atomic.h>
#include <sys/policy.h>
#include <sys/sdt.h>

#include <rpc/types.h>
#include <rpc/auth.h>
#include <rpc/clnt.h>

#include <nfs/nfs.h>
#include <nfs/nfs_clnt.h>
#include <nfs/rnode.h>
#include <nfs/nfs_acl.h>
#include <nfs/lm.h>

#include <vm/hat.h>
#include <vm/as.h>
#include <vm/page.h>
#include <vm/pvn.h>
#include <vm/seg.h>
#include <vm/seg_map.h>
#include <vm/seg_kpm.h>
#include <vm/seg_vn.h>

#include <fs/fs_subr.h>

#include <sys/ddi.h>

static int      nfs_rdwrlbn(vnode_t *, page_t *, u_offset_t, size_t, int,
                        cred_t *);
static int      nfswrite(vnode_t *, caddr_t, uint_t, int, cred_t *);
static int      nfsread(vnode_t *, caddr_t, uint_t, int, size_t *, cred_t *);
static int      nfssetattr(vnode_t *, struct vattr *, int, cred_t *);
static int      nfslookup_dnlc(vnode_t *, char *, vnode_t **, cred_t *);
static int      nfslookup_otw(vnode_t *, char *, vnode_t **, cred_t *, int);
static int      nfsrename(vnode_t *, char *, vnode_t *, char *, cred_t *,
                        caller_context_t *);
static int      nfsreaddir(vnode_t *, rddir_cache *, cred_t *);
static int      nfs_bio(struct buf *, cred_t *);
static int      nfs_getapage(vnode_t *, u_offset_t, size_t, uint_t *,
                        page_t *[], size_t, struct seg *, caddr_t,
                        enum seg_rw, cred_t *);
static void     nfs_readahead(vnode_t *, u_offset_t, caddr_t, struct seg *,
                        cred_t *);
static int      nfs_sync_putapage(vnode_t *, page_t *, u_offset_t, size_t,
                        int, cred_t *);
static int      nfs_sync_pageio(vnode_t *, page_t *, u_offset_t, size_t,
                        int, cred_t *);
static void     nfs_delmap_callback(struct as *, void *, uint_t);

/*
 * Error values used to pass information about certain conditions (such
 * as end of file) which need special handling.  NFS_EOF is negative so
 * that it can never collide with a real errno value.
 */
#define NFS_EOF                 -98

/*
 * These are the vnode ops routines which implement the vnode interface to
 * the networked file system.  These routines just take their parameters,
 * make them look networkish by putting the right info into interface structs,
 * and then call the appropriate remote routine(s) to do the work.
 *
 * Note on directory name lookup caching:  If we detect a stale fhandle,
 * we purge the directory cache relative to that vnode.  This way, the
 * user won't get burned by the cache repeatedly.  See <nfs/rnode.h> for
 * more details on rnode locking.
 */

static int      nfs_open(vnode_t **, int, cred_t *, caller_context_t *);
static int      nfs_close(vnode_t *, int, int, offset_t, cred_t *,
                        caller_context_t *);
static int      nfs_read(vnode_t *, struct uio *, int, cred_t *,
                        caller_context_t *);
static int      nfs_write(vnode_t *, struct uio *, int, cred_t *,
                        caller_context_t *);
static int      nfs_ioctl(vnode_t *, int, intptr_t, int, cred_t *, int *,
                        caller_context_t *);
static int      nfs_getattr(vnode_t *, struct vattr *, int, cred_t *,
                        caller_context_t *);
static int      nfs_setattr(vnode_t *, struct vattr *, int, cred_t *,
                        caller_context_t *);
static int      nfs_access(vnode_t *, int, int, cred_t *, caller_context_t *);
static int      nfs_accessx(void *, int, cred_t *);
static int      nfs_readlink(vnode_t *, struct uio *, cred_t *,
                        caller_context_t *);
static int      nfs_fsync(vnode_t *, int, cred_t *, caller_context_t *);
static void     nfs_inactive(vnode_t *, cred_t *, caller_context_t *);
static int      nfs_lookup(vnode_t *, char *, vnode_t **, struct pathname *,
                        int, vnode_t *, cred_t *, caller_context_t *,
                        int *, pathname_t *);
static int      nfs_create(vnode_t *, char *, struct vattr *, enum vcexcl,
                        int, vnode_t **, cred_t *, int, caller_context_t *,
                        vsecattr_t *);
static int      nfs_remove(vnode_t *, char *, cred_t *, caller_context_t *,
                        int);
static int      nfs_link(vnode_t *, vnode_t *, char *, cred_t *,
                        caller_context_t *, int);
static int      nfs_rename(vnode_t *, char *, vnode_t *, char *, cred_t *,
                        caller_context_t *, int);
static int      nfs_mkdir(vnode_t *, char *, struct vattr *, vnode_t **,
                        cred_t *, caller_context_t *, int, vsecattr_t *);
static int      nfs_rmdir(vnode_t *, char *, vnode_t *, cred_t *,
                        caller_context_t *, int);
static int      nfs_symlink(vnode_t *, char *, struct vattr *, char *,
                        cred_t *, caller_context_t *, int);
static int      nfs_readdir(vnode_t *, struct uio *, cred_t *, int *,
                        caller_context_t *, int);
static int      nfs_fid(vnode_t *, fid_t *, caller_context_t *);
static int      nfs_rwlock(vnode_t *, int, caller_context_t *);
static void     nfs_rwunlock(vnode_t *, int, caller_context_t *);
static int      nfs_seek(vnode_t *, offset_t, offset_t *, caller_context_t *);
static int      nfs_getpage(vnode_t *, offset_t, size_t, uint_t *,
                        page_t *[], size_t, struct seg *, caddr_t,
                        enum seg_rw, cred_t *, caller_context_t *);
static int      nfs_putpage(vnode_t *, offset_t, size_t, int, cred_t *,
                        caller_context_t *);
static int      nfs_map(vnode_t *, offset_t, struct as *, caddr_t *, size_t,
                        uchar_t, uchar_t, uint_t, cred_t *, caller_context_t *);
static int      nfs_addmap(vnode_t *, offset_t, struct as *, caddr_t, size_t,
                        uchar_t, uchar_t, uint_t, cred_t *, caller_context_t *);
static int      nfs_frlock(vnode_t *, int, struct flock64 *, int, offset_t,
                        struct flk_callback *, cred_t *, caller_context_t *);
static int      nfs_space(vnode_t *, int, struct flock64 *, int, offset_t,
                        cred_t *, caller_context_t *);
static int      nfs_realvp(vnode_t *, vnode_t **, caller_context_t *);
static int      nfs_delmap(vnode_t *, offset_t, struct as *, caddr_t, size_t,
                        uint_t, uint_t, uint_t, cred_t *, caller_context_t *);
static int      nfs_pathconf(vnode_t *, int, ulong_t *, cred_t *,
                        caller_context_t *);
static int      nfs_pageio(vnode_t *, page_t *, u_offset_t, size_t, int,
                        cred_t *, caller_context_t *);
static int      nfs_setsecattr(vnode_t *, vsecattr_t *, int, cred_t *,
                        caller_context_t *);
static int      nfs_getsecattr(vnode_t *, vsecattr_t *, int, cred_t *,
                        caller_context_t *);
static int      nfs_shrlock(vnode_t *, int, struct shrlock *, int, cred_t *,
                        caller_context_t *);

struct vnodeops *nfs_vnodeops;

const fs_operation_def_t nfs_vnodeops_template[] = {
        VOPNAME_OPEN,           { .vop_open = nfs_open },
        VOPNAME_CLOSE,          { .vop_close = nfs_close },
        VOPNAME_READ,           { .vop_read = nfs_read },
        VOPNAME_WRITE,          { .vop_write = nfs_write },
        VOPNAME_IOCTL,          { .vop_ioctl = nfs_ioctl },
        VOPNAME_GETATTR,        { .vop_getattr = nfs_getattr },
        VOPNAME_SETATTR,        { .vop_setattr = nfs_setattr },
        VOPNAME_ACCESS,         { .vop_access = nfs_access },
        VOPNAME_LOOKUP,         { .vop_lookup = nfs_lookup },
        VOPNAME_CREATE,         { .vop_create = nfs_create },
        VOPNAME_REMOVE,         { .vop_remove = nfs_remove },
        VOPNAME_LINK,           { .vop_link = nfs_link },
        VOPNAME_RENAME,         { .vop_rename = nfs_rename },
        VOPNAME_MKDIR,          { .vop_mkdir = nfs_mkdir },
        VOPNAME_RMDIR,          { .vop_rmdir = nfs_rmdir },
        VOPNAME_READDIR,        { .vop_readdir = nfs_readdir },
        VOPNAME_SYMLINK,        { .vop_symlink = nfs_symlink },
        VOPNAME_READLINK,       { .vop_readlink = nfs_readlink },
        VOPNAME_FSYNC,          { .vop_fsync = nfs_fsync },
        VOPNAME_INACTIVE,       { .vop_inactive = nfs_inactive },
        VOPNAME_FID,            { .vop_fid = nfs_fid },
        VOPNAME_RWLOCK,         { .vop_rwlock = nfs_rwlock },
        VOPNAME_RWUNLOCK,       { .vop_rwunlock = nfs_rwunlock },
        VOPNAME_SEEK,           { .vop_seek = nfs_seek },
        VOPNAME_FRLOCK,         { .vop_frlock = nfs_frlock },
        VOPNAME_SPACE,          { .vop_space = nfs_space },
        VOPNAME_REALVP,         { .vop_realvp = nfs_realvp },
        VOPNAME_GETPAGE,        { .vop_getpage = nfs_getpage },
        VOPNAME_PUTPAGE,        { .vop_putpage = nfs_putpage },
        VOPNAME_MAP,            { .vop_map = nfs_map },
        VOPNAME_ADDMAP,         { .vop_addmap = nfs_addmap },
        VOPNAME_DELMAP,         { .vop_delmap = nfs_delmap },
        VOPNAME_DUMP,           { .vop_dump = nfs_dump },
        VOPNAME_PATHCONF,       { .vop_pathconf = nfs_pathconf },
        VOPNAME_PAGEIO,         { .vop_pageio = nfs_pageio },
        VOPNAME_SETSECATTR,     { .vop_setsecattr = nfs_setsecattr },
        VOPNAME_GETSECATTR,     { .vop_getsecattr = nfs_getsecattr },
        VOPNAME_SHRLOCK,        { .vop_shrlock = nfs_shrlock },
        VOPNAME_VNEVENT,        { .vop_vnevent = fs_vnevent_support },
        NULL,                   NULL
};
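
/*
 * The template above is turned into the operations vector at module
 * initialization time (via vn_make_ops() in the NFS client setup code),
 * which fills in nfs_vnodeops below.
 */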

/*
 * XXX:  This is referenced in modstubs.s
 */
struct vnodeops *
nfs_getvnodeops(void)
{
        return (nfs_vnodeops);
}

/* ARGSUSED */
static int
nfs_open(vnode_t **vpp, int flag, cred_t *cr, caller_context_t *ct)
{
        int error;
        struct vattr va;
        rnode_t *rp;
        vnode_t *vp;

        vp = *vpp;
        rp = VTOR(vp);
        if (nfs_zone() != VTOMI(vp)->mi_zone)
                return (EIO);
        mutex_enter(&rp->r_statelock);
        if (rp->r_cred == NULL) {
                crhold(cr);
                rp->r_cred = cr;
        }
        mutex_exit(&rp->r_statelock);

        /*
         * If there is no cached data or if close-to-open
         * consistency checking is turned off, we can avoid
         * the over the wire getattr.  Otherwise, if the
         * file system is mounted readonly, then just verify
         * the caches are up to date using the normal mechanism.
         * Else, if the file is not mmap'd, then just mark
         * the attributes as timed out.  They will be refreshed
         * and the caches validated prior to being used.
         * Else, the file system is mounted writeable so
         * force an over the wire GETATTR in order to ensure
         * that all cached data is valid.
         */
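        /*
         * In outline: an open of a vnode with other holds (v_count > 1) or
         * with cached file or readdir data gets its caches checked, unless
         * the mount disabled close-to-open consistency (MI_NOCTO):
         * read-only mounts validate the caches the normal way; a sole,
         * un-mmap'd hold just times out the attribute cache; anything else
         * forces the over the wire GETATTR.
         */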
        if (vp->v_count > 1 ||
            ((vn_has_cached_data(vp) || HAVE_RDDIR_CACHE(rp)) &&
            !(VTOMI(vp)->mi_flags & MI_NOCTO))) {
                if (vn_is_readonly(vp))
                        error = nfs_validate_caches(vp, cr);
                else if (rp->r_mapcnt == 0 && vp->v_count == 1) {
                        PURGE_ATTRCACHE(vp);
                        error = 0;
                } else {
                        va.va_mask = AT_ALL;
                        error = nfs_getattr_otw(vp, &va, cr);
                }
        } else
                error = 0;

        return (error);
}

/* ARGSUSED */
static int
nfs_close(vnode_t *vp, int flag, int count, offset_t offset, cred_t *cr,
        caller_context_t *ct)
{
        rnode_t *rp;
        int error;
        struct vattr va;

        /*
         * zone_enter(2) prevents processes from changing zones with NFS files
         * open; if we happen to get here from the wrong zone we can't do
         * anything over the wire.
         */
        if (VTOMI(vp)->mi_zone != nfs_zone()) {
                /*
                 * We could attempt to clean up locks, except we're sure
                 * that the current process didn't acquire any locks on
                 * the file: any attempt to lock a file belonging to another
                 * zone will fail, and one can't lock an NFS file and then
                 * change zones, as that fails too.
                 *
                 * Returning an error here is the sane thing to do.  A
                 * subsequent call to VN_RELE() which translates to a
                 * nfs_inactive() will clean up state: if the zone of the
                 * vnode's origin is still alive and kicking, an async worker
                 * thread will handle the request (from the correct zone), and
                 * everything (minus the final nfs_getattr_otw() call) should
                 * be OK. If the zone is going away nfs_async_inactive() will
                 * throw away cached pages inline.
                 */
                return (EIO);
        }

        /*
         * If we are using local locking for this filesystem, then
         * release all of the SYSV style record locks.  Otherwise,
         * we are doing network locking and we need to release all
         * of the network locks.  All of the locks held by this
         * process on this file are released no matter what the
         * incoming reference count is.
         */
        if (VTOMI(vp)->mi_flags & MI_LLOCK) {
                cleanlocks(vp, ttoproc(curthread)->p_pid, 0);
                cleanshares(vp, ttoproc(curthread)->p_pid);
        } else
                nfs_lockrelease(vp, flag, offset, cr);

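        /*
         * Only the last close of the file (a count of 1) performs the
         * flush and commit processing below; earlier closes just release
         * their locks and return.
         */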
        if (count > 1)
                return (0);

        /*
         * If the file has been `unlinked', then purge the
         * DNLC so that this vnode will get recycled more quickly
         * and the .nfs* file on the server will get removed.
         */
        rp = VTOR(vp);
        if (rp->r_unldvp != NULL)
                dnlc_purge_vp(vp);

        /*
         * If the file was open for write and there are pages, then:
         * if the file system was mounted using the "no-close-to-open"
         * semantics, start an asynchronous flush of all of the pages
         * in the file; otherwise, do a synchronous flush and commit
         * of all of the dirty and uncommitted pages.
         *
         * The asynchronous flush of the pages in the "nocto" path
         * mostly just associates a cred pointer with the rnode so
         * writes which happen later will have a better chance of
         * working.  It also starts the data being written to the
         * server, but without unnecessarily delaying the application.
         */
        if ((flag & FWRITE) && vn_has_cached_data(vp)) {
                if ((VTOMI(vp)->mi_flags & MI_NOCTO)) {
                        error = nfs_putpage(vp, (offset_t)0, 0, B_ASYNC,
                            cr, ct);
                        if (error == EAGAIN)
                                error = 0;
                } else
                        error = nfs_putpage(vp, (offset_t)0, 0, 0, cr, ct);
                if (!error) {
                        mutex_enter(&rp->r_statelock);
                        error = rp->r_error;
                        rp->r_error = 0;
                        mutex_exit(&rp->r_statelock);
                }
        } else {
                mutex_enter(&rp->r_statelock);
                error = rp->r_error;
                rp->r_error = 0;
                mutex_exit(&rp->r_statelock);
        }

        /*
         * If RWRITEATTR is set, then issue an over the wire GETATTR to
         * refresh the attribute cache with a set of attributes which
         * weren't returned from a WRITE.  This will enable the close-
         * to-open processing to work.
         */
        if (rp->r_flags & RWRITEATTR)
                (void) nfs_getattr_otw(vp, &va, cr);

        return (error);
}

/* ARGSUSED */
static int
nfs_read(vnode_t *vp, struct uio *uiop, int ioflag, cred_t *cr,
        caller_context_t *ct)
{
        rnode_t *rp;
        u_offset_t off;
        offset_t diff;
        int on;
        size_t n;
        caddr_t base;
        uint_t flags;
        int error;
        mntinfo_t *mi;

        rp = VTOR(vp);
        mi = VTOMI(vp);

        if (nfs_zone() != mi->mi_zone)
                return (EIO);

        ASSERT(nfs_rw_lock_held(&rp->r_rwlock, RW_READER));

        if (vp->v_type != VREG)
                return (EISDIR);

        if (uiop->uio_resid == 0)
                return (0);

        if (uiop->uio_loffset > MAXOFF32_T)
                return (EFBIG);

        if (uiop->uio_loffset < 0 ||
            uiop->uio_loffset + uiop->uio_resid > MAXOFF32_T)
                return (EINVAL);
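        /*
         * The offset checks above reflect that this is the NFS Version 2
         * client: offsets on the wire are 32 bits, so requests past
         * MAXOFF32_T cannot be expressed to the server.
         */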

        /*
         * Bypass VM if caching has been disabled (e.g., locking) or if
         * using client-side direct I/O and the file is not mmap'd and
         * there are no cached pages.
         */
        if ((vp->v_flag & VNOCACHE) ||
            (((rp->r_flags & RDIRECTIO) || (mi->mi_flags & MI_DIRECTIO)) &&
            rp->r_mapcnt == 0 && rp->r_inmap == 0 &&
            !vn_has_cached_data(vp))) {
                size_t bufsize;
                size_t resid = 0;

                /*
                 * Try to do the read in as large a chunk as we can
                 * (the NFS client's transfer size, if possible/needed).
                 * For V3 this is 32K and for V2 this is 8K.
                 */
                bufsize = MIN(uiop->uio_resid, VTOMI(vp)->mi_curread);
                base = kmem_alloc(bufsize, KM_SLEEP);
                do {
                        n = MIN(uiop->uio_resid, bufsize);
                        error = nfsread(vp, base, uiop->uio_offset, n,
                            &resid, cr);
                        if (!error) {
                                n -= resid;
                                error = uiomove(base, n, UIO_READ, uiop);
                        }
                } while (!error && uiop->uio_resid > 0 && n > 0);
                kmem_free(base, bufsize);
                return (error);
        }

        error = 0;

        do {
                off = uiop->uio_loffset & MAXBMASK; /* mapping offset */
                on = uiop->uio_loffset & MAXBOFFSET; /* Relative offset */
                n = MIN(MAXBSIZE - on, uiop->uio_resid);

                error = nfs_validate_caches(vp, cr);
                if (error)
                        break;

                mutex_enter(&rp->r_statelock);
                while (rp->r_flags & RINCACHEPURGE) {
                        if (!cv_wait_sig(&rp->r_cv, &rp->r_statelock)) {
                                mutex_exit(&rp->r_statelock);
                                return (EINTR);
                        }
                }
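                /*
                 * Any cache purge has now drained, so r_size is stable
                 * enough to bound this pass of the read against the
                 * current end of file.
                 */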
                diff = rp->r_size - uiop->uio_loffset;
                mutex_exit(&rp->r_statelock);
                if (diff <= 0)
                        break;
                if (diff < n)
                        n = (size_t)diff;

                if (vpm_enable) {
                        /*
                         * Copy data.
                         */
                        error = vpm_data_copy(vp, off + on, n, uiop,
                            1, NULL, 0, S_READ);
                } else {
                        base = segmap_getmapflt(segkmap, vp, off + on, n,
                            1, S_READ);
                        error = uiomove(base + on, n, UIO_READ, uiop);
                }

                if (!error) {
                        /*
                         * If we read a whole block or read to EOF,
                         * we won't need this buffer again soon.
                         */
                        mutex_enter(&rp->r_statelock);
                        if (n + on == MAXBSIZE ||
                            uiop->uio_loffset == rp->r_size)
                                flags = SM_DONTNEED;
                        else
                                flags = 0;
                        mutex_exit(&rp->r_statelock);
                        if (vpm_enable) {
                                error = vpm_sync_pages(vp, off, n, flags);
                        } else {
                                error = segmap_release(segkmap, base, flags);
                        }
                } else {
                        if (vpm_enable) {
                                (void) vpm_sync_pages(vp, off, n, 0);
                        } else {
                                (void) segmap_release(segkmap, base, 0);
                        }
                }
        } while (!error && uiop->uio_resid > 0);

        return (error);
}

/* ARGSUSED */
static int
nfs_write(vnode_t *vp, struct uio *uiop, int ioflag, cred_t *cr,
        caller_context_t *ct)
{
        rnode_t *rp;
        u_offset_t off;
        caddr_t base;
        uint_t flags;
        int remainder;
        size_t n;
        int on;
        int error;
        int resid;
        offset_t offset;
        rlim_t limit;
        mntinfo_t *mi;

        rp = VTOR(vp);

        mi = VTOMI(vp);
        if (nfs_zone() != mi->mi_zone)
                return (EIO);
        if (vp->v_type != VREG)
                return (EISDIR);

        if (uiop->uio_resid == 0)
                return (0);

        if (ioflag & FAPPEND) {
                struct vattr va;

                /*
                 * Must serialize if appending.
                 */
                if (nfs_rw_lock_held(&rp->r_rwlock, RW_READER)) {
                        nfs_rw_exit(&rp->r_rwlock);
                        if (nfs_rw_enter_sig(&rp->r_rwlock, RW_WRITER,
                            INTR(vp)))
                                return (EINTR);
                }

                va.va_mask = AT_SIZE;
                error = nfsgetattr(vp, &va, cr);
                if (error)
                        return (error);
                uiop->uio_loffset = va.va_size;
        }

        if (uiop->uio_loffset > MAXOFF32_T)
                return (EFBIG);

        offset = uiop->uio_loffset + uiop->uio_resid;

        if (uiop->uio_loffset < 0 || offset > MAXOFF32_T)
                return (EINVAL);

        if (uiop->uio_llimit > (rlim64_t)MAXOFF32_T) {
                limit = MAXOFF32_T;
        } else {
                limit = (rlim_t)uiop->uio_llimit;
        }

        /*
         * Check to make sure that the process will not exceed
         * its limit on file size.  It is okay to write up to
         * the limit, but not beyond.  Thus, the write which
         * reaches the limit will be short and the next write
         * will return an error.
         */
        remainder = 0;
        if (offset > limit) {
                remainder = offset - limit;
                uiop->uio_resid = limit - uiop->uio_offset;
                if (uiop->uio_resid <= 0) {
                        proc_t *p = ttoproc(curthread);

                        uiop->uio_resid += remainder;
                        mutex_enter(&p->p_lock);
                        (void) rctl_action(rctlproc_legacy[RLIMIT_FSIZE],
                            p->p_rctls, p, RCA_UNSAFE_SIGINFO);
                        mutex_exit(&p->p_lock);
                        return (EFBIG);
                }
        }
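        /*
         * For example, with limit 1000, uio_loffset 990 and uio_resid 20:
         * offset is 1010, so remainder becomes 10 and uio_resid is trimmed
         * to 10.  That write completes short, and the next write starts at
         * the limit, trips the rctl_action() above, and returns EFBIG.
         */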

        if (nfs_rw_enter_sig(&rp->r_lkserlock, RW_READER, INTR(vp)))
                return (EINTR);

        /*
         * Bypass VM if caching has been disabled (e.g., locking) or if
         * using client-side direct I/O and the file is not mmap'd and
         * there are no cached pages.
         */
        if ((vp->v_flag & VNOCACHE) ||
            (((rp->r_flags & RDIRECTIO) || (mi->mi_flags & MI_DIRECTIO)) &&
            rp->r_mapcnt == 0 && rp->r_inmap == 0 &&
            !vn_has_cached_data(vp))) {
                size_t bufsize;
                int count;
                uint_t org_offset;

nfs_fwrite:
                if (rp->r_flags & RSTALE) {
                        resid = uiop->uio_resid;
                        offset = uiop->uio_loffset;
                        error = rp->r_error;
                        /*
                         * A close may have cleared r_error; if so,
                         * propagate the ESTALE error return properly.
                         */
                        if (error == 0)
                                error = ESTALE;
                        goto bottom;
                }
                bufsize = MIN(uiop->uio_resid, mi->mi_curwrite);
                base = kmem_alloc(bufsize, KM_SLEEP);
                do {
                        resid = uiop->uio_resid;
                        offset = uiop->uio_loffset;
                        count = MIN(uiop->uio_resid, bufsize);
                        org_offset = uiop->uio_offset;
                        error = uiomove(base, count, UIO_WRITE, uiop);
                        if (!error) {
                                error = nfswrite(vp, base, org_offset,
                                    count, cr);
                        }
                } while (!error && uiop->uio_resid > 0);
                kmem_free(base, bufsize);
                goto bottom;
        }

        do {
                off = uiop->uio_loffset & MAXBMASK; /* mapping offset */
                on = uiop->uio_loffset & MAXBOFFSET; /* Relative offset */
                n = MIN(MAXBSIZE - on, uiop->uio_resid);

                resid = uiop->uio_resid;
                offset = uiop->uio_loffset;

                if (rp->r_flags & RSTALE) {
                        error = rp->r_error;
                        /*
                         * A close may have cleared r_error; if so,
                         * propagate the ESTALE error return properly.
                         */
                        if (error == 0)
                                error = ESTALE;
                        break;
                }

                /*
                 * Don't create dirty pages faster than they
                 * can be cleaned so that the system doesn't
                 * get imbalanced.  If the async queue is
                 * maxed out, then wait for it to drain before
                 * creating more dirty pages.  Also, wait for
                 * any threads doing pagewalks in the vop_getattr
                 * entry points so that they don't block for
                 * long periods.
                 */
                mutex_enter(&rp->r_statelock);
                while ((mi->mi_max_threads != 0 &&
                    rp->r_awcount > 2 * mi->mi_max_threads) ||
                    rp->r_gcount > 0) {
                        if (INTR(vp)) {
                                klwp_t *lwp = ttolwp(curthread);

                                if (lwp != NULL)
                                        lwp->lwp_nostop++;
                                if (!cv_wait_sig(&rp->r_cv, &rp->r_statelock)) {
                                        mutex_exit(&rp->r_statelock);
                                        if (lwp != NULL)
                                                lwp->lwp_nostop--;
                                        error = EINTR;
                                        goto bottom;
                                }
                                if (lwp != NULL)
                                        lwp->lwp_nostop--;
                        } else
                                cv_wait(&rp->r_cv, &rp->r_statelock);
                }
                mutex_exit(&rp->r_statelock);

                /*
                 * Touch the page and fault it in if it is not in core
                 * before segmap_getmapflt or vpm_data_copy can lock it.
                 * This is to avoid the deadlock if the buffer is mapped
                 * to the same file through mmap which we want to write.
                 */
                uio_prefaultpages((long)n, uiop);

                if (vpm_enable) {
                        /*
                         * It will use kpm mappings, so no need to
                         * pass an address.
                         */
                        error = writerp(rp, NULL, n, uiop, 0);
                } else  {
                        if (segmap_kpm) {
                                int pon = uiop->uio_loffset & PAGEOFFSET;
                                size_t pn = MIN(PAGESIZE - pon,
                                    uiop->uio_resid);
                                int pagecreate;

                                mutex_enter(&rp->r_statelock);
                                pagecreate = (pon == 0) && (pn == PAGESIZE ||
                                    uiop->uio_loffset + pn >= rp->r_size);
                                mutex_exit(&rp->r_statelock);
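
                                /*
                                 * pagecreate means the entire page will be
                                 * overwritten (page-aligned and either a
                                 * full page or extending to or past EOF),
                                 * so segmap_getmapflt() need not fault in
                                 * the page's old contents.
                                 */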
                                base = segmap_getmapflt(segkmap, vp, off + on,
                                    pn, !pagecreate, S_WRITE);

                                error = writerp(rp, base + pon, n, uiop,
                                    pagecreate);

                        } else {
                                base = segmap_getmapflt(segkmap, vp, off + on,
                                    n, 0, S_READ);
                                error = writerp(rp, base + on, n, uiop, 0);
                        }
                }

                if (!error) {
                        if (mi->mi_flags & MI_NOAC)
                                flags = SM_WRITE;
                        else if (n + on == MAXBSIZE || IS_SWAPVP(vp)) {
                                /*
                                 * Have written a whole block.
                                 * Start an asynchronous write
                                 * and mark the buffer to
                                 * indicate that it won't be
                                 * needed again soon.
                                 */
                                flags = SM_WRITE | SM_ASYNC | SM_DONTNEED;
                        } else
                                flags = 0;
                        if ((ioflag & (FSYNC|FDSYNC)) ||
                            (rp->r_flags & ROUTOFSPACE)) {
                                flags &= ~SM_ASYNC;
                                flags |= SM_WRITE;
                        }
                        if (vpm_enable) {
                                error = vpm_sync_pages(vp, off, n, flags);
                        } else {
                                error = segmap_release(segkmap, base, flags);
                        }
                } else {
                        if (vpm_enable) {
                                (void) vpm_sync_pages(vp, off, n, 0);
                        } else {
                                (void) segmap_release(segkmap, base, 0);
                        }
                        /*
                         * In the event that we got an access error while
                         * faulting in a page for a write-only file, just
                         * force a write.
                         */
                        if (error == EACCES)
                                goto nfs_fwrite;
                }
        } while (!error && uiop->uio_resid > 0);

bottom:
        if (error) {
                uiop->uio_resid = resid + remainder;
                uiop->uio_loffset = offset;
        } else
                uiop->uio_resid += remainder;

        nfs_rw_exit(&rp->r_lkserlock);

        return (error);
}

/*
 * Flags are composed of {B_ASYNC, B_INVAL, B_FREE, B_DONTNEED}
 */
static int
nfs_rdwrlbn(vnode_t *vp, page_t *pp, u_offset_t off, size_t len,
        int flags, cred_t *cr)
{
        struct buf *bp;
        int error;

        ASSERT(nfs_zone() == VTOMI(vp)->mi_zone);
        bp = pageio_setup(pp, len, vp, flags);
        ASSERT(bp != NULL);

        /*
         * pageio_setup should have set b_addr to 0.  This
         * is correct since we want to do I/O on a page
         * boundary.  bp_mapin will use this addr to calculate
         * an offset, and then set b_addr to the kernel virtual
         * address it allocated for us.
         */
        ASSERT(bp->b_un.b_addr == 0);

        bp->b_edev = 0;
        bp->b_dev = 0;
        bp->b_lblkno = lbtodb(off);
        bp->b_file = vp;
        bp->b_offset = (offset_t)off;
        bp_mapin(bp);

        error = nfs_bio(bp, cr);

        bp_mapout(bp);
        pageio_done(bp);

        return (error);
}

/*
 * Write to file.  Writes to remote server in largest size
 * chunks that the server can handle.  Write is synchronous.
 */
static int
nfswrite(vnode_t *vp, caddr_t base, uint_t offset, int count, cred_t *cr)
{
        rnode_t *rp;
        mntinfo_t *mi;
        struct nfswriteargs wa;
        struct nfsattrstat ns;
        int error;
        int tsize;
        int douprintf;

        douprintf = 1;

        rp = VTOR(vp);
        mi = VTOMI(vp);

        ASSERT(nfs_zone() == mi->mi_zone);

        wa.wa_args = &wa.wa_args_buf;
        wa.wa_fhandle = *VTOFH(vp);

        do {
                tsize = MIN(mi->mi_curwrite, count);
                wa.wa_data = base;
                wa.wa_begoff = offset;
                wa.wa_totcount = tsize;
                wa.wa_count = tsize;
                wa.wa_offset = offset;
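
                /*
                 * For Version 2, wa_begoff/wa_totcount duplicate
                 * wa_offset/wa_count; RFC 1094 says the server ignores
                 * beginoffset and totalcount, but they are filled in
                 * anyway.
                 */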

                if (mi->mi_io_kstats) {
                        mutex_enter(&mi->mi_lock);
                        kstat_runq_enter(KSTAT_IO_PTR(mi->mi_io_kstats));
                        mutex_exit(&mi->mi_lock);
                }
                wa.wa_mblk = NULL;
                do {
                        error = rfs2call(mi, RFS_WRITE,
                            xdr_writeargs, (caddr_t)&wa,
                            xdr_attrstat, (caddr_t)&ns, cr,
                            &douprintf, &ns.ns_status, 0, NULL);
                } while (error == ENFS_TRYAGAIN);
                if (mi->mi_io_kstats) {
                        mutex_enter(&mi->mi_lock);
                        kstat_runq_exit(KSTAT_IO_PTR(mi->mi_io_kstats));
                        mutex_exit(&mi->mi_lock);
                }

                if (!error) {
                        error = geterrno(ns.ns_status);
                        /*
                         * Can't check for stale fhandle and purge caches
                         * here because pages are held by nfs_getpage.
                         * Just mark the attribute cache as timed out
                         * and set RWRITEATTR to indicate that the file
                         * was modified with a WRITE operation.
                         */
                        if (!error) {
                                count -= tsize;
                                base += tsize;
                                offset += tsize;
                                if (mi->mi_io_kstats) {
                                        mutex_enter(&mi->mi_lock);
                                        KSTAT_IO_PTR(mi->mi_io_kstats)->
                                            writes++;
                                        KSTAT_IO_PTR(mi->mi_io_kstats)->
                                            nwritten += tsize;
                                        mutex_exit(&mi->mi_lock);
                                }
                                lwp_stat_update(LWP_STAT_OUBLK, 1);
                                mutex_enter(&rp->r_statelock);
                                PURGE_ATTRCACHE_LOCKED(rp);
                                rp->r_flags |= RWRITEATTR;
                                mutex_exit(&rp->r_statelock);
                        }
                }
        } while (!error && count);

        return (error);
}

/*
 * Read from a file.  Reads data in largest chunks our interface can handle.
 */
static int
nfsread(vnode_t *vp, caddr_t base, uint_t offset,
    int count, size_t *residp, cred_t *cr)
{
        mntinfo_t *mi;
        struct nfsreadargs ra;
        struct nfsrdresult rr;
        int tsize;
        int error;
        int douprintf;
        failinfo_t fi;
        rnode_t *rp;
        struct vattr va;
        hrtime_t t;

        rp = VTOR(vp);
        mi = VTOMI(vp);

        ASSERT(nfs_zone() == mi->mi_zone);

        douprintf = 1;

        ra.ra_fhandle = *VTOFH(vp);

        fi.vp = vp;
        fi.fhp = (caddr_t)&ra.ra_fhandle;
        fi.copyproc = nfscopyfh;
        fi.lookupproc = nfslookup;
        fi.xattrdirproc = acl_getxattrdir2;

        do {
                if (mi->mi_io_kstats) {
                        mutex_enter(&mi->mi_lock);
                        kstat_runq_enter(KSTAT_IO_PTR(mi->mi_io_kstats));
                        mutex_exit(&mi->mi_lock);
                }

                do {
                        tsize = MIN(mi->mi_curread, count);
                        rr.rr_data = base;
                        ra.ra_offset = offset;
                        ra.ra_totcount = tsize;
                        ra.ra_count = tsize;
                        ra.ra_data = base;
                        t = gethrtime();
                        error = rfs2call(mi, RFS_READ,
                            xdr_readargs, (caddr_t)&ra,
                            xdr_rdresult, (caddr_t)&rr, cr,
                            &douprintf, &rr.rr_status, 0, &fi);
                } while (error == ENFS_TRYAGAIN);

                if (mi->mi_io_kstats) {
                        mutex_enter(&mi->mi_lock);
                        kstat_runq_exit(KSTAT_IO_PTR(mi->mi_io_kstats));
                        mutex_exit(&mi->mi_lock);
                }

                if (!error) {
                        error = geterrno(rr.rr_status);
                        if (!error) {
                                count -= rr.rr_count;
                                base += rr.rr_count;
                                offset += rr.rr_count;
                                if (mi->mi_io_kstats) {
                                        mutex_enter(&mi->mi_lock);
                                        KSTAT_IO_PTR(mi->mi_io_kstats)->reads++;
                                        KSTAT_IO_PTR(mi->mi_io_kstats)->nread +=
                                            rr.rr_count;
                                        mutex_exit(&mi->mi_lock);
                                }
                                lwp_stat_update(LWP_STAT_INBLK, 1);
                        }
                }
        } while (!error && count && rr.rr_count == tsize);
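
        /*
         * A short read (rr_count < tsize) means end of file was reached
         * and ends the loop; whatever was not transferred is reported
         * back through residp.
         */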

        *residp = count;

        if (!error) {
                /*
                 * Since no error occurred, we have the current
                 * attributes and we need to do a cache check and then
                 * potentially update the cached attributes.  We can't
                 * use the normal attribute check and cache mechanisms
                 * because they might cause a cache flush which would
                 * deadlock.  Instead, we just check the cache to see
                 * if the attributes have changed.  If they have, then
                 * we just mark the attributes as out of date.  The next
                 * time that the attributes are checked, they will be
                 * out of date, new attributes will be fetched, and
                 * the page cache will be flushed.  If the attributes
                 * weren't changed, then we just update the cached
                 * attributes with these attributes.
                 */
                /*
                 * If NFS_ACL is supported on the server, then the
                 * attributes returned by the server may have minimal
                 * permissions, sometimes denying access to users having
                 * proper access.  To get the proper attributes, mark
                 * the attributes as expired so that they will be
                 * fetched again via the NFS_ACL GETATTR2 procedure.
                 */
                error = nattr_to_vattr(vp, &rr.rr_attr, &va);
                mutex_enter(&rp->r_statelock);
                if (error || !CACHE_VALID(rp, va.va_mtime, va.va_size) ||
                    (mi->mi_flags & MI_ACL)) {
                        mutex_exit(&rp->r_statelock);
                        PURGE_ATTRCACHE(vp);
                } else {
                        if (rp->r_mtime <= t) {
                                nfs_attrcache_va(vp, &va);
                        }
                        mutex_exit(&rp->r_statelock);
                }
        }

        return (error);
}

/* ARGSUSED */
static int
nfs_ioctl(vnode_t *vp, int cmd, intptr_t arg, int flag, cred_t *cr, int *rvalp,
        caller_context_t *ct)
{

        if (nfs_zone() != VTOMI(vp)->mi_zone)
                return (EIO);
        switch (cmd) {
                case _FIODIRECTIO:
                        return (nfs_directio(vp, (int)arg, cr));
                default:
                        return (ENOTTY);
        }
}

/* ARGSUSED */
static int
nfs_getattr(vnode_t *vp, struct vattr *vap, int flags, cred_t *cr,
        caller_context_t *ct)
{
        int error;
        rnode_t *rp;

        if (nfs_zone() != VTOMI(vp)->mi_zone)
                return (EIO);
        /*
         * If it has been specified that the return value will
         * just be used as a hint, and we are only being asked
         * for size, fsid or rdevid, then return the client's
         * notion of these values without checking to make sure
         * that the attribute cache is up to date.
         * The whole point is to avoid an over the wire GETATTR
         * call.
         */
        rp = VTOR(vp);
        if (flags & ATTR_HINT) {
                if (vap->va_mask ==
                    (vap->va_mask & (AT_SIZE | AT_FSID | AT_RDEV))) {
                        mutex_enter(&rp->r_statelock);
                        if (vap->va_mask & AT_SIZE)
                                vap->va_size = rp->r_size;
                        if (vap->va_mask & AT_FSID)
                                vap->va_fsid = rp->r_attr.va_fsid;
                        if (vap->va_mask & AT_RDEV)
                                vap->va_rdev = rp->r_attr.va_rdev;
                        mutex_exit(&rp->r_statelock);
                        return (0);
                }
        }

        /*
         * Only need to flush pages if asking for the mtime
         * and if there are any dirty pages or any outstanding
         * asynchronous (write) requests for this file.
         */
        if (vap->va_mask & AT_MTIME) {
                if (vn_has_cached_data(vp) &&
                    ((rp->r_flags & RDIRTY) || rp->r_awcount > 0)) {
                        mutex_enter(&rp->r_statelock);
                        rp->r_gcount++;
                        mutex_exit(&rp->r_statelock);
                        error = nfs_putpage(vp, (offset_t)0, 0, 0, cr, ct);
                        mutex_enter(&rp->r_statelock);
                        if (error && (error == ENOSPC || error == EDQUOT)) {
                                if (!rp->r_error)
                                        rp->r_error = error;
                        }
                        if (--rp->r_gcount == 0)
                                cv_broadcast(&rp->r_cv);
                        mutex_exit(&rp->r_statelock);
                }
        }

        return (nfsgetattr(vp, vap, cr));
}

/*ARGSUSED4*/
static int
nfs_setattr(vnode_t *vp, struct vattr *vap, int flags, cred_t *cr,
                caller_context_t *ct)
{
        int error;
        uint_t mask;
        struct vattr va;

        mask = vap->va_mask;

        if (mask & AT_NOSET)
                return (EINVAL);

        if ((mask & AT_SIZE) &&
            vap->va_type == VREG &&
            vap->va_size > MAXOFF32_T)
                return (EFBIG);

        if (nfs_zone() != VTOMI(vp)->mi_zone)
                return (EIO);

        va.va_mask = AT_UID | AT_MODE;

        error = nfsgetattr(vp, &va, cr);
        if (error)
                return (error);

        error = secpolicy_vnode_setattr(cr, vp, vap, &va, flags, nfs_accessx,
            vp);

        if (error)
                return (error);

        error = nfssetattr(vp, vap, flags, cr);

        if (error == 0 && (mask & AT_SIZE)) {
                if (vap->va_size == 0) {
                        vnevent_truncate(vp, ct);
                } else {
                        vnevent_resize(vp, ct);
                }
        }

        return (error);
}

static int
nfssetattr(vnode_t *vp, struct vattr *vap, int flags, cred_t *cr)
{
        int error;
        uint_t mask;
        struct nfssaargs args;
        struct nfsattrstat ns;
        int douprintf;
        rnode_t *rp;
        struct vattr va;
        mode_t omode;
        mntinfo_t *mi;
        vsecattr_t *vsp;
        hrtime_t t;

        mask = vap->va_mask;

        ASSERT(nfs_zone() == VTOMI(vp)->mi_zone);

        rp = VTOR(vp);

        /*
         * Only need to flush pages if there are any pages and
         * if the file is marked as dirty in some fashion.  The
         * file must be flushed so that we can accurately
         * determine the size of the file and the cached data
         * after the SETATTR returns.  A file is considered to
         * be dirty if it is either marked with RDIRTY, has
         * outstanding i/o's active, or is mmap'd.  In this
         * last case, we can't tell whether there are dirty
         * pages, so we flush just to be sure.
         */
        if (vn_has_cached_data(vp) &&
            ((rp->r_flags & RDIRTY) ||
            rp->r_count > 0 ||
            rp->r_mapcnt > 0)) {
                ASSERT(vp->v_type != VCHR);
                error = nfs_putpage(vp, (offset_t)0, 0, 0, cr, NULL);
                if (error && (error == ENOSPC || error == EDQUOT)) {
                        mutex_enter(&rp->r_statelock);
                        if (!rp->r_error)
                                rp->r_error = error;
                        mutex_exit(&rp->r_statelock);
                }
        }

        /*
         * If the system call was utime(2) or utimes(2) and the
         * application did not specify the times, then set the
         * mtime nanosecond field to 1 billion.  This will get
         * translated from 1 billion nanoseconds to 1 million
         * microseconds in the over the wire request.  The
         * server will use 1 million in the microsecond field
         * to tell whether both the mtime and atime should be
         * set to the server's current time.
         *
         * This is an overload of the protocol and should be
         * documented in the NFS Version 2 protocol specification.
         */
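        /*
         * For example, a tv_nsec of 1000000000 goes out on the wire as
         * 1000000 in the microseconds field (nanoseconds / 1000), which
         * is the sentinel the server tests for.
         */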
        if ((mask & AT_MTIME) && !(flags & ATTR_UTIME)) {
                vap->va_mtime.tv_nsec = 1000000000;
                if (NFS_TIME_T_OK(vap->va_mtime.tv_sec) &&
                    NFS_TIME_T_OK(vap->va_atime.tv_sec)) {
                        error = vattr_to_sattr(vap, &args.saa_sa);
                } else {
                        /*
                         * Use server times. vap time values will not be used.
                         * To ensure no time overflow, make sure vap has
                         * valid values, but retain the original values.
                         */
                        timestruc_t     mtime = vap->va_mtime;
                        timestruc_t     atime = vap->va_atime;
                        time_t          now;

                        now = gethrestime_sec();
                        if (NFS_TIME_T_OK(now)) {
                                /* Just in case server does not know of this */
                                vap->va_mtime.tv_sec = now;
                                vap->va_atime.tv_sec = now;
                        } else {
                                vap->va_mtime.tv_sec = 0;
                                vap->va_atime.tv_sec = 0;
                        }
                        error = vattr_to_sattr(vap, &args.saa_sa);
                        /* set vap times back on */
                        vap->va_mtime = mtime;
                        vap->va_atime = atime;
                }
        } else {
                /* Either do not set times or use the client specified times */
                error = vattr_to_sattr(vap, &args.saa_sa);
        }
        if (error) {
                /* req time field(s) overflow - return immediately */
                return (error);
        }
        args.saa_fh = *VTOFH(vp);

        va.va_mask = AT_MODE;
        error = nfsgetattr(vp, &va, cr);
        if (error)
                return (error);
        omode = va.va_mode;

        mi = VTOMI(vp);

        douprintf = 1;

        t = gethrtime();

        error = rfs2call(mi, RFS_SETATTR,
            xdr_saargs, (caddr_t)&args,
            xdr_attrstat, (caddr_t)&ns, cr,
            &douprintf, &ns.ns_status, 0, NULL);

        /*
         * Purge the access cache and ACL cache if changing either the
         * owner of the file, the group owner, or the mode.  These may
         * change the access permissions of the file, so purge old
         * information and start over again.
         */
        if ((mask & (AT_UID | AT_GID | AT_MODE)) && (mi->mi_flags & MI_ACL)) {
                (void) nfs_access_purge_rp(rp);
                if (rp->r_secattr != NULL) {
                        mutex_enter(&rp->r_statelock);
                        vsp = rp->r_secattr;
                        rp->r_secattr = NULL;
                        mutex_exit(&rp->r_statelock);
                        if (vsp != NULL)
                                nfs_acl_free(vsp);
                }
        }

        if (!error) {
                error = geterrno(ns.ns_status);
                if (!error) {
                        /*
                         * If changing the size of the file, invalidate
                         * any local cached data which is no longer part
                         * of the file.  We also possibly invalidate the
                         * last page in the file.  We could use
                         * pvn_vpzero(), but this would mark the page as
                         * modified and require it to be written back to
                         * the server for no particularly good reason.
                         * This way, if we access it, then we bring it
                         * back in.  A read should be cheaper than a
                         * write.
                         */
                        if (mask & AT_SIZE) {
                                nfs_invalidate_pages(vp,
                                    (vap->va_size & PAGEMASK), cr);
                        }
                        (void) nfs_cache_fattr(vp, &ns.ns_attr, &va, t, cr);
1341                         /*
1342                          * If NFS_ACL is supported on the server, then the
1343                          * attributes returned by the server may have minimal
1344                          * permissions, sometimes denying access to users who
1345                          * have proper access.  To get the proper attributes,
1346                          * mark the attributes as expired so that they will be
1347                          * fetched again via the NFS_ACL GETATTR2 procedure.
1348                          */
1349                         if (mi->mi_flags & MI_ACL) {
1350                                 PURGE_ATTRCACHE(vp);
1351                         }
1352                         /*
1353                          * This next check attempts to deal with NFS
1354                          * servers which cannot handle increasing
1355                          * the size of the file via setattr.  Most
1356                          * of these servers do not return an error,
1357                          * but also do not change the size of the file.
1358                          * Hence, this check and then an attempt to set
1359                          * the file size by writing one byte at the
1360                          * offset just below the new end of the file.
1361                          */
1362                         if ((mask & AT_SIZE) &&
1363                             ns.ns_attr.na_size < (uint32_t)vap->va_size) {
1364                                 char zb = '\0';
1365 
1366                                 error = nfswrite(vp, &zb,
1367                                     vap->va_size - sizeof (zb),
1368                                     sizeof (zb), cr);
1369                         }
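                        /*
                         * Editorial sketch of the workaround above, with
                         * hypothetical sizes: to grow a file from 100 to
                         * 4096 bytes on such a server, the client writes
                         * a single '\0' at offset 4095 (vap->va_size -
                         * sizeof (zb)), which forces the server to extend
                         * the file to 4096 bytes.
                         */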
1370                         /*
1371                          * Some servers will change the mode to clear the setuid
1372                          * and setgid bits when changing the uid or gid.  The
1373                          * client needs to compensate appropriately.
1374                          */
1375                         if (mask & (AT_UID | AT_GID)) {
1376                                 int terror;
1377 
1378                                 va.va_mask = AT_MODE;
1379                                 terror = nfsgetattr(vp, &va, cr);
1380                                 if (!terror &&
1381                                     (((mask & AT_MODE) &&
1382                                     va.va_mode != vap->va_mode) ||
1383                                     (!(mask & AT_MODE) &&
1384                                     va.va_mode != omode))) {
1385                                         va.va_mask = AT_MODE;
1386                                         if (mask & AT_MODE)
1387                                                 va.va_mode = vap->va_mode;
1388                                         else
1389                                                 va.va_mode = omode;
1390                                         (void) nfssetattr(vp, &va, 0, cr);
1391                                 }
1392                         }
1393                 } else {
1394                         PURGE_ATTRCACHE(vp);
1395                         PURGE_STALE_FH(error, vp, cr);
1396                 }
1397         } else {
1398                 PURGE_ATTRCACHE(vp);
1399         }
1400 
1401         return (error);
1402 }
1403 
1404 static int
1405 nfs_accessx(void *vp, int mode, cred_t *cr)
1406 {
1407         ASSERT(nfs_zone() == VTOMI((vnode_t *)vp)->mi_zone);
1408         return (nfs_access(vp, mode, 0, cr, NULL));
1409 }
1410 
1411 /* ARGSUSED */
1412 static int
1413 nfs_access(vnode_t *vp, int mode, int flags, cred_t *cr, caller_context_t *ct)
1414 {
1415         struct vattr va;
1416         int error;
1417         mntinfo_t *mi;
1418         int shift = 0;
1419 
1420         mi = VTOMI(vp);
1421 
1422         if (nfs_zone() != mi->mi_zone)
1423                 return (EIO);
1424         if (mi->mi_flags & MI_ACL) {
1425                 error = acl_access2(vp, mode, flags, cr);
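                /*
                 * acl_access2() may clear MI_ACL in mi_flags if the
                 * server turns out not to support the NFS_ACL protocol;
                 * in that case we fall through to the mode bit based
                 * check below.
                 */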
1426                 if (mi->mi_flags & MI_ACL)
1427                         return (error);
1428         }
1429 
1430         va.va_mask = AT_MODE | AT_UID | AT_GID;
1431         error = nfsgetattr(vp, &va, cr);
1432         if (error)
1433                 return (error);
1434 
1435         /*
1436          * Disallow write attempts on read-only
1437          * file systems, unless the file is a
1438          * device node.
1439          */
1440         if ((mode & VWRITE) && vn_is_readonly(vp) && !IS_DEVVP(vp))
1441                 return (EROFS);
1442 
1443         /*
1444          * Disallow attempts to access mandatory lock files.
1445          */
1446         if ((mode & (VWRITE | VREAD | VEXEC)) &&
1447             MANDLOCK(vp, va.va_mode))
1448                 return (EACCES);
1449 
1450         /*
1451          * Access check is based on only
1452          * one of owner, group, public.
1453          * If not owner, then check group.
1454          * If not a member of the group,
1455          * then check public access.
1456          */
1457         if (crgetuid(cr) != va.va_uid) {
1458                 shift += 3;
1459                 if (!groupmember(va.va_gid, cr))
1460                         shift += 3;
1461         }
1462 
1463         return (secpolicy_vnode_access2(cr, vp, va.va_uid,
1464             va.va_mode << shift, mode));
1465 }
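
/*
 * Editorial sketch of the shift logic above, with a hypothetical mode
 * and caller (not part of the original code):
 *
 *      with va_mode = 0754 and a caller which is neither the owner nor
 *      a group member, shift becomes 6 and va_mode << 6 moves the
 *      "other" triple (04) into the owner bit positions, so that
 *      secpolicy_vnode_access2() tests the requested VREAD/VWRITE/VEXEC
 *      bits against the permissions which actually apply to this
 *      caller.
 */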
1466 
1467 static int nfs_do_symlink_cache = 1;
1468 
1469 /* ARGSUSED */
1470 static int
1471 nfs_readlink(vnode_t *vp, struct uio *uiop, cred_t *cr, caller_context_t *ct)
1472 {
1473         int error;
1474         struct nfsrdlnres rl;
1475         rnode_t *rp;
1476         int douprintf;
1477         failinfo_t fi;
1478 
1479         /*
1480          * We want to be consistent with UFS semantics so we will return
1481          * EINVAL instead of ENXIO. This violates the XNFS spec and
1482          * RFC 1094, which are wrong anyway.  BUGID 1138002.
1483          */
1484         if (vp->v_type != VLNK)
1485                 return (EINVAL);
1486 
1487         if (nfs_zone() != VTOMI(vp)->mi_zone)
1488                 return (EIO);
1489 
1490         rp = VTOR(vp);
1491         if (nfs_do_symlink_cache && rp->r_symlink.contents != NULL) {
1492                 error = nfs_validate_caches(vp, cr);
1493                 if (error)
1494                         return (error);
1495                 mutex_enter(&rp->r_statelock);
1496                 if (rp->r_symlink.contents != NULL) {
1497                         error = uiomove(rp->r_symlink.contents,
1498                             rp->r_symlink.len, UIO_READ, uiop);
1499                         mutex_exit(&rp->r_statelock);
1500                         return (error);
1501                 }
1502                 mutex_exit(&rp->r_statelock);
1503         }
1504 
1506         rl.rl_data = kmem_alloc(NFS_MAXPATHLEN, KM_SLEEP);
1507 
1508         fi.vp = vp;
1509         fi.fhp = NULL;          /* no need to update, filehandle not copied */
1510         fi.copyproc = nfscopyfh;
1511         fi.lookupproc = nfslookup;
1512         fi.xattrdirproc = acl_getxattrdir2;
1513 
1514         douprintf = 1;
1515 
1516         error = rfs2call(VTOMI(vp), RFS_READLINK,
1517             xdr_readlink, (caddr_t)VTOFH(vp),
1518             xdr_rdlnres, (caddr_t)&rl, cr,
1519             &douprintf, &rl.rl_status, 0, &fi);
1520 
1521         if (error) {
1523                 kmem_free((void *)rl.rl_data, NFS_MAXPATHLEN);
1524                 return (error);
1525         }
1526 
1527         error = geterrno(rl.rl_status);
1528         if (!error) {
1529                 error = uiomove(rl.rl_data, (int)rl.rl_count, UIO_READ, uiop);
1530                 if (nfs_do_symlink_cache && rp->r_symlink.contents == NULL) {
1531                         mutex_enter(&rp->r_statelock);
1532                         if (rp->r_symlink.contents == NULL) {
1533                                 rp->r_symlink.contents = rl.rl_data;
1534                                 rp->r_symlink.len = (int)rl.rl_count;
1535                                 rp->r_symlink.size = NFS_MAXPATHLEN;
1536                                 mutex_exit(&rp->r_statelock);
1537                         } else {
1538                                 mutex_exit(&rp->r_statelock);
1540                                 kmem_free((void *)rl.rl_data,
1541                                     NFS_MAXPATHLEN);
1542                         }
1543                 } else {
1545                         kmem_free((void *)rl.rl_data, NFS_MAXPATHLEN);
1546                 }
1547         } else {
1548                 PURGE_STALE_FH(error, vp, cr);
1550                 kmem_free((void *)rl.rl_data, NFS_MAXPATHLEN);
1551         }
1552 
1553         /*
1554          * Conform to UFS semantics (see comment above)
1555          */
1556         return (error == ENXIO ? EINVAL : error);
1557 }
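
/*
 * The symlink cache above uses a check/lock/recheck idiom, sketched
 * here in outline (editorial sketch with generic names):
 *
 *      if (cache == NULL) {            unlocked fast path
 *              data = fetch over the wire;
 *              mutex_enter(&lock);
 *              if (cache == NULL)      recheck under the lock
 *                      cache = data;   this thread won the race
 *              else
 *                      free(data);     another thread filled it first
 *              mutex_exit(&lock);
 *      }
 */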
1558 
1559 /*
1560  * Flush local dirty pages to stable storage on the server.
1561  *
1562  * If FNODSYNC is specified, then there is nothing to do because
1563  * metadata changes are not cached on the client before being
1564  * sent to the server.
1565  */
1566 /* ARGSUSED */
1567 static int
1568 nfs_fsync(vnode_t *vp, int syncflag, cred_t *cr, caller_context_t *ct)
1569 {
1570         int error;
1571 
1572         if ((syncflag & FNODSYNC) || IS_SWAPVP(vp))
1573                 return (0);
1574 
1575         if (nfs_zone() != VTOMI(vp)->mi_zone)
1576                 return (EIO);
1577 
1578         error = nfs_putpage(vp, (offset_t)0, 0, 0, cr, ct);
1579         if (!error)
1580                 error = VTOR(vp)->r_error;
1581         return (error);
1582 }
1583 
1585 /*
1586  * Weirdness: if the file was removed, or was the target of a rename
1587  * operation, while it was open, it got renamed instead.  Here we
1588  * remove the renamed file.
1589  */
1590 /* ARGSUSED */
1591 static void
1592 nfs_inactive(vnode_t *vp, cred_t *cr, caller_context_t *ct)
1593 {
1594         rnode_t *rp;
1595 
1596         ASSERT(vp != DNLC_NO_VNODE);
1597 
1598         /*
1599          * If this is coming from the wrong zone, we let someone in the right
1600          * zone take care of it asynchronously.  We can get here due to
1601          * VN_RELE() being called from pageout() or fsflush().  This call may
1602          * potentially turn into an expensive no-op if, for instance, v_count
1603          * gets incremented in the meantime, but it's still correct.
1604          */
1605         if (nfs_zone() != VTOMI(vp)->mi_zone) {
1606                 nfs_async_inactive(vp, cr, nfs_inactive);
1607                 return;
1608         }
1609 
1610         rp = VTOR(vp);
1611 redo:
1612         if (rp->r_unldvp != NULL) {
1613                 /*
1614                  * Save the vnode pointer for the directory where the
1615                  * unlinked-open file got renamed, then set it to NULL
1616                  * to prevent another thread from getting here before
1617                  * we're done with the remove.  While we have the
1618                  * statelock, make local copies of the pertinent rnode
1619          * fields.  If we did not do this atomically, the
1620          * unl* fields could become inconsistent with respect
1621                  * to each other due to a race condition between this
1622                  * code and nfs_remove().  See bug report 1034328.
1623                  */
1624                 mutex_enter(&rp->r_statelock);
1625                 if (rp->r_unldvp != NULL) {
1626                         vnode_t *unldvp;
1627                         char *unlname;
1628                         cred_t *unlcred;
1629                         struct nfsdiropargs da;
1630                         enum nfsstat status;
1631                         int douprintf;
1632                         int error;
1633 
1634                         unldvp = rp->r_unldvp;
1635                         rp->r_unldvp = NULL;
1636                         unlname = rp->r_unlname;
1637                         rp->r_unlname = NULL;
1638                         unlcred = rp->r_unlcred;
1639                         rp->r_unlcred = NULL;
1640                         mutex_exit(&rp->r_statelock);
1641 
1642                         /*
1643                          * If there are any dirty pages left, then flush
1644                          * them.  This is unfortunate because they just
1645                          * may get thrown away during the remove operation,
1646                          * but we have to do this for correctness.
1647                          */
1648                         if (vn_has_cached_data(vp) &&
1649                             ((rp->r_flags & RDIRTY) || rp->r_count > 0)) {
1650                                 ASSERT(vp->v_type != VCHR);
1651                                 error = nfs_putpage(vp, (offset_t)0, 0, 0,
1652                                     cr, ct);
1653                                 if (error) {
1654                                         mutex_enter(&rp->r_statelock);
1655                                         if (!rp->r_error)
1656                                                 rp->r_error = error;
1657                                         mutex_exit(&rp->r_statelock);
1658                                 }
1659                         }
1660 
1661                         /*
1662                          * Do the remove operation on the renamed file
1663                          */
1664                         setdiropargs(&da, unlname, unldvp);
1665 
1666                         douprintf = 1;
1667 
1668                         (void) rfs2call(VTOMI(unldvp), RFS_REMOVE,
1669                             xdr_diropargs, (caddr_t)&da,
1670                             xdr_enum, (caddr_t)&status, unlcred,
1671                             &douprintf, &status, 0, NULL);
1672 
1673                         if (HAVE_RDDIR_CACHE(VTOR(unldvp)))
1674                                 nfs_purge_rddir_cache(unldvp);
1675                         PURGE_ATTRCACHE(unldvp);
1676 
1677                         /*
1678                          * Release stuff held for the remove
1679                          */
1680                         VN_RELE(unldvp);
1681                         kmem_free(unlname, MAXNAMELEN);
1682                         crfree(unlcred);
1683                         goto redo;
1684                 }
1685                 mutex_exit(&rp->r_statelock);
1686         }
1687 
1688         rp_addfree(rp, cr);
1689 }
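
/*
 * Editorial sketch of the unlinked-open-file lifecycle handled above
 * and in nfs_remove() (the temporary name is hypothetical):
 *
 *      open("f")               file is held open, v_count > 1
 *      unlink("f")             nfs_remove() renames f to a temporary
 *                              name and records r_unldvp, r_unlname
 *                              and r_unlcred in the rnode
 *      last close/VN_RELE()    nfs_inactive() sends RFS_REMOVE for the
 *                              temporary name, frees the saved name and
 *                              releases the directory vnode and
 *                              credentials
 */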
1690 
1691 /*
1692  * Remote file system operations having to do with directory manipulation.
1693  */
1694 
1695 /* ARGSUSED */
1696 static int
1697 nfs_lookup(vnode_t *dvp, char *nm, vnode_t **vpp, struct pathname *pnp,
1698         int flags, vnode_t *rdir, cred_t *cr, caller_context_t *ct,
1699         int *direntflags, pathname_t *realpnp)
1700 {
1701         int error;
1702         vnode_t *vp;
1703         vnode_t *avp = NULL;
1704         rnode_t *drp;
1705 
1706         if (nfs_zone() != VTOMI(dvp)->mi_zone)
1707                 return (EPERM);
1708 
1709         drp = VTOR(dvp);
1710 
1711         /*
1712          * Are we looking up extended attributes?  If so, "dvp" is
1713          * the file or directory for which we want attributes, and
1714          * we need a lookup of the hidden attribute directory
1715          * before we lookup the rest of the path.
1716          */
1717         if (flags & LOOKUP_XATTR) {
1718                 bool_t cflag = ((flags & CREATE_XATTR_DIR) != 0);
1719                 mntinfo_t *mi;
1720 
1721                 mi = VTOMI(dvp);
1722                 if (!(mi->mi_flags & MI_EXTATTR))
1723                         return (EINVAL);
1724 
1725                 if (nfs_rw_enter_sig(&drp->r_rwlock, RW_READER, INTR(dvp)))
1726                         return (EINTR);
1727 
1728                 (void) nfslookup_dnlc(dvp, XATTR_DIR_NAME, &avp, cr);
1729                 if (avp == NULL)
1730                         error = acl_getxattrdir2(dvp, &avp, cflag, cr, 0);
1731                 else
1732                         error = 0;
1733 
1734                 nfs_rw_exit(&drp->r_rwlock);
1735 
1736                 if (error) {
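                        /*
                         * MI_EXTATTR may have been cleared during the
                         * failed attempt if the server turned out not
                         * to support extended attributes; report
                         * EINVAL in that case.
                         */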
1737                         if (mi->mi_flags & MI_EXTATTR)
1738                                 return (error);
1739                         return (EINVAL);
1740                 }
1741                 dvp = avp;
1742                 drp = VTOR(dvp);
1743         }
1744 
1745         if (nfs_rw_enter_sig(&drp->r_rwlock, RW_READER, INTR(dvp))) {
1746                 error = EINTR;
1747                 goto out;
1748         }
1749 
1750         error = nfslookup(dvp, nm, vpp, pnp, flags, rdir, cr, 0);
1751 
1752         nfs_rw_exit(&drp->r_rwlock);
1753 
1754         /*
1755          * If vnode is a device, create special vnode.
1756          */
1757         if (!error && IS_DEVVP(*vpp)) {
1758                 vp = *vpp;
1759                 *vpp = specvp(vp, vp->v_rdev, vp->v_type, cr);
1760                 VN_RELE(vp);
1761         }
1762 
1763 out:
1764         if (avp != NULL)
1765                 VN_RELE(avp);
1766 
1767         return (error);
1768 }
1769 
1770 static int nfs_lookup_neg_cache = 1;
1771 
1772 #ifdef DEBUG
1773 static int nfs_lookup_dnlc_hits = 0;
1774 static int nfs_lookup_dnlc_misses = 0;
1775 static int nfs_lookup_dnlc_neg_hits = 0;
1776 static int nfs_lookup_dnlc_disappears = 0;
1777 static int nfs_lookup_dnlc_lookups = 0;
1778 #endif
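
/*
 * On a DEBUG kernel these counters can be inspected with mdb(1), for
 * example "echo nfs_lookup_dnlc_hits/D | mdb -k" (editorial usage
 * sketch).
 */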
1779 
1780 /* ARGSUSED */
1781 int
1782 nfslookup(vnode_t *dvp, char *nm, vnode_t **vpp, struct pathname *pnp,
1783         int flags, vnode_t *rdir, cred_t *cr, int rfscall_flags)
1784 {
1785         int error;
1786 
1787         ASSERT(nfs_zone() == VTOMI(dvp)->mi_zone);
1788 
1789         /*
1790          * If lookup is for "", just return dvp.  Don't need
1791          * to send it over the wire, look it up in the dnlc,
1792          * or perform any access checks.
1793          */
1794         if (*nm == '\0') {
1795                 VN_HOLD(dvp);
1796                 *vpp = dvp;
1797                 return (0);
1798         }
1799 
1800         /*
1801          * Can't do lookups in non-directories.
1802          */
1803         if (dvp->v_type != VDIR)
1804                 return (ENOTDIR);
1805 
1806         /*
1807          * If we're called with RFSCALL_SOFT, it's important that
1808          * the only rfscall is one we make directly; if we permit
1809          * an access call because we're looking up "." or validating
1810          * a dnlc hit, we'll deadlock because that rfscall will not
1811          * have RFSCALL_SOFT set.
1812          */
1813         if (rfscall_flags & RFSCALL_SOFT)
1814                 goto callit;
1815 
1816         /*
1817          * If lookup is for ".", just return dvp.  Don't need
1818          * to send it over the wire or look it up in the dnlc,
1819          * just need to check access.
1820          */
1821         if (strcmp(nm, ".") == 0) {
1822                 error = nfs_access(dvp, VEXEC, 0, cr, NULL);
1823                 if (error)
1824                         return (error);
1825                 VN_HOLD(dvp);
1826                 *vpp = dvp;
1827                 return (0);
1828         }
1829 
1830         /*
1831          * Lookup this name in the DNLC.  If there was a valid entry,
1832          * then return the results of the lookup.
1833          */
1834         error = nfslookup_dnlc(dvp, nm, vpp, cr);
1835         if (error || *vpp != NULL)
1836                 return (error);
1837 
1838 callit:
1839         error = nfslookup_otw(dvp, nm, vpp, cr, rfscall_flags);
1840 
1841         return (error);
1842 }
1843 
1844 static int
1845 nfslookup_dnlc(vnode_t *dvp, char *nm, vnode_t **vpp, cred_t *cr)
1846 {
1847         int error;
1848         vnode_t *vp;
1849 
1850         ASSERT(*nm != '\0');
1851         ASSERT(nfs_zone() == VTOMI(dvp)->mi_zone);
1852 
1853         /*
1854          * Lookup this name in the DNLC.  If successful, then validate
1855          * the caches and then recheck the DNLC.  The DNLC is rechecked
1856          * just in case this entry got invalidated during the call
1857          * to nfs_validate_caches.
1858          *
1859          * We assume it is safe to report that a file exists even
1860          * though it may no longer exist on the server.  Any operations
1861          * sent to the server for such a file will fail with ESTALE.
1862          */
1863 #ifdef DEBUG
1864         nfs_lookup_dnlc_lookups++;
1865 #endif
1866         vp = dnlc_lookup(dvp, nm);
1867         if (vp != NULL) {
1868                 VN_RELE(vp);
1869                 if (vp == DNLC_NO_VNODE && !vn_is_readonly(dvp)) {
1870                         PURGE_ATTRCACHE(dvp);
1871                 }
1872                 error = nfs_validate_caches(dvp, cr);
1873                 if (error)
1874                         return (error);
1875                 vp = dnlc_lookup(dvp, nm);
1876                 if (vp != NULL) {
1877                         error = nfs_access(dvp, VEXEC, 0, cr, NULL);
1878                         if (error) {
1879                                 VN_RELE(vp);
1880                                 return (error);
1881                         }
1882                         if (vp == DNLC_NO_VNODE) {
1883                                 VN_RELE(vp);
1884 #ifdef DEBUG
1885                                 nfs_lookup_dnlc_neg_hits++;
1886 #endif
1887                                 return (ENOENT);
1888                         }
1889                         *vpp = vp;
1890 #ifdef DEBUG
1891                         nfs_lookup_dnlc_hits++;
1892 #endif
1893                         return (0);
1894                 }
1895 #ifdef DEBUG
1896                 nfs_lookup_dnlc_disappears++;
1897 #endif
1898         }
1899 #ifdef DEBUG
1900         else
1901                 nfs_lookup_dnlc_misses++;
1902 #endif
1903 
1904         *vpp = NULL;
1905 
1906         return (0);
1907 }
1908 
1909 static int
1910 nfslookup_otw(vnode_t *dvp, char *nm, vnode_t **vpp, cred_t *cr,
1911         int rfscall_flags)
1912 {
1913         int error;
1914         struct nfsdiropargs da;
1915         struct nfsdiropres dr;
1916         int douprintf;
1917         failinfo_t fi;
1918         hrtime_t t;
1919 
1920         ASSERT(*nm != '\0');
1921         ASSERT(dvp->v_type == VDIR);
1922         ASSERT(nfs_zone() == VTOMI(dvp)->mi_zone);
1923 
1924         setdiropargs(&da, nm, dvp);
1925 
1926         fi.vp = dvp;
1927         fi.fhp = NULL;          /* no need to update, filehandle not copied */
1928         fi.copyproc = nfscopyfh;
1929         fi.lookupproc = nfslookup;
1930         fi.xattrdirproc = acl_getxattrdir2;
1931 
1932         douprintf = 1;
1933 
1934         t = gethrtime();
1935 
1936         error = rfs2call(VTOMI(dvp), RFS_LOOKUP,
1937             xdr_diropargs, (caddr_t)&da,
1938             xdr_diropres, (caddr_t)&dr, cr,
1939             &douprintf, &dr.dr_status, rfscall_flags, &fi);
1940 
1941         if (!error) {
1942                 error = geterrno(dr.dr_status);
1943                 if (!error) {
1944                         *vpp = makenfsnode(&dr.dr_fhandle, &dr.dr_attr,
1945                             dvp->v_vfsp, t, cr, VTOR(dvp)->r_path, nm);
1946                         /*
1947                          * If NFS_ACL is supported on the server, then the
1948                          * attributes returned by the server may have minimal
1949                          * permissions, sometimes denying access to users who
1950                          * have proper access.  To get the proper attributes,
1951                          * mark the attributes as expired so that they will be
1952                          * fetched again via the NFS_ACL GETATTR2 procedure.
1953                          */
1954                         if (VTOMI(*vpp)->mi_flags & MI_ACL) {
1955                                 PURGE_ATTRCACHE(*vpp);
1956                         }
1957                         if (!(rfscall_flags & RFSCALL_SOFT))
1958                                 dnlc_update(dvp, nm, *vpp);
1959                 } else {
1960                         PURGE_STALE_FH(error, dvp, cr);
1961                         if (error == ENOENT && nfs_lookup_neg_cache)
1962                                 dnlc_enter(dvp, nm, DNLC_NO_VNODE);
1963                 }
1964         }
1965 
1966         return (error);
1967 }
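
/*
 * Editorial sketch of the negative-caching round trip implemented by
 * nfslookup_dnlc() and nfslookup_otw() above:
 *
 *      1. nfslookup_dnlc() misses; nfslookup_otw() goes over the wire
 *         and the server answers ENOENT.
 *      2. With nfs_lookup_neg_cache set, dnlc_enter(dvp, nm,
 *         DNLC_NO_VNODE) records the negative result.
 *      3. The next lookup of the same name finds DNLC_NO_VNODE in the
 *         cache and returns ENOENT without another RPC.
 */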
1968 
1969 /* ARGSUSED */
1970 static int
1971 nfs_create(vnode_t *dvp, char *nm, struct vattr *va, enum vcexcl exclusive,
1972         int mode, vnode_t **vpp, cred_t *cr, int lfaware, caller_context_t *ct,
1973         vsecattr_t *vsecp)
1974 {
1975         int error;
1976         struct nfscreatargs args;
1977         struct nfsdiropres dr;
1978         int douprintf;
1979         vnode_t *vp;
1980         rnode_t *rp;
1981         struct vattr vattr;
1982         rnode_t *drp;
1983         vnode_t *tempvp;
1984         hrtime_t t;
1985 
1986         drp = VTOR(dvp);
1987 
1988         if (nfs_zone() != VTOMI(dvp)->mi_zone)
1989                 return (EPERM);
1990         if (nfs_rw_enter_sig(&drp->r_rwlock, RW_WRITER, INTR(dvp)))
1991                 return (EINTR);
1992 
1993         /*
1994          * We make a copy of the attributes because the caller does not
1995          * expect us to change what va points to.
1996          */
1997         vattr = *va;
1998 
1999         /*
2000          * If the pathname is "", just use dvp.  Don't need
2001          * to send it over the wire, look it up in the dnlc,
2002          * or perform any access checks.
2003          */
2004         if (*nm == '\0') {
2005                 error = 0;
2006                 VN_HOLD(dvp);
2007                 vp = dvp;
2008         /*
2009          * If the pathname is ".", just use dvp.  Don't need
2010          * to send it over the wire or look it up in the dnlc,
2011          * just need to check access.
2012          */
2013         } else if (strcmp(nm, ".") == 0) {
2014                 error = nfs_access(dvp, VEXEC, 0, cr, ct);
2015                 if (error) {
2016                         nfs_rw_exit(&drp->r_rwlock);
2017                         return (error);
2018                 }
2019                 VN_HOLD(dvp);
2020                 vp = dvp;
2021         /*
2022          * We need to go over the wire, just to be sure whether the
2023          * file exists or not.  Using the DNLC to decide on
2024          * existence can be dangerous in this case.
2025          */
2026         } else {
2027                 error = nfslookup_otw(dvp, nm, &vp, cr, 0);
2028         }
2029         if (!error) {
2030                 if (exclusive == EXCL)
2031                         error = EEXIST;
2032                 else if (vp->v_type == VDIR && (mode & VWRITE))
2033                         error = EISDIR;
2034                 else {
2035                         /*
2036                          * If vnode is a device, create special vnode.
2037                          */
2038                         if (IS_DEVVP(vp)) {
2039                                 tempvp = vp;
2040                                 vp = specvp(vp, vp->v_rdev, vp->v_type, cr);
2041                                 VN_RELE(tempvp);
2042                         }
2043                         if (!(error = VOP_ACCESS(vp, mode, 0, cr, ct))) {
2044                                 if ((vattr.va_mask & AT_SIZE) &&
2045                                     vp->v_type == VREG) {
2046                                         vattr.va_mask = AT_SIZE;
2047                                         error = nfssetattr(vp, &vattr, 0, cr);
2048 
2049                                         if (!error) {
2050                                                 /*
2051                                                  * Existing file was truncated;
2052                                                  * emit a create event.
2053                                                  */
2054                                                 vnevent_create(vp, ct);
2055                                         }
2056                                 }
2057                         }
2058                 }
2059                 nfs_rw_exit(&drp->r_rwlock);
2060                 if (error) {
2061                         VN_RELE(vp);
2062                 } else {
2063                         *vpp = vp;
2064                 }
2065                 return (error);
2066         }
2067 
2068         ASSERT(vattr.va_mask & AT_TYPE);
2069         if (vattr.va_type == VREG) {
2070                 ASSERT(vattr.va_mask & AT_MODE);
2071                 if (MANDMODE(vattr.va_mode)) {
2072                         nfs_rw_exit(&drp->r_rwlock);
2073                         return (EACCES);
2074                 }
2075         }
2076 
2077         dnlc_remove(dvp, nm);
2078 
2079         setdiropargs(&args.ca_da, nm, dvp);
2080 
2081         /*
2082          * Decide what the group-id of the created file should be.
2083          * Set it in attribute list as advisory...then do a setattr
2084          * if the server didn't get it right the first time.
2085          */
2086         error = setdirgid(dvp, &vattr.va_gid, cr);
2087         if (error) {
2088                 nfs_rw_exit(&drp->r_rwlock);
2089                 return (error);
2090         }
2091         vattr.va_mask |= AT_GID;
2092 
2093         /*
2094          * This is a completely gross hack to make mknod
2095          * work over the wire until we can whack the protocol
2096          */
2097 #define IFCHR           0020000         /* character special */
2098 #define IFBLK           0060000         /* block special */
2099 #define IFSOCK          0140000         /* socket */
2100 
2101         /*
2102          * dev_t is uint_t in 5.x and short in 4.x.  4.x supports
2103          * 8 bit majors and 8 bit minors; 5.x supports 14 bit majors
2104          * and 18 bit minors.  If the 5.x major and minor numbers are
2105          * both <= 8 bits long, compress the device number before
2106          * sending it.  Otherwise, the 4.x server will not create the
2107          * device with the correct device number and nothing can be
2108          * done about it.
2109          */
2110         if (vattr.va_type == VCHR || vattr.va_type == VBLK) {
2111                 dev_t d = vattr.va_rdev;
2112                 dev32_t dev32;
2113 
2114                 if (vattr.va_type == VCHR)
2115                         vattr.va_mode |= IFCHR;
2116                 else
2117                         vattr.va_mode |= IFBLK;
2118 
2119                 (void) cmpldev(&dev32, d);
2120                 if (dev32 & ~((SO4_MAXMAJ << L_BITSMINOR32) | SO4_MAXMIN))
2121                         vattr.va_size = (u_offset_t)dev32;
2122                 else
2123                         vattr.va_size = (u_offset_t)nfsv2_cmpdev(d);
2124 
2125                 vattr.va_mask |= AT_MODE|AT_SIZE;
2126         } else if (vattr.va_type == VFIFO) {
2127                 vattr.va_mode |= IFCHR;         /* xtra kludge for namedpipe */
2128                 vattr.va_size = (u_offset_t)NFS_FIFO_DEV;       /* blech */
2129                 vattr.va_mask |= AT_MODE|AT_SIZE;
2130         } else if (vattr.va_type == VSOCK) {
2131                 vattr.va_mode |= IFSOCK;
2132                 /*
2133                  * To avoid triggering bugs in the servers, set AT_SIZE
2134                  * (all other RFS_CREATE calls set this).
2135                  */
2136                 vattr.va_size = 0;
2137                 vattr.va_mask |= AT_MODE|AT_SIZE;
2138         }
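        /*
         * Editorial sketch of the compression above with hypothetical
         * numbers: a device with major 3, minor 5 fits in 8+8 bits, so
         * nfsv2_cmpdev() packs it into the old 16-bit major/minor
         * layout that a 4.x server can store; a device such as major
         * 300, minor 70000 cannot be represented that way and is sent
         * as the raw 32-bit dev32 value instead.
         */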
2139 
2140         args.ca_sa = &args.ca_sa_buf;
2141         error = vattr_to_sattr(&vattr, args.ca_sa);
2142         if (error) {
2143                 /* req time field(s) overflow - return immediately */
2144                 nfs_rw_exit(&drp->r_rwlock);
2145                 return (error);
2146         }
2147 
2148         douprintf = 1;
2149 
2150         t = gethrtime();
2151 
2152         error = rfs2call(VTOMI(dvp), RFS_CREATE,
2153             xdr_creatargs, (caddr_t)&args,
2154             xdr_diropres, (caddr_t)&dr, cr,
2155             &douprintf, &dr.dr_status, 0, NULL);
2156 
2157         PURGE_ATTRCACHE(dvp);   /* mod time changed */
2158 
2159         if (!error) {
2160                 error = geterrno(dr.dr_status);
2161                 if (!error) {
2162                         if (HAVE_RDDIR_CACHE(drp))
2163                                 nfs_purge_rddir_cache(dvp);
2164                         vp = makenfsnode(&dr.dr_fhandle, &dr.dr_attr,
2165                             dvp->v_vfsp, t, cr, NULL, NULL);
2166                         /*
2167                          * If NFS_ACL is supported on the server, then the
2168                          * attributes returned by the server may have minimal
2169                          * permissions, sometimes denying access to users who
2170                          * have proper access.  To get the proper attributes,
2171                          * mark the attributes as expired so that they will be
2172                          * fetched again via the NFS_ACL GETATTR2 procedure.
2173                          */
2174                         if (VTOMI(vp)->mi_flags & MI_ACL) {
2175                                 PURGE_ATTRCACHE(vp);
2176                         }
2177                         dnlc_update(dvp, nm, vp);
2178                         rp = VTOR(vp);
2179                         if (vattr.va_size == 0) {
2180                                 mutex_enter(&rp->r_statelock);
2181                                 rp->r_size = 0;
2182                                 mutex_exit(&rp->r_statelock);
2183                                 if (vn_has_cached_data(vp)) {
2184                                         ASSERT(vp->v_type != VCHR);
2185                                         nfs_invalidate_pages(vp,
2186                                             (u_offset_t)0, cr);
2187                                 }
2188                         }
2189 
2190                         /*
2191                          * Make sure the gid was set correctly.
2192                          * If not, try to set it (but don't lose
2193                          * any sleep over it).
2194                          */
2195                         if (vattr.va_gid != rp->r_attr.va_gid) {
2196                                 vattr.va_mask = AT_GID;
2197                                 (void) nfssetattr(vp, &vattr, 0, cr);
2198                         }
2199 
2200                         /*
2201                          * If vnode is a device create special vnode
2202                          */
2203                         if (IS_DEVVP(vp)) {
2204                                 *vpp = specvp(vp, vp->v_rdev, vp->v_type, cr);
2205                                 VN_RELE(vp);
2206                         } else
2207                                 *vpp = vp;
2208                 } else {
2209                         PURGE_STALE_FH(error, dvp, cr);
2210                 }
2211         }
2212 
2213         nfs_rw_exit(&drp->r_rwlock);
2214 
2215         return (error);
2216 }
2217 
2218 /*
2219  * Weirdness: if the vnode to be removed is open,
2220  * we rename it instead of removing it, and nfs_inactive
2221  * will remove the new name.
2222  */
2223 /* ARGSUSED */
2224 static int
2225 nfs_remove(vnode_t *dvp, char *nm, cred_t *cr, caller_context_t *ct, int flags)
2226 {
2227         int error;
2228         struct nfsdiropargs da;
2229         enum nfsstat status;
2230         vnode_t *vp;
2231         char *tmpname;
2232         int douprintf;
2233         rnode_t *rp;
2234         rnode_t *drp;
2235 
2236         if (nfs_zone() != VTOMI(dvp)->mi_zone)
2237                 return (EPERM);
2238         drp = VTOR(dvp);
2239         if (nfs_rw_enter_sig(&drp->r_rwlock, RW_WRITER, INTR(dvp)))
2240                 return (EINTR);
2241 
2242         error = nfslookup(dvp, nm, &vp, NULL, 0, NULL, cr, 0);
2243         if (error) {
2244                 nfs_rw_exit(&drp->r_rwlock);
2245                 return (error);
2246         }
2247 
2248         if (vp->v_type == VDIR && secpolicy_fs_linkdir(cr, dvp->v_vfsp)) {
2249                 VN_RELE(vp);
2250                 nfs_rw_exit(&drp->r_rwlock);
2251                 return (EPERM);
2252         }
2253 
2254         /*
2255          * First just remove the entry from the name cache, as it
2256          * is most likely the only entry for this vp.
2257          */
2258         dnlc_remove(dvp, nm);
2259 
2260         /*
2261          * If the file has a v_count > 1 then there may be more than one
2262          * entry in the name cache due to multiple links or an open file,
2263          * but we don't have the real reference count so flush all
2264          * possible entries.
2265          */
2266         if (vp->v_count > 1)
2267                 dnlc_purge_vp(vp);
2268 
2269         /*
2270          * Now we have the real reference count on the vnode
2271          */
2272         rp = VTOR(vp);
2273         mutex_enter(&rp->r_statelock);
2274         if (vp->v_count > 1 &&
2275             (rp->r_unldvp == NULL || strcmp(nm, rp->r_unlname) == 0)) {
2276                 mutex_exit(&rp->r_statelock);
2277                 tmpname = newname();
2278                 error = nfsrename(dvp, nm, dvp, tmpname, cr, ct);
2279                 if (error)
2280                         kmem_free(tmpname, MAXNAMELEN);
2281                 else {
2282                         mutex_enter(&rp->r_statelock);
2283                         if (rp->r_unldvp == NULL) {
2284                                 VN_HOLD(dvp);
2285                                 rp->r_unldvp = dvp;
2286                                 if (rp->r_unlcred != NULL)
2287                                         crfree(rp->r_unlcred);
2288                                 crhold(cr);
2289                                 rp->r_unlcred = cr;
2290                                 rp->r_unlname = tmpname;
2291                         } else {
2292                                 kmem_free(rp->r_unlname, MAXNAMELEN);
2293                                 rp->r_unlname = tmpname;
2294                         }
2295                         mutex_exit(&rp->r_statelock);
2296                 }
2297         } else {
2298                 mutex_exit(&rp->r_statelock);
2299                 /*
2300                  * We need to flush any dirty pages which happen to
2301                  * be hanging around before removing the file.  This
2302                  * shouldn't happen very often and mostly on file
2303                  * shouldn't happen very often, and mostly occurs on
2304                  * file systems mounted "nocto".
2305                 if (vn_has_cached_data(vp) &&
2306                     ((rp->r_flags & RDIRTY) || rp->r_count > 0)) {
2307                         error = nfs_putpage(vp, (offset_t)0, 0, 0, cr, ct);
2308                         if (error && (error == ENOSPC || error == EDQUOT)) {
2309                                 mutex_enter(&rp->r_statelock);
2310                                 if (!rp->r_error)
2311                                         rp->r_error = error;
2312                                 mutex_exit(&rp->r_statelock);
2313                         }
2314                 }
2315 
2316                 setdiropargs(&da, nm, dvp);
2317 
2318                 douprintf = 1;
2319 
2320                 error = rfs2call(VTOMI(dvp), RFS_REMOVE,
2321                     xdr_diropargs, (caddr_t)&da,
2322                     xdr_enum, (caddr_t)&status, cr,
2323                     &douprintf, &status, 0, NULL);
2324 
2325                 /*
2326                  * The xattr dir may be gone after last attr is removed,
2327                  * so flush it from dnlc.
2328                  */
2329                 if (dvp->v_flag & V_XATTRDIR)
2330                         dnlc_purge_vp(dvp);
2331 
2332                 PURGE_ATTRCACHE(dvp);   /* mod time changed */
2333                 PURGE_ATTRCACHE(vp);    /* link count changed */
2334 
2335                 if (!error) {
2336                         error = geterrno(status);
2337                         if (!error) {
2338                                 if (HAVE_RDDIR_CACHE(drp))
2339                                         nfs_purge_rddir_cache(dvp);
2340                         } else {
2341                                 PURGE_STALE_FH(error, dvp, cr);
2342                         }
2343                 }
2344         }
2345 
2346         if (error == 0) {
2347                 vnevent_remove(vp, dvp, nm, ct);
2348         }
2349         VN_RELE(vp);
2350 
2351         nfs_rw_exit(&drp->r_rwlock);
2352 
2353         return (error);
2354 }
2355 
2356 /* ARGSUSED */
2357 static int
2358 nfs_link(vnode_t *tdvp, vnode_t *svp, char *tnm, cred_t *cr,
2359         caller_context_t *ct, int flags)
2360 {
2361         int error;
2362         struct nfslinkargs args;
2363         enum nfsstat status;
2364         vnode_t *realvp;
2365         int douprintf;
2366         rnode_t *tdrp;
2367 
2368         if (nfs_zone() != VTOMI(tdvp)->mi_zone)
2369                 return (EPERM);
2370         if (VOP_REALVP(svp, &realvp, ct) == 0)
2371                 svp = realvp;
2372 
2373         args.la_from = VTOFH(svp);
2374         setdiropargs(&args.la_to, tnm, tdvp);
2375 
2376         tdrp = VTOR(tdvp);
2377         if (nfs_rw_enter_sig(&tdrp->r_rwlock, RW_WRITER, INTR(tdvp)))
2378                 return (EINTR);
2379 
2380         dnlc_remove(tdvp, tnm);
2381 
2382         douprintf = 1;
2383 
2384         error = rfs2call(VTOMI(svp), RFS_LINK,
2385             xdr_linkargs, (caddr_t)&args,
2386             xdr_enum, (caddr_t)&status, cr,
2387             &douprintf, &status, 0, NULL);
2388 
2389         PURGE_ATTRCACHE(tdvp);  /* mod time changed */
2390         PURGE_ATTRCACHE(svp);   /* link count changed */
2391 
2392         if (!error) {
2393                 error = geterrno(status);
2394                 if (!error) {
2395                         if (HAVE_RDDIR_CACHE(tdrp))
2396                                 nfs_purge_rddir_cache(tdvp);
2397                 }
2398         }
2399 
2400         nfs_rw_exit(&tdrp->r_rwlock);
2401 
2402         if (!error) {
2403                 /*
2404                  * Notify the source file of this link operation.
2405                  */
2406                 vnevent_link(svp, ct);
2407         }
2408         return (error);
2409 }
2410 
2411 /* ARGSUSED */
2412 static int
2413 nfs_rename(vnode_t *odvp, char *onm, vnode_t *ndvp, char *nnm, cred_t *cr,
2414         caller_context_t *ct, int flags)
2415 {
2416         vnode_t *realvp;
2417 
2418         if (nfs_zone() != VTOMI(odvp)->mi_zone)
2419                 return (EPERM);
2420         if (VOP_REALVP(ndvp, &realvp, ct) == 0)
2421                 ndvp = realvp;
2422 
2423         return (nfsrename(odvp, onm, ndvp, nnm, cr, ct));
2424 }
2425 
2426 /*
2427  * nfsrename does the real work of renaming in NFS Version 2.
2428  */
2429 static int
2430 nfsrename(vnode_t *odvp, char *onm, vnode_t *ndvp, char *nnm, cred_t *cr,
2431     caller_context_t *ct)
2432 {
2433         int error;
2434         enum nfsstat status;
2435         struct nfsrnmargs args;
2436         int douprintf;
2437         vnode_t *nvp = NULL;
2438         vnode_t *ovp = NULL;
2439         char *tmpname;
2440         rnode_t *rp;
2441         rnode_t *odrp;
2442         rnode_t *ndrp;
2443 
2444         ASSERT(nfs_zone() == VTOMI(odvp)->mi_zone);
2445         if (strcmp(onm, ".") == 0 || strcmp(onm, "..") == 0 ||
2446             strcmp(nnm, ".") == 0 || strcmp(nnm, "..") == 0)
2447                 return (EINVAL);
2448 
2449         odrp = VTOR(odvp);
2450         ndrp = VTOR(ndvp);
2451         if ((intptr_t)odrp < (intptr_t)ndrp) {
2452                 if (nfs_rw_enter_sig(&odrp->r_rwlock, RW_WRITER, INTR(odvp)))
2453                         return (EINTR);
2454                 if (nfs_rw_enter_sig(&ndrp->r_rwlock, RW_WRITER, INTR(ndvp))) {
2455                         nfs_rw_exit(&odrp->r_rwlock);
2456                         return (EINTR);
2457                 }
2458         } else {
2459                 if (nfs_rw_enter_sig(&ndrp->r_rwlock, RW_WRITER, INTR(ndvp)))
2460                         return (EINTR);
2461                 if (nfs_rw_enter_sig(&odrp->r_rwlock, RW_WRITER, INTR(odvp))) {
2462                         nfs_rw_exit(&ndrp->r_rwlock);
2463                         return (EINTR);
2464                 }
2465         }
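        /*
         * The address comparison above imposes a single global lock
         * order, a standard deadlock-avoidance idiom.  Sketch: if one
         * thread does rename(A, B) while another does rename(B, A),
         * both lock the rnode with the lower address first, so one
         * thread acquires both r_rwlocks while the other blocks on its
         * first lock, instead of each holding one lock and waiting
         * forever for the other.
         */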
2466 
2467         /*
2468          * Lookup the target file.  If it exists, it needs to be
2469          * checked to see whether it is a mount point and whether
2470          * it is active (open).
2471          */
2472         error = nfslookup(ndvp, nnm, &nvp, NULL, 0, NULL, cr, 0);
2473         if (!error) {
2474                 /*
2475                  * If this file has been mounted on, then just
2476                  * return busy because renaming to it would remove
2477                  * the mounted file system from the name space.
2478                  */
2479                 if (vn_mountedvfs(nvp) != NULL) {
2480                         VN_RELE(nvp);
2481                         nfs_rw_exit(&odrp->r_rwlock);
2482                         nfs_rw_exit(&ndrp->r_rwlock);
2483                         return (EBUSY);
2484                 }
2485 
2486                 /*
2487                  * Purge the name cache of all references to this vnode
2488                  * so that we can check the reference count to infer
2489                  * whether it is active or not.
2490                  */
2491                 /*
2492                  * First just remove the entry from the name cache, as it
2493                  * is most likely the only entry for this vp.
2494                  */
2495                 dnlc_remove(ndvp, nnm);
2496                 /*
2497                  * If the file has a v_count > 1 then there may be more
2498                  * than one entry in the name cache due to multiple links
2499                  * or an open file, but we don't have the real reference
2500                  * count so flush all possible entries.
2501                  */
2502                 if (nvp->v_count > 1)
2503                         dnlc_purge_vp(nvp);
2504 
2505                 /*
2506                  * If the vnode is active and is not a directory,
2507                  * arrange to rename it to a temporary file so that
2508                  * it will continue to be accessible.  This implements
2509                  * the "unlink-open-file" semantics for the target of
2510                  * a rename operation.
2511                  * Before doing this though, make sure that the
2512                  * source and target files are not already the same.
2513                  */
2514                 if (nvp->v_count > 1 && nvp->v_type != VDIR) {
2515                         /*
2516                          * Lookup the source name.
2517                          */
2518                         error = nfslookup(odvp, onm, &ovp, NULL, 0, NULL,
2519                             cr, 0);
2520 
2521                         /*
2522                          * The source name *should* already exist.
2523                          */
2524                         if (error) {
2525                                 VN_RELE(nvp);
2526                                 nfs_rw_exit(&odrp->r_rwlock);
2527                                 nfs_rw_exit(&ndrp->r_rwlock);
2528                                 return (error);
2529                         }
2530 
2531                         /*
2532                          * Compare the two vnodes.  If they are the same,
2533                          * just release all held vnodes and return success.
2534                          */
2535                         if (ovp == nvp) {
2536                                 VN_RELE(ovp);
2537                                 VN_RELE(nvp);
2538                                 nfs_rw_exit(&odrp->r_rwlock);
2539                                 nfs_rw_exit(&ndrp->r_rwlock);
2540                                 return (0);
2541                         }
2542 
2543                         /*
2544                          * Can't mix and match directories and non-
2545                          * directories in rename operations.  We already
2546                          * know that the target is not a directory.  If
2547                          * the source is a directory, return an error.
2548                          */
2549                         if (ovp->v_type == VDIR) {
2550                                 VN_RELE(ovp);
2551                                 VN_RELE(nvp);
2552                                 nfs_rw_exit(&odrp->r_rwlock);
2553                                 nfs_rw_exit(&ndrp->r_rwlock);
2554                                 return (ENOTDIR);
2555                         }
2556 
2557                         /*
2558                          * The target file exists, is not the same as
2559                          * the source file, and is active.  Link it
2560                          * to a temporary filename to avoid having
2561                          * the server removing the file completely.
2562                          */
2563                         tmpname = newname();
2564                         error = nfs_link(ndvp, nvp, tmpname, cr, NULL, 0);
2565                         if (error == EOPNOTSUPP) {
2566                                 error = nfs_rename(ndvp, nnm, ndvp, tmpname,
2567                                     cr, NULL, 0);
2568                         }
2569                         if (error) {
2570                                 kmem_free(tmpname, MAXNAMELEN);
2571                                 VN_RELE(ovp);
2572                                 VN_RELE(nvp);
2573                                 nfs_rw_exit(&odrp->r_rwlock);
2574                                 nfs_rw_exit(&ndrp->r_rwlock);
2575                                 return (error);
2576                         }
2577                         rp = VTOR(nvp);
2578                         mutex_enter(&rp->r_statelock);
2579                         if (rp->r_unldvp == NULL) {
2580                                 VN_HOLD(ndvp);
2581                                 rp->r_unldvp = ndvp;
2582                                 if (rp->r_unlcred != NULL)
2583                                         crfree(rp->r_unlcred);
2584                                 crhold(cr);
2585                                 rp->r_unlcred = cr;
2586                                 rp->r_unlname = tmpname;
2587                         } else {
2588                                 kmem_free(rp->r_unlname, MAXNAMELEN);
2589                                 rp->r_unlname = tmpname;
2590                         }
2591                         mutex_exit(&rp->r_statelock);
2592                 }
2593         }
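        /*
         * Editorial sketch of the target-side unlink-open-file case
         * above (hypothetical names): rename("a", "b") while "b" is
         * open first links "b" to a temporary name, falling back to a
         * rename if the server does not support link, so that the open
         * file's data survives the RFS_RENAME below which would
         * otherwise remove it; nfs_inactive() later removes the
         * temporary name on last close.
         */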
2594 
2595         if (ovp == NULL) {
2596                 /*
2597                  * When renaming directories to be a subdirectory of a
2598                  * different parent, the dnlc entry for ".." will no
2599                  * longer be valid, so it must be removed.
2600                  *
2601                  * We do a lookup here to determine whether we are renaming
2602                  * a directory and to check whether we are renaming
2603                  * an unlinked file.  The lookup might already have been
2604                  * done above, so we check ovp == NULL to avoid
2605                  * doing it twice.
2606                  */
2607 
2608                 error = nfslookup(odvp, onm, &ovp, NULL, 0, NULL, cr, 0);
2609 
2610                 /*
2611                  * The source name *should* already exist.
2612                  */
2613                 if (error) {
2614                         nfs_rw_exit(&odrp->r_rwlock);
2615                         nfs_rw_exit(&ndrp->r_rwlock);
2616                         if (nvp) {
2617                                 VN_RELE(nvp);
2618                         }
2619                         return (error);
2620                 }
2621                 ASSERT(ovp != NULL);
2622         }
2623 
2624         dnlc_remove(odvp, onm);
2625         dnlc_remove(ndvp, nnm);
2626 
2627         setdiropargs(&args.rna_from, onm, odvp);
2628         setdiropargs(&args.rna_to, nnm, ndvp);
2629 
2630         douprintf = 1;
2631 
2632         error = rfs2call(VTOMI(odvp), RFS_RENAME,
2633             xdr_rnmargs, (caddr_t)&args,
2634             xdr_enum, (caddr_t)&status, cr,
2635             &douprintf, &status, 0, NULL);
2636 
2637         PURGE_ATTRCACHE(odvp);  /* mod time changed */
2638         PURGE_ATTRCACHE(ndvp);  /* mod time changed */
2639 
2640         if (!error) {
2641                 error = geterrno(status);
2642                 if (!error) {
2643                         if (HAVE_RDDIR_CACHE(odrp))
2644                                 nfs_purge_rddir_cache(odvp);
2645                         if (HAVE_RDDIR_CACHE(ndrp))
2646                                 nfs_purge_rddir_cache(ndvp);
2647                         /*
2648                          * When renaming directories to be a subdirectory of a
2649                          * different parent, the dnlc entry for ".." will no
2650                          * longer be valid, so it must be removed.
2651                          */
2652                         rp = VTOR(ovp);
2653                         if (ndvp != odvp) {
2654                                 if (ovp->v_type == VDIR) {
2655                                         dnlc_remove(ovp, "..");
2656                                         if (HAVE_RDDIR_CACHE(rp))
2657                                                 nfs_purge_rddir_cache(ovp);
2658                                 }
2659                         }
2660 
2661                         /*
2662                          * If we are renaming the unlinked file, update the
2663                          * r_unldvp and r_unlname as needed.
2664                          */
2665                         mutex_enter(&rp->r_statelock);
2666                         if (rp->r_unldvp != NULL) {
2667                                 if (strcmp(rp->r_unlname, onm) == 0) {
2668                                         (void) strncpy(rp->r_unlname,
2669                                             nnm, MAXNAMELEN);
2670                                         rp->r_unlname[MAXNAMELEN - 1] = '\0';
2671 
2672                                         if (ndvp != rp->r_unldvp) {
2673                                                 VN_RELE(rp->r_unldvp);
2674                                                 rp->r_unldvp = ndvp;
2675                                                 VN_HOLD(ndvp);
2676                                         }
2677                                 }
2678                         }
2679                         mutex_exit(&rp->r_statelock);
2680                 } else {
2681                         /*
2682                          * System V defines rename to return EEXIST, not
2683                          * ENOTEMPTY, if the target directory is not empty.
2684                          * Over the wire, the error is NFSERR_ENOTEMPTY
2685                          * which geterrno maps to ENOTEMPTY.
2686                          */
2687                         if (error == ENOTEMPTY)
2688                                 error = EEXIST;
2689                 }
2690         }
2691 
2692         if (error == 0) {
2693                 if (nvp)
2694                         vnevent_rename_dest(nvp, ndvp, nnm, ct);
2695 
2696                 ASSERT(ovp != NULL);
2697                 vnevent_rename_src(ovp, odvp, onm, ct);
2698                 vnevent_rename_dest_dir(ndvp, ovp, nnm, ct);
2699         }
2700 
2701         if (nvp) {
2702                 VN_RELE(nvp);
2703         }
2704         VN_RELE(ovp);
2705 
2706         nfs_rw_exit(&odrp->r_rwlock);
2707         nfs_rw_exit(&ndrp->r_rwlock);
2708 
2709         return (error);
2710 }
2711 
2712 /* ARGSUSED */
2713 static int
2714 nfs_mkdir(vnode_t *dvp, char *nm, struct vattr *va, vnode_t **vpp, cred_t *cr,
2715         caller_context_t *ct, int flags, vsecattr_t *vsecp)
2716 {
2717         int error;
2718         struct nfscreatargs args;
2719         struct nfsdiropres dr;
2720         int douprintf;
2721         rnode_t *drp;
2722         hrtime_t t;
2723 
2724         if (nfs_zone() != VTOMI(dvp)->mi_zone)
2725                 return (EPERM);
2726 
2727         setdiropargs(&args.ca_da, nm, dvp);
2728 
2729         /*
2730          * Decide what the group-id and set-gid bit of the created directory
2731          * should be.  May have to do a setattr to get the gid right.
2732          */
2733         error = setdirgid(dvp, &va->va_gid, cr);
2734         if (error)
2735                 return (error);
2736         error = setdirmode(dvp, &va->va_mode, cr);
2737         if (error)
2738                 return (error);
2739         va->va_mask |= AT_MODE|AT_GID;
2740 
2741         args.ca_sa = &args.ca_sa_buf;
2742         error = vattr_to_sattr(va, args.ca_sa);
2743         if (error) {
2744                 /* req time field(s) overflow - return immediately */
2745                 return (error);
2746         }
2747 
2748         drp = VTOR(dvp);
2749         if (nfs_rw_enter_sig(&drp->r_rwlock, RW_WRITER, INTR(dvp)))
2750                 return (EINTR);
2751 
2752         dnlc_remove(dvp, nm);
2753 
2754         douprintf = 1;
2755 
2756         t = gethrtime();
2757 
2758         error = rfs2call(VTOMI(dvp), RFS_MKDIR,
2759             xdr_creatargs, (caddr_t)&args,
2760             xdr_diropres, (caddr_t)&dr, cr,
2761             &douprintf, &dr.dr_status, 0, NULL);
2762 
2763         PURGE_ATTRCACHE(dvp);   /* mod time changed */
2764 
2765         if (!error) {
2766                 error = geterrno(dr.dr_status);
2767                 if (!error) {
2768                         if (HAVE_RDDIR_CACHE(drp))
2769                                 nfs_purge_rddir_cache(dvp);
2770                         /*
2771                          * The attributes returned by RFS_MKDIR cannot
2772                          * be depended upon, so mark the attribute cache
2773                          * as purged.  A subsequent GETATTR will get the
2774                          * correct attributes from the server.
2775                          */
2776                         *vpp = makenfsnode(&dr.dr_fhandle, &dr.dr_attr,
2777                             dvp->v_vfsp, t, cr, NULL, NULL);
2778                         PURGE_ATTRCACHE(*vpp);
2779                         dnlc_update(dvp, nm, *vpp);
2780 
2781                         /*
2782                          * Make sure the gid was set correctly.
2783                          * If not, try to set it (but don't lose
2784                          * any sleep over it).
2785                          */
2786                         if (va->va_gid != VTOR(*vpp)->r_attr.va_gid) {
2787                                 va->va_mask = AT_GID;
2788                                 (void) nfssetattr(*vpp, va, 0, cr);
2789                         }
2790                 } else {
2791                         PURGE_STALE_FH(error, dvp, cr);
2792                 }
2793         }
2794 
2795         nfs_rw_exit(&drp->r_rwlock);
2796 
2797         return (error);
2798 }
2799 
2800 /* ARGSUSED */
2801 static int
2802 nfs_rmdir(vnode_t *dvp, char *nm, vnode_t *cdir, cred_t *cr,
2803         caller_context_t *ct, int flags)
2804 {
2805         int error;
2806         enum nfsstat status;
2807         struct nfsdiropargs da;
2808         vnode_t *vp;
2809         int douprintf;
2810         rnode_t *drp;
2811 
2812         if (nfs_zone() != VTOMI(dvp)->mi_zone)
2813                 return (EPERM);
2814         drp = VTOR(dvp);
2815         if (nfs_rw_enter_sig(&drp->r_rwlock, RW_WRITER, INTR(dvp)))
2816                 return (EINTR);
2817 
2818         /*
2819          * Attempt to prevent a rmdir(".") from succeeding.
2820          */
2821         error = nfslookup(dvp, nm, &vp, NULL, 0, NULL, cr, 0);
2822         if (error) {
2823                 nfs_rw_exit(&drp->r_rwlock);
2824                 return (error);
2825         }
2826 
2827         if (vp == cdir) {
2828                 VN_RELE(vp);
2829                 nfs_rw_exit(&drp->r_rwlock);
2830                 return (EINVAL);
2831         }
2832 
2833         setdiropargs(&da, nm, dvp);
2834 
2835         /*
2836          * First just remove the entry from the name cache, as it
2837          * is most likely an entry for this vp.
2838          */
2839         dnlc_remove(dvp, nm);
2840 
2841         /*
2842          * If the vnode reference count is greater than one, then
2843          * there may be additional references in the DNLC which will
2844          * need to be purged.  First, try removing the entry for
2845          * the parent directory and see if that removes the additional
2846          * reference(s).  If that doesn't do it, then use dnlc_purge_vp
2847          * to completely remove any references to the directory which
2848          * might still exist in the DNLC.
2849          */
2850         if (vp->v_count > 1) {
2851                 dnlc_remove(vp, "..");
2852                 if (vp->v_count > 1)
2853                         dnlc_purge_vp(vp);
2854         }
2855 
2856         douprintf = 1;
2857 
2858         error = rfs2call(VTOMI(dvp), RFS_RMDIR,
2859             xdr_diropargs, (caddr_t)&da,
2860             xdr_enum, (caddr_t)&status, cr,
2861             &douprintf, &status, 0, NULL);
2862 
2863         PURGE_ATTRCACHE(dvp);   /* mod time changed */
2864 
2865         if (error) {
2866                 VN_RELE(vp);
2867                 nfs_rw_exit(&drp->r_rwlock);
2868                 return (error);
2869         }
2870 
2871         error = geterrno(status);
2872         if (!error) {
2873                 if (HAVE_RDDIR_CACHE(drp))
2874                         nfs_purge_rddir_cache(dvp);
2875                 if (HAVE_RDDIR_CACHE(VTOR(vp)))
2876                         nfs_purge_rddir_cache(vp);
2877         } else {
2878                 PURGE_STALE_FH(error, dvp, cr);
2879                 /*
2880                  * System V defines rmdir to return EEXIST, not
2881                  * ENOTEMPTY, if the directory is not empty.  Over
2882                  * the wire, the error is NFSERR_ENOTEMPTY which
2883                  * geterrno maps to ENOTEMPTY.
2884                  */
2885                 if (error == ENOTEMPTY)
2886                         error = EEXIST;
2887         }
2888 
2889         if (error == 0) {
2890                 vnevent_rmdir(vp, dvp, nm, ct);
2891         }
2892         VN_RELE(vp);
2893 
2894         nfs_rw_exit(&drp->r_rwlock);
2895 
2896         return (error);
2897 }
2898 
2899 /* ARGSUSED */
2900 static int
2901 nfs_symlink(vnode_t *dvp, char *lnm, struct vattr *tva, char *tnm, cred_t *cr,
2902         caller_context_t *ct, int flags)
2903 {
2904         int error;
2905         struct nfsslargs args;
2906         enum nfsstat status;
2907         int douprintf;
2908         rnode_t *drp;
2909 
2910         if (nfs_zone() != VTOMI(dvp)->mi_zone)
2911                 return (EPERM);
2912         setdiropargs(&args.sla_from, lnm, dvp);
2913         args.sla_sa = &args.sla_sa_buf;
2914         error = vattr_to_sattr(tva, args.sla_sa);
2915         if (error) {
2916                 /* req time field(s) overflow - return immediately */
2917                 return (error);
2918         }
2919         args.sla_tnm = tnm;
2920 
2921         drp = VTOR(dvp);
2922         if (nfs_rw_enter_sig(&drp->r_rwlock, RW_WRITER, INTR(dvp)))
2923                 return (EINTR);
2924 
2925         dnlc_remove(dvp, lnm);
2926 
2927         douprintf = 1;
2928 
2929         error = rfs2call(VTOMI(dvp), RFS_SYMLINK,
2930             xdr_slargs, (caddr_t)&args,
2931             xdr_enum, (caddr_t)&status, cr,
2932             &douprintf, &status, 0, NULL);
2933 
2934         PURGE_ATTRCACHE(dvp);   /* mod time changed */
2935 
2936         if (!error) {
2937                 error = geterrno(status);
2938                 if (!error) {
2939                         if (HAVE_RDDIR_CACHE(drp))
2940                                 nfs_purge_rddir_cache(dvp);
2941                 } else {
2942                         PURGE_STALE_FH(error, dvp, cr);
2943                 }
2944         }
2945 
2946         nfs_rw_exit(&drp->r_rwlock);
2947 
2948         return (error);
2949 }
2950 
2951 #ifdef DEBUG
2952 static int nfs_readdir_cache_hits = 0;
2953 static int nfs_readdir_cache_shorts = 0;
2954 static int nfs_readdir_cache_waits = 0;
2955 static int nfs_readdir_cache_misses = 0;
2956 static int nfs_readdir_readahead = 0;
2957 #endif
2958 
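     /*
      * If set to 1 (e.g. via /etc/system), clamp RFS_READDIR requests
      * to 1024 bytes for old servers which cannot handle larger
      * requests; see the comment in nfs_readdir() below.
      */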
2959 static int nfs_shrinkreaddir = 0;
2960 
2961 /*
2962  * Read directory entries.
2963  * There are some weird things to look out for here.  The uio_offset
2964  * field is either 0 or it is the offset returned from a previous
2965  * readdir.  It is an opaque value used by the server to find the
2966  * correct directory block to read. The count field is the number
2967  * of blocks to read on the server.  This is advisory only; the server
2968  * may return only one block's worth of entries.  Entries may be compressed
2969  * on the server.
2970  */
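     /*
      * Illustrative sketch only (not compiled): a caller drives this
      * interface with the opaque cookie carried in uio_offset from one
      * call to the next, e.g.:
      *
      *         uio.uio_offset = 0;
      *         do {
      *                 (point uio_iov at a fresh buffer)
      *                 error = VOP_READDIR(dvp, &uio, cr, &eof, NULL, 0);
      *                 (consume the entries copied out, if any)
      *         } while (!error && !eof);
      *
      * On each successful call, uio_offset is rewritten with the next
      * cookie (see the uiomove path below).
      */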
2971 /* ARGSUSED */
2972 static int
2973 nfs_readdir(vnode_t *vp, struct uio *uiop, cred_t *cr, int *eofp,
2974         caller_context_t *ct, int flags)
2975 {
2976         int error;
2977         size_t count;
2978         rnode_t *rp;
2979         rddir_cache *rdc;
2980         rddir_cache *nrdc;
2981         rddir_cache *rrdc;
2982 #ifdef DEBUG
2983         int missed;
2984 #endif
2985         rddir_cache srdc;
2986         avl_index_t where;
2987 
2988         rp = VTOR(vp);
2989 
2990         ASSERT(nfs_rw_lock_held(&rp->r_rwlock, RW_READER));
2991         if (nfs_zone() != VTOMI(vp)->mi_zone)
2992                 return (EIO);
2993         /*
2994          * Make sure that the directory cache is valid.
2995          */
2996         if (HAVE_RDDIR_CACHE(rp)) {
2997                 if (nfs_disable_rddir_cache) {
2998                         /*
2999                          * Setting nfs_disable_rddir_cache in /etc/system
3000                          * allows interoperability with servers that do not
3001                          * properly update the attributes of directories.
3002                          * Any cached information gets purged before an
3003                          * access is made to it.
3004                          */
3005                         nfs_purge_rddir_cache(vp);
3006                 } else {
3007                         error = nfs_validate_caches(vp, cr);
3008                         if (error)
3009                                 return (error);
3010                 }
3011         }
3012 
3013         /*
3014          * UGLINESS: SunOS 3.2 servers apparently cannot always handle an
3015          * RFS_READDIR request with rda_count set to more than 0x400. So
3016          * we reduce the request size here purely for compatibility.
3017          *
3018          * In general, this is no longer required.  However, if a server
3019  * is discovered which cannot handle requests larger than 1024,
3020          * nfs_shrinkreaddir can be set to 1 to enable this backwards
3021          * compatibility.
3022          *
3023          * In any case, the request size is limited to NFS_MAXDATA bytes.
3024          */
3025         count = MIN(uiop->uio_iov->iov_len,
3026             nfs_shrinkreaddir ? 0x400 : NFS_MAXDATA);
3027 
3028         nrdc = NULL;
3029 #ifdef DEBUG
3030         missed = 0;
3031 #endif
3032 top:
3033         /*
3034          * Short-circuit the last readdir, which always returns 0 bytes.
3035          * This can be done after the directory has been read through
3036          * completely at least once.  This will set r_direof which
3037          * can be used to find the value of the last cookie.
3038          */
3039         mutex_enter(&rp->r_statelock);
3040         if (rp->r_direof != NULL &&
3041             uiop->uio_offset == rp->r_direof->nfs_ncookie) {
3042                 mutex_exit(&rp->r_statelock);
3043 #ifdef DEBUG
3044                 nfs_readdir_cache_shorts++;
3045 #endif
3046                 if (eofp)
3047                         *eofp = 1;
3048                 if (nrdc != NULL)
3049                         rddir_cache_rele(nrdc);
3050                 return (0);
3051         }
3052         /*
3053          * Look for a cache entry.  Cache entries are identified
3054          * by the NFS cookie value and the byte count requested.
3055          */
3056         srdc.nfs_cookie = uiop->uio_offset;
3057         srdc.buflen = count;
3058         rdc = avl_find(&rp->r_dir, &srdc, &where);
3059         if (rdc != NULL) {
3060                 rddir_cache_hold(rdc);
3061                 /*
3062                  * If the cache entry is in the process of being
3063                  * filled in, wait until this completes.  The
3064                  * RDDIRWAIT bit is set to indicate that someone
3065                  * is waiting, and when the thread currently
3066                  * filling the entry is done, it should do a
3067                  * cv_broadcast to wake up all of the threads
3068                  * waiting for it to finish.
3069                  */
3070                 if (rdc->flags & RDDIR) {
3071                         nfs_rw_exit(&rp->r_rwlock);
3072                         rdc->flags |= RDDIRWAIT;
3073 #ifdef DEBUG
3074                         nfs_readdir_cache_waits++;
3075 #endif
3076                         if (!cv_wait_sig(&rdc->cv, &rp->r_statelock)) {
3077                                 /*
3078                                  * We got interrupted, probably
3079                                  * the user typed ^C or an alarm
3080                                  * fired.  We free the new entry
3081                                  * if we allocated one.
3082                                  */
3083                                 mutex_exit(&rp->r_statelock);
3084                                 (void) nfs_rw_enter_sig(&rp->r_rwlock,
3085                                     RW_READER, FALSE);
3086                                 rddir_cache_rele(rdc);
3087                                 if (nrdc != NULL)
3088                                         rddir_cache_rele(nrdc);
3089                                 return (EINTR);
3090                         }
3091                         mutex_exit(&rp->r_statelock);
3092                         (void) nfs_rw_enter_sig(&rp->r_rwlock,
3093                             RW_READER, FALSE);
3094                         rddir_cache_rele(rdc);
3095                         goto top;
3096                 }
3097                 /*
3098                  * Check to see if a readdir is required to
3099                  * fill the entry.  If so, mark this entry
3100                  * as being filled, remove our reference,
3101                  * and branch to the code to fill the entry.
3102                  */
3103                 if (rdc->flags & RDDIRREQ) {
3104                         rdc->flags &= ~RDDIRREQ;
3105                         rdc->flags |= RDDIR;
3106                         if (nrdc != NULL)
3107                                 rddir_cache_rele(nrdc);
3108                         nrdc = rdc;
3109                         mutex_exit(&rp->r_statelock);
3110                         goto bottom;
3111                 }
3112 #ifdef DEBUG
3113                 if (!missed)
3114                         nfs_readdir_cache_hits++;
3115 #endif
3116                 /*
3117                  * If an error occurred while attempting
3118                  * to fill the cache entry, just return it.
3119                  */
3120                 if (rdc->error) {
3121                         error = rdc->error;
3122                         mutex_exit(&rp->r_statelock);
3123                         rddir_cache_rele(rdc);
3124                         if (nrdc != NULL)
3125                                 rddir_cache_rele(nrdc);
3126                         return (error);
3127                 }
3128 
3129                 /*
3130                  * The cache entry is complete and good,
3131                  * copyout the dirent structs to the calling
3132                  * thread.
3133                  */
3134                 error = uiomove(rdc->entries, rdc->entlen, UIO_READ, uiop);
3135 
3136                 /*
3137                  * If no error occurred during the copyout,
3138                  * update the offset in the uio struct to
3139                  * contain the value of the next cookie
3140                  * and set the eof value appropriately.
3141                  */
3142                 if (!error) {
3143                         uiop->uio_offset = rdc->nfs_ncookie;
3144                         if (eofp)
3145                                 *eofp = rdc->eof;
3146                 }
3147 
3148                 /*
3149                  * Decide whether to do readahead.  Don't if we
3150                  * have already read to the end of the directory.
3151                  */
3152                 if (rdc->eof) {
3153                         rp->r_direof = rdc;
3154                         mutex_exit(&rp->r_statelock);
3155                         rddir_cache_rele(rdc);
3156                         if (nrdc != NULL)
3157                                 rddir_cache_rele(nrdc);
3158                         return (error);
3159                 }
3160 
3161                 /*
3162                  * Check to see whether we found an entry
3163                  * for the readahead.  If so, we don't need
3164                  * to do anything further, so free the new
3165                  * entry if one was allocated.  Otherwise,
3166                  * allocate a new entry, add it to the cache,
3167                  * and then initiate an asynchronous readdir
3168                  * operation to fill it.
3169                  */
3170                 srdc.nfs_cookie = rdc->nfs_ncookie;
3171                 srdc.buflen = count;
3172                 rrdc = avl_find(&rp->r_dir, &srdc, &where);
3173                 if (rrdc != NULL) {
3174                         if (nrdc != NULL)
3175                                 rddir_cache_rele(nrdc);
3176                 } else {
3177                         if (nrdc != NULL)
3178                                 rrdc = nrdc;
3179                         else {
3180                                 rrdc = rddir_cache_alloc(KM_NOSLEEP);
3181                         }
3182                         if (rrdc != NULL) {
3183                                 rrdc->nfs_cookie = rdc->nfs_ncookie;
3184                                 rrdc->buflen = count;
3185                                 avl_insert(&rp->r_dir, rrdc, where);
3186                                 rddir_cache_hold(rrdc);
3187                                 mutex_exit(&rp->r_statelock);
3188                                 rddir_cache_rele(rdc);
3189 #ifdef DEBUG
3190                                 nfs_readdir_readahead++;
3191 #endif
3192                                 nfs_async_readdir(vp, rrdc, cr, nfsreaddir);
3193                                 return (error);
3194                         }
3195                 }
3196 
3197                 mutex_exit(&rp->r_statelock);
3198                 rddir_cache_rele(rdc);
3199                 return (error);
3200         }
3201 
3202         /*
3203          * Didn't find an entry in the cache.  Construct a new empty
3204          * entry and link it into the cache.  Other processes attempting
3205          * to access this entry will need to wait until it is filled in.
3206          *
3207          * Since kmem_alloc may block, another pass through the cache
3208          * will need to be taken to make sure that another process
3209          * hasn't already added an entry to the cache for this request.
3210          */
3211         if (nrdc == NULL) {
3212                 mutex_exit(&rp->r_statelock);
3213                 nrdc = rddir_cache_alloc(KM_SLEEP);
3214                 nrdc->nfs_cookie = uiop->uio_offset;
3215                 nrdc->buflen = count;
3216                 goto top;
3217         }
3218 
3219         /*
3220          * Add this entry to the cache.
3221          */
3222         avl_insert(&rp->r_dir, nrdc, where);
3223         rddir_cache_hold(nrdc);
3224         mutex_exit(&rp->r_statelock);
3225 
3226 bottom:
3227 #ifdef DEBUG
3228         missed = 1;
3229         nfs_readdir_cache_misses++;
3230 #endif
3231         /*
3232          * Do the readdir.
3233          */
3234         error = nfsreaddir(vp, nrdc, cr);
3235 
3236         /*
3237          * If this operation failed, just return the error which occurred.
3238          */
3239         if (error != 0)
3240                 return (error);
3241 
3242         /*
3243          * Since the RPC operation will have taken some time and blocked
3244          * this process, another pass through the cache will need to be
3245          * taken to find the correct cache entry.  It is possible that
3246          * the correct cache entry will not be there (although one was
3247          * added) because the directory changed during the RPC operation
3248          * and the readdir cache was flushed.  In this case, just start
3249          * over.  It is hoped that this will not happen too often... :-)
3250          */
3251         nrdc = NULL;
3252         goto top;
3253         /* NOTREACHED */
3254 }
3255 
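     /*
      * Summary of the rddir_cache flag protocol used by nfs_readdir()
      * above and nfsreaddir() below (all flag updates are made under
      * r_statelock):
      *
      *         RDDIR           an over-the-wire readdir is filling the
      *                         entry; other threads must wait for it
      *         RDDIRWAIT       at least one thread is blocked on rdc->cv;
      *                         the filling thread does a cv_broadcast
      *                         when done
      *         RDDIRREQ        the previous fill attempt failed; the
      *                         next reader must re-issue the readdir
      */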
3256 static int
3257 nfsreaddir(vnode_t *vp, rddir_cache *rdc, cred_t *cr)
3258 {
3259         int error;
3260         struct nfsrddirargs rda;
3261         struct nfsrddirres rd;
3262         rnode_t *rp;
3263         mntinfo_t *mi;
3264         uint_t count;
3265         int douprintf;
3266         failinfo_t fi, *fip;
3267 
3268         ASSERT(nfs_zone() == VTOMI(vp)->mi_zone);
3269         count = rdc->buflen;
3270 
3271         rp = VTOR(vp);
3272         mi = VTOMI(vp);
3273 
3274         rda.rda_fh = *VTOFH(vp);
3275         rda.rda_offset = rdc->nfs_cookie;
3276 
3277         /*
3278          * NFS client failover support: suppress failover unless we
3279          * have a zero cookie (cookies are server-specific opaque values).
3280          */
3281         if (rdc->nfs_cookie == (off_t)0) {
3282                 fi.vp = vp;
3283                 fi.fhp = (caddr_t)&rda.rda_fh;
3284                 fi.copyproc = nfscopyfh;
3285                 fi.lookupproc = nfslookup;
3286                 fi.xattrdirproc = acl_getxattrdir2;
3287                 fip = &fi;
3288         } else {
3289                 fip = NULL;
3290         }
3291 
3292         rd.rd_entries = kmem_alloc(rdc->buflen, KM_SLEEP);
3293         rd.rd_size = count;
3294         rd.rd_offset = rda.rda_offset;
3295 
3296         douprintf = 1;
3297 
3298         if (mi->mi_io_kstats) {
3299                 mutex_enter(&mi->mi_lock);
3300                 kstat_runq_enter(KSTAT_IO_PTR(mi->mi_io_kstats));
3301                 mutex_exit(&mi->mi_lock);
3302         }
3303 
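             /*
              * Issue the READDIR, retrying for as long as failover tells
              * us to try again (ENFS_TRYAGAIN).  rda_count is re-clamped
              * on each pass since mi_curread may have changed.
              */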
3304         do {
3305                 rda.rda_count = MIN(count, mi->mi_curread);
3306                 error = rfs2call(mi, RFS_READDIR,
3307                     xdr_rddirargs, (caddr_t)&rda,
3308                     xdr_getrddirres, (caddr_t)&rd, cr,
3309                     &douprintf, &rd.rd_status, 0, fip);
3310         } while (error == ENFS_TRYAGAIN);
3311 
3312         if (mi->mi_io_kstats) {
3313                 mutex_enter(&mi->mi_lock);
3314                 kstat_runq_exit(KSTAT_IO_PTR(mi->mi_io_kstats));
3315                 mutex_exit(&mi->mi_lock);
3316         }
3317 
3318         /*
3319          * Since we are actually doing a READDIR RPC, we must have
3320          * exclusive access to the cache entry being filled.  Thus,
3321          * it is safe to update all fields except for the flags
3322          * field.  The r_statelock in the rnode must be held to
3323          * prevent two different threads from simultaneously
3324          * attempting to update the flags field.  This can happen
3325          * if we are turning off RDDIR and the other thread is
3326          * trying to set RDDIRWAIT.
3327          */
3328         ASSERT(rdc->flags & RDDIR);
3329         if (!error) {
3330                 error = geterrno(rd.rd_status);
3331                 if (!error) {
3332                         rdc->nfs_ncookie = rd.rd_offset;
3333                         rdc->eof = rd.rd_eof ? 1 : 0;
3334                         rdc->entlen = rd.rd_size;
3335                         ASSERT(rdc->entlen <= rdc->buflen);
3336 #ifdef DEBUG
3337                         rdc->entries = rddir_cache_buf_alloc(rdc->buflen,
3338                             KM_SLEEP);
3339 #else
3340                         rdc->entries = kmem_alloc(rdc->buflen, KM_SLEEP);
3341 #endif
3342                         bcopy(rd.rd_entries, rdc->entries, rdc->entlen);
3343                         rdc->error = 0;
3344                         if (mi->mi_io_kstats) {
3345                                 mutex_enter(&mi->mi_lock);
3346                                 KSTAT_IO_PTR(mi->mi_io_kstats)->reads++;
3347                                 KSTAT_IO_PTR(mi->mi_io_kstats)->nread +=
3348                                     rd.rd_size;
3349                                 mutex_exit(&mi->mi_lock);
3350                         }
3351                 } else {
3352                         PURGE_STALE_FH(error, vp, cr);
3353                 }
3354         }
3355         if (error) {
3356                 rdc->entries = NULL;
3357                 rdc->error = error;
3358         }
3359         kmem_free(rd.rd_entries, rdc->buflen);
3360 
3361         mutex_enter(&rp->r_statelock);
3362         rdc->flags &= ~RDDIR;
3363         if (rdc->flags & RDDIRWAIT) {
3364                 rdc->flags &= ~RDDIRWAIT;
3365                 cv_broadcast(&rdc->cv);
3366         }
3367         if (error)
3368                 rdc->flags |= RDDIRREQ;
3369         mutex_exit(&rp->r_statelock);
3370 
3371         rddir_cache_rele(rdc);
3372 
3373         return (error);
3374 }
3375 
3376 #ifdef DEBUG
3377 static int nfs_bio_do_stop = 0;
3378 #endif
3379 
3380 static int
3381 nfs_bio(struct buf *bp, cred_t *cr)
3382 {
3383         rnode_t *rp = VTOR(bp->b_vp);
3384         int count;
3385         int error;
3386         cred_t *cred;
3387         uint_t offset;
3388 
3389         DTRACE_IO1(start, struct buf *, bp);
3390 
3391         ASSERT(nfs_zone() == VTOMI(bp->b_vp)->mi_zone);
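             /* dbtob() converts b_blkno (DEV_BSIZE blocks) to a byte offset. */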
3392         offset = dbtob(bp->b_blkno);
3393 
3394         if (bp->b_flags & B_READ) {
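                     /*
                      * Use the credentials cached in the rnode if present;
                      * otherwise cache the caller's credentials.  If the
                      * cached credentials fail with EACCES, the read is
                      * retried once with the caller's credentials
                      * (see read_again below).
                      */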
3395                 mutex_enter(&rp->r_statelock);
3396                 if (rp->r_cred != NULL) {
3397                         cred = rp->r_cred;
3398                         crhold(cred);
3399                 } else {
3400                         rp->r_cred = cr;
3401                         crhold(cr);
3402                         cred = cr;
3403                         crhold(cred);
3404                 }
3405                 mutex_exit(&rp->r_statelock);
3406         read_again:
3407                 error = bp->b_error = nfsread(bp->b_vp, bp->b_un.b_addr,
3408                     offset, bp->b_bcount, &bp->b_resid, cred);
3409 
3410                 crfree(cred);
3411                 if (!error) {
3412                         if (bp->b_resid) {
3413                                 /*
3414                                  * Didn't get it all because we hit
3415                                  * EOF; zero all the memory beyond
3416                                  * the EOF.
3417                                  */
3418                                 bzero(bp->b_un.b_addr +
3419                                     bp->b_bcount - bp->b_resid, bp->b_resid);
3420                         }
3421                         mutex_enter(&rp->r_statelock);
3422                         if (bp->b_resid == bp->b_bcount &&
3423                             offset >= rp->r_size) {
3424                                 /*
3425                                  * We didn't read anything at all as we are
3426                                  * past EOF.  Return an error indicator back
3427                                  * but don't destroy the pages (yet).
3428                                  */
3429                                 error = NFS_EOF;
3430                         }
3431                         mutex_exit(&rp->r_statelock);
3432                 } else if (error == EACCES) {
3433                         mutex_enter(&rp->r_statelock);
3434                         if (cred != cr) {
3435                                 if (rp->r_cred != NULL)
3436                                         crfree(rp->r_cred);
3437                                 rp->r_cred = cr;
3438                                 crhold(cr);
3439                                 cred = cr;
3440                                 crhold(cred);
3441                                 mutex_exit(&rp->r_statelock);
3442                                 goto read_again;
3443                         }
3444                         mutex_exit(&rp->r_statelock);
3445                 }
3446         } else {
3447                 if (!(rp->r_flags & RSTALE)) {
3448                         mutex_enter(&rp->r_statelock);
3449                         if (rp->r_cred != NULL) {
3450                                 cred = rp->r_cred;
3451                                 crhold(cred);
3452                         } else {
3453                                 rp->r_cred = cr;
3454                                 crhold(cr);
3455                                 cred = cr;
3456                                 crhold(cred);
3457                         }
3458                         mutex_exit(&rp->r_statelock);
3459                 write_again:
3460                         mutex_enter(&rp->r_statelock);
3461                         count = MIN(bp->b_bcount, rp->r_size - offset);
3462                         mutex_exit(&rp->r_statelock);
3463                         if (count < 0)
3464                                 cmn_err(CE_PANIC, "nfs_bio: write count < 0");
3465 #ifdef DEBUG
3466                         if (count == 0) {
3467                                 zcmn_err(getzoneid(), CE_WARN,
3468                                     "nfs_bio: zero length write at %d",
3469                                     offset);
3470                                 nfs_printfhandle(&rp->r_fh);
3471                                 if (nfs_bio_do_stop)
3472                                         debug_enter("nfs_bio");
3473                         }
3474 #endif
3475                         error = nfswrite(bp->b_vp, bp->b_un.b_addr, offset,
3476                             count, cred);
3477                         if (error == EACCES) {
3478                                 mutex_enter(&rp->r_statelock);
3479                                 if (cred != cr) {
3480                                         if (rp->r_cred != NULL)
3481                                                 crfree(rp->r_cred);
3482                                         rp->r_cred = cr;
3483                                         crhold(cr);
3484                                         crfree(cred);
3485                                         cred = cr;
3486                                         crhold(cred);
3487                                         mutex_exit(&rp->r_statelock);
3488                                         goto write_again;
3489                                 }
3490                                 mutex_exit(&rp->r_statelock);
3491                         }
3492                         bp->b_error = error;
3493                         if (error && error != EINTR) {
3494                                 /*
3495                                  * Don't print EDQUOT errors on the console.
3496                                  * Don't print asynchronous EACCES errors.
3497                                  * Don't print EFBIG errors.
3498                                  * Print all other write errors.
3499                                  */
3500                                 if (error != EDQUOT && error != EFBIG &&
3501                                     (error != EACCES ||
3502                                     !(bp->b_flags & B_ASYNC)))
3503                                         nfs_write_error(bp->b_vp, error, cred);
3504                                 /*
3505                                  * Update r_error and r_flags as appropriate.
3506                                  * If the error was ESTALE, then mark the
3507                                  * rnode as not being writeable and save
3508                                  * the error status.  Otherwise, save any
3509                                  * errors which occur from asynchronous
3510                                  * page invalidations.  Any errors occurring
3511                                  * from other operations should be saved
3512                                  * by the caller.
3513                                  */
3514                                 mutex_enter(&rp->r_statelock);
3515                                 if (error == ESTALE) {
3516                                         rp->r_flags |= RSTALE;
3517                                         if (!rp->r_error)
3518                                                 rp->r_error = error;
3519                                 } else if (!rp->r_error &&
3520                                     (bp->b_flags &
3521                                     (B_INVAL|B_FORCE|B_ASYNC)) ==
3522                                     (B_INVAL|B_FORCE|B_ASYNC)) {
3523                                         rp->r_error = error;
3524                                 }
3525                                 mutex_exit(&rp->r_statelock);
3526                         }
3527                         crfree(cred);
3528                 } else {
3529                         error = rp->r_error;
3530                         /*
3531                          * A close may have cleared r_error; if so,
3532                          * propagate the ESTALE error return properly.
3533                          */
3534                         if (error == 0)
3535                                 error = ESTALE;
3536                 }
3537         }
3538 
3539         if (error != 0 && error != NFS_EOF)
3540                 bp->b_flags |= B_ERROR;
3541 
3542         DTRACE_IO1(done, struct buf *, bp);
3543 
3544         return (error);
3545 }
3546 
3547 /* ARGSUSED */
3548 static int
3549 nfs_fid(vnode_t *vp, fid_t *fidp, caller_context_t *ct)
3550 {
3551         struct nfs_fid *fp;
3552         rnode_t *rp;
3553 
3554         rp = VTOR(vp);
3555 
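             /*
              * If the caller's buffer is too small, report the length
              * that is required and fail with ENOSPC.
              */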
3556         if (fidp->fid_len < (sizeof (struct nfs_fid) - sizeof (short))) {
3557                 fidp->fid_len = sizeof (struct nfs_fid) - sizeof (short);
3558                 return (ENOSPC);
3559         }
3560         fp = (struct nfs_fid *)fidp;
3561         fp->nf_pad = 0;
3562         fp->nf_len = sizeof (struct nfs_fid) - sizeof (short);
3563         bcopy(rp->r_fh.fh_buf, fp->nf_data, NFS_FHSIZE);
3564         return (0);
3565 }
3566 
3567 /* ARGSUSED2 */
3568 static int
3569 nfs_rwlock(vnode_t *vp, int write_lock, caller_context_t *ctp)
3570 {
3571         rnode_t *rp = VTOR(vp);
3572 
3573         if (!write_lock) {
3574                 (void) nfs_rw_enter_sig(&rp->r_rwlock, RW_READER, FALSE);
3575                 return (V_WRITELOCK_FALSE);
3576         }
3577 
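             /*
              * With directio in effect, writes bypass the page cache, so
              * if no pages are mapped and none are cached, a shared lock
              * suffices and lets writers proceed concurrently.
              */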
3578         if ((rp->r_flags & RDIRECTIO) || (VTOMI(vp)->mi_flags & MI_DIRECTIO)) {
3579                 (void) nfs_rw_enter_sig(&rp->r_rwlock, RW_READER, FALSE);
3580                 if (rp->r_mapcnt == 0 && !vn_has_cached_data(vp))
3581                         return (V_WRITELOCK_FALSE);
3582                 nfs_rw_exit(&rp->r_rwlock);
3583         }
3584 
3585         (void) nfs_rw_enter_sig(&rp->r_rwlock, RW_WRITER, FALSE);
3586         return (V_WRITELOCK_TRUE);
3587 }
3588 
3589 /* ARGSUSED */
3590 static void
3591 nfs_rwunlock(vnode_t *vp, int write_lock, caller_context_t *ctp)
3592 {
3593         rnode_t *rp = VTOR(vp);
3594 
3595         nfs_rw_exit(&rp->r_rwlock);
3596 }
3597 
3598 /* ARGSUSED */
3599 static int
3600 nfs_seek(vnode_t *vp, offset_t ooff, offset_t *noffp, caller_context_t *ct)
3601 {
3602 
3603         /*
3604          * Because we stuff the readdir cookie into the offset field,
3605          * someone may attempt to do an lseek with the cookie, which
3606          * we want to succeed.
3607          */
3608         if (vp->v_type == VDIR)
3609                 return (0);
3610         if (*noffp < 0 || *noffp > MAXOFF32_T)
3611                 return (EINVAL);
3612         return (0);
3613 }
3614 
3615 /*
3616  * Number of NFS_MAXDATA blocks to read ahead,
3617  * optimized for 100Base-T.
3618  */
3619 static int nfs_nra = 4;
3620 
3621 #ifdef DEBUG
3622 static int nfs_lostpage = 0;    /* number of times we lost original page */
3623 #endif
3624 
3625 /*
3626  * Return all the pages from [off..off+len) in file
3627  */
3628 /* ARGSUSED */
3629 static int
3630 nfs_getpage(vnode_t *vp, offset_t off, size_t len, uint_t *protp,
3631         page_t *pl[], size_t plsz, struct seg *seg, caddr_t addr,
3632         enum seg_rw rw, cred_t *cr, caller_context_t *ct)
3633 {
3634         rnode_t *rp;
3635         int error;
3636         mntinfo_t *mi;
3637 
3638         if (vp->v_flag & VNOMAP)
3639                 return (ENOSYS);
3640 
3641         ASSERT(off <= MAXOFF32_T);
3642         if (nfs_zone() != VTOMI(vp)->mi_zone)
3643                 return (EIO);
3644         if (protp != NULL)
3645                 *protp = PROT_ALL;
3646 
3647         /*
3648          * Now validate that the caches are up to date.
3649          */
3650         error = nfs_validate_caches(vp, cr);
3651         if (error)
3652                 return (error);
3653 
3654         rp = VTOR(vp);
3655         mi = VTOMI(vp);
3656 retry:
3657         mutex_enter(&rp->r_statelock);
3658 
3659         /*
3660          * Don't create dirty pages faster than they
3661          * can be cleaned so that the system doesn't
3662          * get imbalanced.  If the async queue is
3663          * maxed out, then wait for it to drain before
3664          * creating more dirty pages.  Also, wait for
3665          * any threads doing pagewalks in the vop_getattr
3666          * entry points so that they don't block for
3667          * long periods.
3668          */
3669         if (rw == S_CREATE) {
3670                 while ((mi->mi_max_threads != 0 &&
3671                     rp->r_awcount > 2 * mi->mi_max_threads) ||
3672                     rp->r_gcount > 0)
3673                         cv_wait(&rp->r_cv, &rp->r_statelock);
3674         }
3675 
3676         /*
3677          * If we are getting called as a side effect of an nfs_write()
3678          * operation the local file size might not be extended yet.
3679          * In this case we want to be able to return pages of zeroes.
3680          */
3681         if (off + len > rp->r_size + PAGEOFFSET && seg != segkmap) {
3682                 mutex_exit(&rp->r_statelock);
3683                 return (EFAULT);                /* beyond EOF */
3684         }
3685 
3686         mutex_exit(&rp->r_statelock);
3687 
3688         error = pvn_getpages(nfs_getapage, vp, off, len, protp, pl, plsz,
3689             seg, addr, rw, cr);
3690 
3691         switch (error) {
3692         case NFS_EOF:
3693                 nfs_purge_caches(vp, NFS_NOPURGE_DNLC, cr);
3694                 goto retry;
3695         case ESTALE:
3696                 PURGE_STALE_FH(error, vp, cr);
3697         }
3698 
3699         return (error);
3700 }
3701 
3702 /*
3703  * Called from pvn_getpages to get a particular page.
3704  */
3705 /* ARGSUSED */
3706 static int
3707 nfs_getapage(vnode_t *vp, u_offset_t off, size_t len, uint_t *protp,
3708         page_t *pl[], size_t plsz, struct seg *seg, caddr_t addr,
3709         enum seg_rw rw, cred_t *cr)
3710 {
3711         rnode_t *rp;
3712         uint_t bsize;
3713         struct buf *bp;
3714         page_t *pp;
3715         u_offset_t lbn;
3716         u_offset_t io_off;
3717         u_offset_t blkoff;
3718         u_offset_t rablkoff;
3719         size_t io_len;
3720         uint_t blksize;
3721         int error;
3722         int readahead;
3723         int readahead_issued = 0;
3724         int ra_window; /* readahead window */
3725         page_t *pagefound;
3726 
3727         if (nfs_zone() != VTOMI(vp)->mi_zone)
3728                 return (EIO);
3729         rp = VTOR(vp);
3730         bsize = MAX(vp->v_vfsp->vfs_bsize, PAGESIZE);
3731 
3732 reread:
3733         bp = NULL;
3734         pp = NULL;
3735         pagefound = NULL;
3736 
3737         if (pl != NULL)
3738                 pl[0] = NULL;
3739 
3740         error = 0;
3741         lbn = off / bsize;
3742         blkoff = lbn * bsize;
3743 
3744         /*
3745          * Queueing up the readahead before doing the synchronous read
3746          * results in a significant increase in read throughput because
3747          * of the increased parallelism between the async threads and
3748          * the process context.
3749          */
3750         if ((off & ((vp->v_vfsp->vfs_bsize) - 1)) == 0 &&
3751             rw != S_CREATE &&
3752             !(vp->v_flag & VNOCACHE)) {
3753                 mutex_enter(&rp->r_statelock);
3754 
3755                 /*
3756                  * Calculate the number of readaheads to do.
3757                  * a) No readaheads at offset = 0.
3758                  * b) Do the maximum (nfs_nra) readaheads when the
3759                  *    readahead window is closed.
3760                  * c) Do between 1 and (nfs_nra - 1) readaheads,
3761                  *    depending upon how far open the window is.
3762                  * d) No readaheads if rp->r_nextr is not within the
3763                  *    scope of the readahead window (random i/o).
3764                  */
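                     /*
                      * For example, with nfs_nra == 4: if rp->r_nextr is
                      * one block ahead of blkoff (ra_window == 1), three
                      * readaheads are issued; if it is four or more blocks
                      * ahead, or anywhere behind blkoff, none are.
                      */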
3765 
3766                 if (off == 0)
3767                         readahead = 0;
3768                 else if (blkoff == rp->r_nextr)
3769                         readahead = nfs_nra;
3770                 else if (rp->r_nextr > blkoff &&
3771                     ((ra_window = (rp->r_nextr - blkoff) / bsize)
3772                     <= (nfs_nra - 1)))
3773                         readahead = nfs_nra - ra_window;
3774                 else
3775                         readahead = 0;
3776 
3777                 rablkoff = rp->r_nextr;
3778                 while (readahead > 0 && rablkoff + bsize < rp->r_size) {
3779                         mutex_exit(&rp->r_statelock);
3780                         if (nfs_async_readahead(vp, rablkoff + bsize,
3781                             addr + (rablkoff + bsize - off), seg, cr,
3782                             nfs_readahead) < 0) {
3783                                 mutex_enter(&rp->r_statelock);
3784                                 break;
3785                         }
3786                         readahead--;
3787                         rablkoff += bsize;
3788                         /*
3789                          * Indicate that we did a readahead so that
3790                          * the readahead offset is not updated by
3791                          * the synchronous read below.
3792                          */
3793                         readahead_issued = 1;
3794                         mutex_enter(&rp->r_statelock);
3795                         /*
3796                          * Set the readahead offset to
3797                          * the offset of the last async
3798                          * readahead request.
3799                          */
3800                         rp->r_nextr = rablkoff;
3801                 }
3802                 mutex_exit(&rp->r_statelock);
3803         }
3804 
3805 again:
3806         if ((pagefound = page_exists(vp, off)) == NULL) {
3807                 if (pl == NULL) {
3808                         (void) nfs_async_readahead(vp, blkoff, addr, seg, cr,
3809                             nfs_readahead);
3810                 } else if (rw == S_CREATE) {
3811                         /*
3812                          * Block for this page is not allocated, or the offset
3813                          * is beyond the current allocation size, or we're
3814                          * allocating a swap slot and the page was not found,
3815                          * so allocate it and return a zero page.
3816                          */
3817                         if ((pp = page_create_va(vp, off,
3818                             PAGESIZE, PG_WAIT, seg, addr)) == NULL)
3819                                 cmn_err(CE_PANIC, "nfs_getapage: page_create");
3820                         io_len = PAGESIZE;
3821                         mutex_enter(&rp->r_statelock);
3822                         rp->r_nextr = off + PAGESIZE;
3823                         mutex_exit(&rp->r_statelock);
3824                 } else {
3825                         /*
3826                          * Need to go to the server to get a BLOCK; the
3827                          * exceptions are reading at offset 0 or doing
3828                          * random i/o, in which case we read only a PAGE.
3829                          */
3830                         mutex_enter(&rp->r_statelock);
3831                         if (blkoff < rp->r_size &&
3832                             blkoff + bsize >= rp->r_size) {
3833                                 /*
3834                                  * If only a block or less is left in
3835                                  * the file, read all that is remaining.
3836                                  */
3837                                 if (rp->r_size <= off) {
3838                                         /*
3839                                          * Trying to access beyond EOF,
3840                                          * set up to get at least one page.
3841                                          */
3842                                         blksize = off + PAGESIZE - blkoff;
3843                                 } else
3844                                         blksize = rp->r_size - blkoff;
3845                         } else if ((off == 0) ||
3846                             (off != rp->r_nextr && !readahead_issued)) {
3847                                 blksize = PAGESIZE;
3848                                 blkoff = off; /* block = page here */
3849                         } else
3850                                 blksize = bsize;
3851                         mutex_exit(&rp->r_statelock);
3852 
3853                         pp = pvn_read_kluster(vp, off, seg, addr, &io_off,
3854                             &io_len, blkoff, blksize, 0);
3855 
3856                         /*
3857                          * Some other thread has entered the page,
3858                          * so just use it.
3859                          */
3860                         if (pp == NULL)
3861                                 goto again;
3862 
3863                         /*
3864                          * Now round the request size up to page boundaries.
3865                          * This ensures that the entire page will be
3866                          * initialized to zeroes if EOF is encountered.
3867                          */
3868                         io_len = ptob(btopr(io_len));
3869 
3870                         bp = pageio_setup(pp, io_len, vp, B_READ);
3871                         ASSERT(bp != NULL);
3872 
3873                         /*
3874                          * pageio_setup should have set b_addr to 0.  This
3875                          * is correct since we want to do I/O on a page
3876                          * boundary.  bp_mapin will use this addr to calculate
3877                          * an offset, and then set b_addr to the kernel virtual
3878                          * address it allocated for us.
3879                          */
3880                         ASSERT(bp->b_un.b_addr == 0);
3881 
3882                         bp->b_edev = 0;
3883                         bp->b_dev = 0;
3884                         bp->b_lblkno = lbtodb(io_off);
3885                         bp->b_file = vp;
3886                         bp->b_offset = (offset_t)off;
3887                         bp_mapin(bp);
3888 
3889                         /*
3890                          * If doing a write beyond what we believe is EOF,
3891                          * don't bother trying to read the pages from the
3892                          * server; we'll just zero the pages here.  We
3893                          * don't check that the rw flag is S_WRITE here
3894                          * because some implementations may attempt a
3895                          * read access to the buffer before copying data.
3896                          */
3897                         mutex_enter(&rp->r_statelock);
3898                         if (io_off >= rp->r_size && seg == segkmap) {
3899                                 mutex_exit(&rp->r_statelock);
3900                                 bzero(bp->b_un.b_addr, io_len);
3901                         } else {
3902                                 mutex_exit(&rp->r_statelock);
3903                                 error = nfs_bio(bp, cr);
3904                         }
3905 
3906                         /*
3907                          * Unmap the buffer before freeing it.
3908                          */
3909                         bp_mapout(bp);
3910                         pageio_done(bp);
3911 
3912                         if (error == NFS_EOF) {
3913                                 /*
3914                                  * If doing a write system call, just return
3915                                  * zeroed pages; else the user tried to get pages
3916                                  * beyond EOF, so return an error.  We don't check
3917                                  * that the rw flag is S_WRITE here because
3918                                  * some implementations may attempt a read
3919                                  * access to the buffer before copying data.
3920                                  */
3921                                 if (seg == segkmap)
3922                                         error = 0;
3923                                 else
3924                                         error = EFAULT;
3925                         }
3926 
3927                         if (!readahead_issued && !error) {
3928                                 mutex_enter(&rp->r_statelock);
3929                                 rp->r_nextr = io_off + io_len;
3930                                 mutex_exit(&rp->r_statelock);
3931                         }
3932                 }
3933         }
3934 
3935 out:
3936         if (pl == NULL)
3937                 return (error);
3938 
3939         if (error) {
3940                 if (pp != NULL)
3941                         pvn_read_done(pp, B_ERROR);
3942                 return (error);
3943         }
3944 
3945         if (pagefound) {
3946                 se_t se = (rw == S_CREATE ? SE_EXCL : SE_SHARED);
3947 
3948                 /*
3949                  * Page exists in the cache, acquire the appropriate lock.
3950                  * If this fails, start all over again.
3951                  */
3952                 if ((pp = page_lookup(vp, off, se)) == NULL) {
3953 #ifdef DEBUG
3954                         nfs_lostpage++;
3955 #endif
3956                         goto reread;
3957                 }
3958                 pl[0] = pp;
3959                 pl[1] = NULL;
3960                 return (0);
3961         }
3962 
3963         if (pp != NULL)
3964                 pvn_plist_init(pp, pl, plsz, off, io_len, rw);
3965 
3966         return (error);
3967 }
3968 
3969 static void
3970 nfs_readahead(vnode_t *vp, u_offset_t blkoff, caddr_t addr, struct seg *seg,
3971         cred_t *cr)
3972 {
3973         int error;
3974         page_t *pp;
3975         u_offset_t io_off;
3976         size_t io_len;
3977         struct buf *bp;
3978         uint_t bsize, blksize;
3979         rnode_t *rp = VTOR(vp);
3980 
3981         ASSERT(nfs_zone() == VTOMI(vp)->mi_zone);
3982 
3983         bsize = MAX(vp->v_vfsp->vfs_bsize, PAGESIZE);
3984 
3985         mutex_enter(&rp->r_statelock);
3986         if (blkoff < rp->r_size && blkoff + bsize > rp->r_size) {
3987                 /*
3988                  * If less than a block is left in the file,
3989                  * read less than a block.
3990                  */
3991                 blksize = rp->r_size - blkoff;
3992         } else
3993                 blksize = bsize;
3994         mutex_exit(&rp->r_statelock);
3995 
3996         pp = pvn_read_kluster(vp, blkoff, segkmap, addr,
3997             &io_off, &io_len, blkoff, blksize, 1);
3998         /*
3999          * The isra flag passed to the kluster function is 1; we may have
4000          * gotten a return value of NULL for a variety of reasons (# of free
4001          * pages < minfree, someone entered the page on the vnode, etc.).
4002          * In all cases, we want to punt on the readahead.
4003          */
4004         if (pp == NULL)
4005                 return;
4006 
4007         /*
4008          * Now round the request size up to page boundaries.
4009          * This ensures that the entire page will be
4010          * initialized to zeroes if EOF is encountered.
4011          */
4012         io_len = ptob(btopr(io_len));
4013 
4014         bp = pageio_setup(pp, io_len, vp, B_READ);
4015         ASSERT(bp != NULL);
4016 
4017         /*
4018          * pageio_setup should have set b_addr to 0.  This is correct since
4019          * we want to do I/O on a page boundary. bp_mapin() will use this addr
4020          * to calculate an offset, and then set b_addr to the kernel virtual
4021          * address it allocated for us.
4022          */
4023         ASSERT(bp->b_un.b_addr == 0);
4024 
4025         bp->b_edev = 0;
4026         bp->b_dev = 0;
4027         bp->b_lblkno = lbtodb(io_off);
4028         bp->b_file = vp;
4029         bp->b_offset = (offset_t)blkoff;
4030         bp_mapin(bp);
4031 
4032         /*
4033          * If doing a write beyond what we believe is EOF, don't bother trying
4034  * to read the pages from the server; we'll just zero the pages here.
4035          * We don't check that the rw flag is S_WRITE here because some
4036          * implementations may attempt a read access to the buffer before
4037          * copying data.
4038          */
4039         mutex_enter(&rp->r_statelock);
4040         if (io_off >= rp->r_size && seg == segkmap) {
4041                 mutex_exit(&rp->r_statelock);
4042                 bzero(bp->b_un.b_addr, io_len);
4043                 error = 0;
4044         } else {
4045                 mutex_exit(&rp->r_statelock);
4046                 error = nfs_bio(bp, cr);
4047                 if (error == NFS_EOF)
4048                         error = 0;
4049         }
4050 
4051         /*
4052          * Unmap the buffer before freeing it.
4053          */
4054         bp_mapout(bp);
4055         pageio_done(bp);
4056 
4057         pvn_read_done(pp, error ? B_READ | B_ERROR : B_READ);
4058 
4059         /*
4060          * In case of error, set the readahead offset (r_nextr)
4061          * back to the lowest offset, since pvn_read_done() has
4062          * called VN_DISPOSE to destroy the pages.
4063          */
4064         if (error && rp->r_nextr > io_off) {
4065                 mutex_enter(&rp->r_statelock);
4066                 if (rp->r_nextr > io_off)
4067                         rp->r_nextr = io_off;
4068                 mutex_exit(&rp->r_statelock);
4069         }
4070 }
4071 
4072 /*
4073  * Flags are composed of {B_INVAL, B_FREE, B_DONTNEED, B_FORCE}
4074  * If len == 0, do from off to EOF.
4075  *
4076  * The normal cases should be len == 0 && off == 0 (entire vp list),
4077  * len == MAXBSIZE (from segmap_release actions), and len == PAGESIZE
4078  * (from pageout).
4079  */
4080 /* ARGSUSED */
4081 static int
4082 nfs_putpage(vnode_t *vp, offset_t off, size_t len, int flags, cred_t *cr,
4083         caller_context_t *ct)
4084 {
4085         int error;
4086         rnode_t *rp;
4087 
4088         ASSERT(cr != NULL);
4089 
4090         /*
4091          * XXX - Why should this check be made here?
4092          */
4093         if (vp->v_flag & VNOMAP)
4094                 return (ENOSYS);
4095 
4096         if (len == 0 && !(flags & B_INVAL) && vn_is_readonly(vp))
4097                 return (0);
4098 
4099         if (!(flags & B_ASYNC) && nfs_zone() != VTOMI(vp)->mi_zone)
4100                 return (EIO);
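             /*
              * This file implements the NFS Version 2 client vnode
              * operations, so file offsets are limited to 32 bits.
              */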
4101         ASSERT(off <= MAXOFF32_T);
4102 
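             /*
              * Hold r_count across the flush; nfs_frlock(), for example,
              * waits for r_count to drain before flushing the cache and
              * is woken via r_cv when we drop the count below.
              */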
4103         rp = VTOR(vp);
4104         mutex_enter(&rp->r_statelock);
4105         rp->r_count++;
4106         mutex_exit(&rp->r_statelock);
4107         error = nfs_putpages(vp, off, len, flags, cr);
4108         mutex_enter(&rp->r_statelock);
4109         rp->r_count--;
4110         cv_broadcast(&rp->r_cv);
4111         mutex_exit(&rp->r_statelock);
4112 
4113         return (error);
4114 }
4115 
4116 /*
4117  * Write out a single page, possibly klustering adjacent dirty pages.
4118  */
4119 int
4120 nfs_putapage(vnode_t *vp, page_t *pp, u_offset_t *offp, size_t *lenp,
4121         int flags, cred_t *cr)
4122 {
4123         u_offset_t io_off;
4124         u_offset_t lbn_off;
4125         u_offset_t lbn;
4126         size_t io_len;
4127         uint_t bsize;
4128         int error;
4129         rnode_t *rp;
4130 
4131         ASSERT(!vn_is_readonly(vp));
4132         ASSERT(pp != NULL);
4133         ASSERT(cr != NULL);
4134         ASSERT((flags & B_ASYNC) || nfs_zone() == VTOMI(vp)->mi_zone);
4135 
4136         rp = VTOR(vp);
4137         ASSERT(rp->r_count > 0);
4138 
4139         ASSERT(pp->p_offset <= MAXOFF32_T);
4140 
4141         bsize = MAX(vp->v_vfsp->vfs_bsize, PAGESIZE);
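             /*
              * Compute the logical block number containing this page and
              * the file offset at which that block starts.
              */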
4142         lbn = pp->p_offset / bsize;
4143         lbn_off = lbn * bsize;
4144 
4145         /*
4146          * Find a kluster that fits in one block, or in
4147          * one page if pages are bigger than blocks.  If
4148          * there is less file space allocated than a whole
4149          * page, we'll shorten the i/o request below.
4150          */
4151         pp = pvn_write_kluster(vp, pp, &io_off, &io_len, lbn_off,
4152             roundup(bsize, PAGESIZE), flags);
4153 
4154         /*
4155          * pvn_write_kluster shouldn't have returned a page with offset
4156          * behind the original page we were given.  Verify that.
4157          */
4158         ASSERT((pp->p_offset / bsize) >= lbn);
4159 
4160         /*
4161          * Now pp holds the list of dirty pages kept for write back;
4162          * pvn_write_kluster() has already handled invalidation and
4163          * freeing of the pages that were not dirty.  Check for page
4164          * length rounding problems.
4165          */
4166         if (io_off + io_len > lbn_off + bsize) {
4167                 ASSERT((io_off + io_len) - (lbn_off + bsize) < PAGESIZE);
4168                 io_len = lbn_off + bsize - io_off;
4169         }
4170         /*
4171          * The RMODINPROGRESS flag makes sure that nfs(3)_bio() sees a
4172          * consistent value of r_size.  RMODINPROGRESS is set in writerp();
4173          * when it is set, it indicates that a uiomove() is in progress
4174          * and that r_size has not yet been made consistent with the new
4175          * size of the file.  When the uiomove() completes, r_size is
4176          * updated and the RMODINPROGRESS flag is cleared.
4177          *
4178          * Without this handshaking, it is possible that nfs(3)_bio()
4179          * picks up the old value of r_size before the uiomove() in
4180          * writerp() completes.  This would result in the write through
4181          * nfs(3)_bio() being dropped.
4182          *
4183          * More precisely, there is a window between the time the uiomove()
4184          * completes and the time the r_size is updated.  If a VOP_PUTPAGE()
4185          * operation intervenes in this window, the page will be picked up,
4186          * because it is dirty (it will be unlocked, unless it was
4187          * pagecreate'd).  When the page is picked up as dirty, the dirty
4188          * bit is reset (pvn_getdirty()).  In nfs(3)write(), r_size is
4189          * checked.  This will still be the old size.  Therefore the page
4190          * will not be written out.  When segmap_release() calls
4191          * VOP_PUTPAGE(), the page will be found to be clean and the write
4192          * will be dropped.
4193          */
4194         if (rp->r_flags & RMODINPROGRESS) {
4195                 mutex_enter(&rp->r_statelock);
4196                 if ((rp->r_flags & RMODINPROGRESS) &&
4197                     rp->r_modaddr + MAXBSIZE > io_off &&
4198                     rp->r_modaddr < io_off + io_len) {
4199                         page_t *plist;
4200                         /*
4201                          * A write is in progress for this region of the file.
4202                          * If we did not detect RMODINPROGRESS here then this
4203                          * path through nfs_putapage() would eventually go to
4204                          * nfs(3)_bio() and may not write out all of the data
4205                          * in the pages. We end up losing data. So we decide
4206                          * to set the modified bit on each page in the page
4207                          * list and mark the rnode with RDIRTY. This write
4208                          * will be restarted at some later time.
4209                          */
4210                         plist = pp;
4211                         while (plist != NULL) {
4212                                 pp = plist;
4213                                 page_sub(&plist, pp);
4214                                 hat_setmod(pp);
4215                                 page_io_unlock(pp);
4216                                 page_unlock(pp);
4217                         }
4218                         rp->r_flags |= RDIRTY;
4219                         mutex_exit(&rp->r_statelock);
4220                         if (offp)
4221                                 *offp = io_off;
4222                         if (lenp)
4223                                 *lenp = io_len;
4224                         return (0);
4225                 }
4226                 mutex_exit(&rp->r_statelock);
4227         }
4228 
4229         if (flags & B_ASYNC) {
4230                 error = nfs_async_putapage(vp, pp, io_off, io_len, flags, cr,
4231                     nfs_sync_putapage);
4232         } else
4233                 error = nfs_sync_putapage(vp, pp, io_off, io_len, flags, cr);
4234 
4235         if (offp)
4236                 *offp = io_off;
4237         if (lenp)
4238                 *lenp = io_len;
4239         return (error);
4240 }
4241 
4242 static int
4243 nfs_sync_putapage(vnode_t *vp, page_t *pp, u_offset_t io_off, size_t io_len,
4244         int flags, cred_t *cr)
4245 {
4246         int error;
4247         rnode_t *rp;
4248 
4249         flags |= B_WRITE;
4250 
4251         ASSERT(nfs_zone() == VTOMI(vp)->mi_zone);
4252         error = nfs_rdwrlbn(vp, pp, io_off, io_len, flags, cr);
4253 
4254         rp = VTOR(vp);
4255 
4256         if ((error == ENOSPC || error == EDQUOT || error == EACCES) &&
4257             (flags & (B_INVAL|B_FORCE)) != (B_INVAL|B_FORCE)) {
4258                 if (!(rp->r_flags & ROUTOFSPACE)) {
4259                         mutex_enter(&rp->r_statelock);
4260                         rp->r_flags |= ROUTOFSPACE;
4261                         mutex_exit(&rp->r_statelock);
4262                 }
4263                 flags |= B_ERROR;
4264                 pvn_write_done(pp, flags);
4265                 /*
4266                  * If this was not an async thread, then try again to
4267                  * write out the pages, but this time, also destroy
4268                  * them whether or not the write is successful.  This
4269                  * will prevent memory from filling up with these
4270                  * pages and destroying them is the only alternative
4271                  * if they can't be written out.
4272                  *
4273                  * Don't do this if this is an async thread because
4274                  * when the pages are unlocked in pvn_write_done,
4275                  * some other thread could have come along, locked
4276                  * them, and queued for an async thread.  It would be
4277                  * possible for all of the async threads to be tied
4278                  * up waiting to lock the pages again and they would
4279                  * all already be locked and waiting for an async
4280                  * thread to handle them.  Deadlock.
4281                  */
4282                 if (!(flags & B_ASYNC)) {
4283                         error = nfs_putpage(vp, io_off, io_len,
4284                             B_INVAL | B_FORCE, cr, NULL);
4285                 }
4286         } else {
4287                 if (error)
4288                         flags |= B_ERROR;
4289                 else if (rp->r_flags & ROUTOFSPACE) {
4290                         mutex_enter(&rp->r_statelock);
4291                         rp->r_flags &= ~ROUTOFSPACE;
4292                         mutex_exit(&rp->r_statelock);
4293                 }
4294                 pvn_write_done(pp, flags);
4295         }
4296 
4297         return (error);
4298 }
4299 
4300 /* ARGSUSED */
4301 static int
4302 nfs_map(vnode_t *vp, offset_t off, struct as *as, caddr_t *addrp,
4303         size_t len, uchar_t prot, uchar_t maxprot, uint_t flags, cred_t *cr,
4304         caller_context_t *ct)
4305 {
4306         struct segvn_crargs vn_a;
4307         int error;
4308         rnode_t *rp;
4309         struct vattr va;
4310 
4311         if (nfs_zone() != VTOMI(vp)->mi_zone)
4312                 return (EIO);
4313 
4314         if (vp->v_flag & VNOMAP)
4315                 return (ENOSYS);
4316 
4317         if (off > MAXOFF32_T)
4318                 return (EFBIG);
4319 
4320         if (off < 0 || off + len < 0)
4321                 return (ENXIO);
4322 
4323         if (vp->v_type != VREG)
4324                 return (ENODEV);
4325 
4326         /*
4327          * If there is cached data and if close-to-open consistency
4328          * checking is not turned off and if the file system is not
4329          * mounted readonly, then force an over the wire getattr.
4330          * Otherwise, just invoke nfsgetattr to get a copy of the
4331          * attributes.  The attribute cache will be used unless it
4332          * is timed out and if it is, then an over the wire getattr
4333          * will be issued.
4334          */
4335         va.va_mask = AT_ALL;
4336         if (vn_has_cached_data(vp) &&
4337             !(VTOMI(vp)->mi_flags & MI_NOCTO) && !vn_is_readonly(vp))
4338                 error = nfs_getattr_otw(vp, &va, cr);
4339         else
4340                 error = nfsgetattr(vp, &va, cr);
4341         if (error)
4342                 return (error);
4343 
4344         /*
4345          * Check to see if the vnode is currently marked as not cachable.
4346          * This means portions of the file are locked (through VOP_FRLOCK).
4347          * In this case the map request must be refused.  We use
4348          * rp->r_lkserlock to avoid a race with concurrent lock requests.
4349          */
4350         rp = VTOR(vp);
4351 
4352         /*
4353          * Atomically increment r_inmap after acquiring r_rwlock.  The
4354          * idea here is that r_rwlock is acquired to block reads and
4355          * writes, not to protect r_inmap; r_inmap informs nfs_read/write()
4356          * that we are in nfs_map().  Acquiring r_rwlock here, in the
4357          * proper lock order, prevents the deadlock that would have
4358          * occurred had nfs_addmap() acquired it out of order.
4359          *
4360          * Since r_inmap is not protected by any lock, we do not hold
4361          * one while decrementing it either; we atomically decrement
4362          * r_inmap after we release r_lkserlock.
4363          */
4364 
4365         if (nfs_rw_enter_sig(&rp->r_rwlock, RW_WRITER, INTR(vp)))
4366                 return (EINTR);
4367         atomic_inc_uint(&rp->r_inmap);
4368         nfs_rw_exit(&rp->r_rwlock);
4369 
4370         if (nfs_rw_enter_sig(&rp->r_lkserlock, RW_READER, INTR(vp))) {
4371                 atomic_dec_uint(&rp->r_inmap);
4372                 return (EINTR);
4373         }
4374         if (vp->v_flag & VNOCACHE) {
4375                 error = EAGAIN;
4376                 goto done;
4377         }
4378 
4379         /*
4380          * Don't allow concurrent locks and mapping if mandatory locking is
4381          * enabled.
4382          */
4383         if ((flk_has_remote_locks(vp) || lm_has_sleep(vp)) &&
4384             MANDLOCK(vp, va.va_mode)) {
4385                 error = EAGAIN;
4386                 goto done;
4387         }
4388 
4389         as_rangelock(as);
4390         error = choose_addr(as, addrp, len, off, ADDR_VACALIGN, flags);
4391         if (error != 0) {
4392                 as_rangeunlock(as);
4393                 goto done;
4394         }
4395 
4396         vn_a.vp = vp;
4397         vn_a.offset = off;
4398         vn_a.type = (flags & MAP_TYPE);
4399         vn_a.prot = (uchar_t)prot;
4400         vn_a.maxprot = (uchar_t)maxprot;
4401         vn_a.flags = (flags & ~MAP_TYPE);
4402         vn_a.cred = cr;
4403         vn_a.amp = NULL;
4404         vn_a.szc = 0;
4405         vn_a.lgrp_mem_policy_flags = 0;
4406 
4407         error = as_map(as, *addrp, len, segvn_create, &vn_a);
4408         as_rangeunlock(as);
4409 
4410 done:
4411         nfs_rw_exit(&rp->r_lkserlock);
4412         atomic_dec_uint(&rp->r_inmap);
4413         return (error);
4414 }
4415 
4416 /* ARGSUSED */
4417 static int
4418 nfs_addmap(vnode_t *vp, offset_t off, struct as *as, caddr_t addr,
4419         size_t len, uchar_t prot, uchar_t maxprot, uint_t flags, cred_t *cr,
4420         caller_context_t *ct)
4421 {
4422         rnode_t *rp;
4423 
4424         if (vp->v_flag & VNOMAP)
4425                 return (ENOSYS);
4426         if (nfs_zone() != VTOMI(vp)->mi_zone)
4427                 return (EIO);
4428 
4429         rp = VTOR(vp);
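             /*
              * r_mapcnt is maintained in units of pages; btopr() rounds
              * the byte length up to a whole number of pages.
              */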
4430         atomic_add_long((ulong_t *)&rp->r_mapcnt, btopr(len));
4431 
4432         return (0);
4433 }
4434 
4435 /* ARGSUSED */
4436 static int
4437 nfs_frlock(vnode_t *vp, int cmd, struct flock64 *bfp, int flag, offset_t offset,
4438         struct flk_callback *flk_cbp, cred_t *cr, caller_context_t *ct)
4439 {
4440         netobj lm_fh;
4441         int rc;
4442         u_offset_t start, end;
4443         rnode_t *rp;
4444         int error = 0, intr = INTR(vp);
4445 
4446         /* check for valid cmd parameter */
4447         if (cmd != F_GETLK && cmd != F_SETLK && cmd != F_SETLKW)
4448                 return (EINVAL);
4449         if (nfs_zone() != VTOMI(vp)->mi_zone)
4450                 return (EIO);
4451 
4452         /* Verify l_type. */
4453         switch (bfp->l_type) {
4454         case F_RDLCK:
4455                 if (cmd != F_GETLK && !(flag & FREAD))
4456                         return (EBADF);
4457                 break;
4458         case F_WRLCK:
4459                 if (cmd != F_GETLK && !(flag & FWRITE))
4460                         return (EBADF);
4461                 break;
4462         case F_UNLCK:
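                     /*
                      * Don't allow an unlock to be interrupted; giving up
                      * partway through could leave an orphan lock on the
                      * server (see the comment on cache flushing below).
                      */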
4463                 intr = 0;
4464                 break;
4465 
4466         default:
4467                 return (EINVAL);
4468         }
4469 
4470         /* check the validity of the lock range */
4471         if (rc = flk_convert_lock_data(vp, bfp, &start, &end, offset))
4472                 return (rc);
4473         if (rc = flk_check_lock_data(start, end, MAXOFF32_T))
4474                 return (rc);
4475 
4476         /*
4477          * If the filesystem is mounted using local locking, pass the
4478          * request off to the local locking code.
4479          */
4480         if (VTOMI(vp)->mi_flags & MI_LLOCK) {
4481                 if (offset > MAXOFF32_T)
4482                         return (EFBIG);
4483                 if (cmd == F_SETLK || cmd == F_SETLKW) {
4484                         /*
4485                          * For complete safety, we should be holding
4486                          * r_lkserlock.  However, we can't call
4487                          * lm_safelock and then fs_frlock while
4488                          * holding r_lkserlock, so just invoke
4489                          * lm_safelock and expect that this will
4490                          * catch enough of the cases.
4491                          */
4492                         if (!lm_safelock(vp, bfp, cr))
4493                                 return (EAGAIN);
4494                 }
4495                 return (fs_frlock(vp, cmd, bfp, flag, offset, flk_cbp, cr, ct));
4496         }
4497 
4498         rp = VTOR(vp);
4499 
4500         /*
4501          * Check whether the given lock request can proceed, given the
4502          * current file mappings.
4503          */
4504         if (nfs_rw_enter_sig(&rp->r_lkserlock, RW_WRITER, intr))
4505                 return (EINTR);
4506         if (cmd == F_SETLK || cmd == F_SETLKW) {
4507                 if (!lm_safelock(vp, bfp, cr)) {
4508                         rc = EAGAIN;
4509                         goto done;
4510                 }
4511         }
4512 
4513         /*
4514          * Flush the cache after waiting for async I/O to finish.  For new
4515          * locks, this is so that the process gets the latest bits from the
4516          * server.  For unlocks, this is so that other clients see the
4517          * latest bits once the file has been unlocked.  If currently dirty
4518          * pages can't be flushed, then don't allow a lock to be set.  But
4519          * allow unlocks to succeed, to avoid having orphan locks on the
4520          * server.
4521          */
4522         if (cmd != F_GETLK) {
4523                 mutex_enter(&rp->r_statelock);
4524                 while (rp->r_count > 0) {
4525                         if (intr) {
4526                                 klwp_t *lwp = ttolwp(curthread);
4527 
4528                                 if (lwp != NULL)
4529                                         lwp->lwp_nostop++;
4530                                 if (cv_wait_sig(&rp->r_cv, &rp->r_statelock)
4531                                     == 0) {
4532                                         if (lwp != NULL)
4533                                                 lwp->lwp_nostop--;
4534                                         rc = EINTR;
4535                                         break;
4536                                 }
4537                                 if (lwp != NULL)
4538                                         lwp->lwp_nostop--;
4539                         } else
4540                                 cv_wait(&rp->r_cv, &rp->r_statelock);
4541                 }
4542                 mutex_exit(&rp->r_statelock);
4543                 if (rc != 0)
4544                         goto done;
4545                 error = nfs_putpage(vp, (offset_t)0, 0, B_INVAL, cr, ct);
4546                 if (error) {
4547                         if (error == ENOSPC || error == EDQUOT) {
4548                                 mutex_enter(&rp->r_statelock);
4549                                 if (!rp->r_error)
4550                                         rp->r_error = error;
4551                                 mutex_exit(&rp->r_statelock);
4552                         }
4553                         if (bfp->l_type != F_UNLCK) {
4554                                 rc = ENOLCK;
4555                                 goto done;
4556                         }
4557                 }
4558         }
4559 
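             /*
              * Package the file handle as a netobj so the lock manager
              * can identify the file to the server.
              */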
4560         lm_fh.n_len = sizeof (fhandle_t);
4561         lm_fh.n_bytes = (char *)VTOFH(vp);
4562 
4563         /*
4564          * Call the lock manager to do the real work of contacting
4565          * the server and obtaining the lock.
4566          */
4567         rc = lm_frlock(vp, cmd, bfp, flag, offset, cr, &lm_fh, flk_cbp);
4568 
4569         if (rc == 0)
4570                 nfs_lockcompletion(vp, cmd);
4571 
4572 done:
4573         nfs_rw_exit(&rp->r_lkserlock);
4574         return (rc);
4575 }
4576 
4577 /*
4578  * Free storage space associated with the specified vnode.  The portion
4579  * to be freed is specified by bfp->l_start and bfp->l_len (already
4580  * normalized to a "whence" of 0).
4581  *
4582  * This is an experimental facility whose continued existence is not
4583  * guaranteed.  Currently, we only support the special case
4584  * of l_len == 0, meaning free to end of file.
4585  */
4586 /* ARGSUSED */
4587 static int
4588 nfs_space(vnode_t *vp, int cmd, struct flock64 *bfp, int flag,
4589         offset_t offset, cred_t *cr, caller_context_t *ct)
4590 {
4591         int error;
4592 
4593         ASSERT(vp->v_type == VREG);
4594         if (cmd != F_FREESP)
4595                 return (EINVAL);
4596 
4597         if (offset > MAXOFF32_T)
4598                 return (EFBIG);
4599 
4600         if ((bfp->l_start > MAXOFF32_T) || (bfp->l_end > MAXOFF32_T) ||
4601             (bfp->l_len > MAXOFF32_T))
4602                 return (EFBIG);
4603 
4604         if (nfs_zone() != VTOMI(vp)->mi_zone)
4605                 return (EIO);
4606 
4607         error = convoff(vp, bfp, 0, offset);
4608         if (!error) {
4609                 ASSERT(bfp->l_start >= 0);
4610                 if (bfp->l_len == 0) {
4611                         struct vattr va;
4612 
4613                         /*
4614                          * ftruncate should not change the ctime and
4615                          * mtime if we truncate the file to its
4616                          * previous size.
4617                          */
4618                         va.va_mask = AT_SIZE;
4619                         error = nfsgetattr(vp, &va, cr);
4620                         if (error || va.va_size == bfp->l_start)
4621                                 return (error);
4622                         va.va_mask = AT_SIZE;
4623                         va.va_size = bfp->l_start;
4624                         error = nfssetattr(vp, &va, 0, cr);
4625 
4626                         if (error == 0) {
4627                                 if (bfp->l_start == 0) {
4628                                         vnevent_truncate(vp, ct);
4629                                 } else {
4630                                         vnevent_resize(vp, ct);
4631                                 }
4632                         }
4633                 } else
4634                         error = EINVAL;
4635         }
4636 
4637         return (error);
4638 }
4639 
4640 /* ARGSUSED */
4641 static int
4642 nfs_realvp(vnode_t *vp, vnode_t **vpp, caller_context_t *ct)
4643 {
4644 
4645         return (EINVAL);
4646 }
4647 
4648 /*
4649  * Set up and add an address space callback to do the work of the delmap call.
4650  * The callback will be (and must be) deleted in the actual callback function.
4651  *
4652  * This is done in order to take care of the problem that we have with holding
4653  * the address space's a_lock for a long period of time (e.g. if the NFS server
4654  * is down).  Callbacks will be executed in the address space code while the
4655  * a_lock is not held.  Holding the address space's a_lock causes things such
4656  * as ps and fork to hang because they are trying to acquire this lock as well.
4657  */
4658 /* ARGSUSED */
4659 static int
4660 nfs_delmap(vnode_t *vp, offset_t off, struct as *as, caddr_t addr,
4661         size_t len, uint_t prot, uint_t maxprot, uint_t flags, cred_t *cr,
4662         caller_context_t *ct)
4663 {
4664         int                     caller_found;
4665         int                     error;
4666         rnode_t                 *rp;
4667         nfs_delmap_args_t       *dmapp;
4668         nfs_delmapcall_t        *delmap_call;
4669 
4670         if (vp->v_flag & VNOMAP)
4671                 return (ENOSYS);
4672         /*
4673          * A process may not change zones if it has NFS pages mmap'ed
4674          * in, so we can't legitimately get here from the wrong zone.
4675          */
4676         ASSERT(nfs_zone() == VTOMI(vp)->mi_zone);
4677 
4678         rp = VTOR(vp);
4679 
4680         /*
4681          * The way that the address space of this process deletes its mapping
4682          * of this file is via the following call chains:
4683          * - as_free()->SEGOP_UNMAP()/segvn_unmap()->VOP_DELMAP()/nfs_delmap()
4684          * - as_unmap()->SEGOP_UNMAP()/segvn_unmap()->VOP_DELMAP()/nfs_delmap()
4685          *
4686          * With the use of address space callbacks we are allowed to drop the
4687          * address space lock, a_lock, while executing the NFS operations that
4688          * need to go over the wire.  Returning EAGAIN to the caller of this
4689          * function is what drives the execution of the callback that we add
4690          * below.  The callback will be executed by the address space code
4691          * after dropping the a_lock.  When the callback is finished, since
4692          * we dropped the a_lock, it must be re-acquired and segvn_unmap()
4693          * is called again on the same segment to finish the rest of the work
4694          * that needs to happen during unmapping.
4695          *
4696          * This action of calling back into the segment driver causes
4697          * nfs_delmap() to get called again, but since the callback was
4698          * already executed at this point, it already did the work and there
4699          * is nothing left for us to do.
4700          *
4701          * To summarize:
4702          * - The first time nfs_delmap() is called by the current thread, we
4703          * add the caller associated with this delmap to the delmap caller
4704          * list, add the callback, and return EAGAIN.
4705          * - The second time in this call chain that nfs_delmap() is called,
4706          * we find this caller in the delmap caller list, realize there is
4707          * no more work to do, remove this caller from the list, and return
4708          * the error that was set in the callback execution.
4709          */
4710         caller_found = nfs_find_and_delete_delmapcall(rp, &error);
4711         if (caller_found) {
4712                 /*
4713                  * 'error' is from the actual delmap operations.  To avoid
4714                  * hangs, we need to handle the return of EAGAIN differently
4715                  * since this is what drives the callback execution.
4716                  * In this case, we don't want to return EAGAIN and do the
4717                  * callback execution because there are none to execute.
4718                  */
4719                 if (error == EAGAIN)
4720                         return (0);
4721                 else
4722                         return (error);
4723         }
4724 
4725         /* current caller was not in the list */
4726         delmap_call = nfs_init_delmapcall();
4727 
4728         mutex_enter(&rp->r_statelock);
4729         list_insert_tail(&rp->r_indelmap, delmap_call);
4730         mutex_exit(&rp->r_statelock);
4731 
4732         dmapp = kmem_alloc(sizeof (nfs_delmap_args_t), KM_SLEEP);
4733 
4734         dmapp->vp = vp;
4735         dmapp->off = off;
4736         dmapp->addr = addr;
4737         dmapp->len = len;
4738         dmapp->prot = prot;
4739         dmapp->maxprot = maxprot;
4740         dmapp->flags = flags;
4741         dmapp->cr = cr;
4742         dmapp->caller = delmap_call;
4743 
4744         error = as_add_callback(as, nfs_delmap_callback, dmapp,
4745             AS_UNMAP_EVENT, addr, len, KM_SLEEP);
4746 
4747         return (error ? error : EAGAIN);
4748 }
4749 
4750 /*
4751  * Remove some pages from an mmap'd vnode.  Just update the
4752  * count of pages.  If doing close-to-open, then flush all
4753  * of the pages associated with this file.  Otherwise, start
4754  * an asynchronous page flush to write out any dirty pages.
4755  * This will also associate a credential with the rnode which
4756  * can be used to write the pages.
4757  */
4758 /* ARGSUSED */
4759 static void
4760 nfs_delmap_callback(struct as *as, void *arg, uint_t event)
4761 {
4762         int                     error;
4763         rnode_t                 *rp;
4764         mntinfo_t               *mi;
4765         nfs_delmap_args_t       *dmapp = (nfs_delmap_args_t *)arg;
4766 
4767         rp = VTOR(dmapp->vp);
4768         mi = VTOMI(dmapp->vp);
4769 
4770         atomic_add_long((ulong_t *)&rp->r_mapcnt, -btopr(dmapp->len));
4771         ASSERT(rp->r_mapcnt >= 0);
4772 
4773         /*
4774          * Initiate a page flush if there are pages, the file system
4775          * was not mounted readonly, the segment was mapped shared, and
4776          * the pages themselves were writeable.
4777          */
4778         if (vn_has_cached_data(dmapp->vp) && !vn_is_readonly(dmapp->vp) &&
4779             dmapp->flags == MAP_SHARED && (dmapp->maxprot & PROT_WRITE)) {
4780                 mutex_enter(&rp->r_statelock);
4781                 rp->r_flags |= RDIRTY;
4782                 mutex_exit(&rp->r_statelock);
4783                 /*
4784                  * If this is a cross-zone access, a sync putpage won't
4785                  * work, so the best we can do is try an async putpage.
4786                  * That seems better than something more draconian such
4787                  * as discarding the dirty pages.
4788                  */
4789                 if ((mi->mi_flags & MI_NOCTO) ||
4790                     nfs_zone() != mi->mi_zone)
4791                         error = nfs_putpage(dmapp->vp, dmapp->off, dmapp->len,
4792                             B_ASYNC, dmapp->cr, NULL);
4793                 else
4794                         error = nfs_putpage(dmapp->vp, dmapp->off, dmapp->len,
4795                             0, dmapp->cr, NULL);
4796                 if (!error) {
4797                         mutex_enter(&rp->r_statelock);
4798                         error = rp->r_error;
4799                         rp->r_error = 0;
4800                         mutex_exit(&rp->r_statelock);
4801                 }
4802         } else
4803                 error = 0;
4804 
4805         if ((rp->r_flags & RDIRECTIO) || (mi->mi_flags & MI_DIRECTIO))
4806                 (void) nfs_putpage(dmapp->vp, dmapp->off, dmapp->len,
4807                     B_INVAL, dmapp->cr, NULL);
4808 
4809         dmapp->caller->error = error;
4810         (void) as_delete_callback(as, arg);
4811         kmem_free(dmapp, sizeof (nfs_delmap_args_t));
4812 }
4813 
4814 /* ARGSUSED */
4815 static int
4816 nfs_pathconf(vnode_t *vp, int cmd, ulong_t *valp, cred_t *cr,
4817         caller_context_t *ct)
4818 {
4819         int error = 0;
4820 
4821         if (nfs_zone() != VTOMI(vp)->mi_zone)
4822                 return (EIO);
4823         /*
4824          * This looks a little weird because it's written in a general
4825          * manner but we make little use of the cases.  If cntl() ever
4826          * gets widely used, the outer switch will make more sense.
4827          */
4828 
4829         switch (cmd) {
4830 
4831         /*
4832          * Large file spec - answer this new query with a hardcoded
4833          * constant based on the protocol (V2 offsets are 32 bits).
4834          */
4835         case _PC_FILESIZEBITS:
4836                 *valp = 32;
4837                 return (0);
4838 
4839         case _PC_LINK_MAX:
4840         case _PC_NAME_MAX:
4841         case _PC_PATH_MAX:
4842         case _PC_SYMLINK_MAX:
4843         case _PC_CHOWN_RESTRICTED:
4844         case _PC_NO_TRUNC: {
4845                 mntinfo_t *mi;
4846                 struct pathcnf *pc;
4847 
4848                 if ((mi = VTOMI(vp)) == NULL || (pc = mi->mi_pathconf) == NULL)
4849                         return (EINVAL);
4850                 error = _PC_ISSET(cmd, pc->pc_mask);    /* error or bool */
4851                 switch (cmd) {
4852                 case _PC_LINK_MAX:
4853                         *valp = pc->pc_link_max;
4854                         break;
4855                 case _PC_NAME_MAX:
4856                         *valp = pc->pc_name_max;
4857                         break;
4858                 case _PC_PATH_MAX:
4859                 case _PC_SYMLINK_MAX:
4860                         *valp = pc->pc_path_max;
4861                         break;
4862                 case _PC_CHOWN_RESTRICTED:
4863                         /*
4864                          * if we got here, error is really a boolean which
4865                          * indicates whether cmd is set or not.
4866                          */
4867                         *valp = error ? 1 : 0;  /* see above */
4868                         error = 0;
4869                         break;
4870                 case _PC_NO_TRUNC:
4871                         /*
4872                          * if we got here, error is really a boolean which
4873                          * indicates whether cmd is set or not.
4874                          */
4875                         *valp = error ? 1 : 0;  /* see above */
4876                         error = 0;
4877                         break;
4878                 }
4879                 return (error ? EINVAL : 0);
4880                 }
4881 
4882         case _PC_XATTR_EXISTS:
4883                 *valp = 0;
4884                 if (vp->v_vfsp->vfs_flag & VFS_XATTR) {
4885                         vnode_t *avp;
4886                         rnode_t *rp;
4887                         mntinfo_t *mi = VTOMI(vp);
4888 
4889                         if (!(mi->mi_flags & MI_EXTATTR))
4890                                 return (0);
4891 
4892                         rp = VTOR(vp);
4893                         if (nfs_rw_enter_sig(&rp->r_rwlock, RW_READER,
4894                             INTR(vp)))
4895                                 return (EINTR);
4896 
4897                         error = nfslookup_dnlc(vp, XATTR_DIR_NAME, &avp, cr);
4898                         if (error || avp == NULL)
4899                                 error = acl_getxattrdir2(vp, &avp, 0, cr, 0);
4900 
4901                         nfs_rw_exit(&rp->r_rwlock);
4902 
4903                         if (error == 0 && avp != NULL) {
4904                                 error = do_xattr_exists_check(avp, valp, cr);
4905                                 VN_RELE(avp);
4906                         }
4907                 }
4908                 return (error ? EINVAL : 0);
4909 
4910         case _PC_ACL_ENABLED:
4911                 *valp = _ACL_ACLENT_ENABLED;
4912                 return (0);
4913 
4914         default:
4915                 return (EINVAL);
4916         }
4917 }
4918 
4919 /*
4920  * Called by an async thread to do synchronous pageio.  Do the i/o, wait
4921  * for it to complete, and clean up the page list when done.
4922  */
4923 static int
4924 nfs_sync_pageio(vnode_t *vp, page_t *pp, u_offset_t io_off, size_t io_len,
4925         int flags, cred_t *cr)
4926 {
4927         int error;
4928 
4929         ASSERT(nfs_zone() == VTOMI(vp)->mi_zone);
4930         error = nfs_rdwrlbn(vp, pp, io_off, io_len, flags, cr);
4931         if (flags & B_READ)
4932                 pvn_read_done(pp, (error ? B_ERROR : 0) | flags);
4933         else
4934                 pvn_write_done(pp, (error ? B_ERROR : 0) | flags);
4935         return (error);
4936 }
4937 
4938 /* ARGSUSED */
4939 static int
4940 nfs_pageio(vnode_t *vp, page_t *pp, u_offset_t io_off, size_t io_len,
4941         int flags, cred_t *cr, caller_context_t *ct)
4942 {
4943         int error;
4944         rnode_t *rp;
4945 
4946         if (pp == NULL)
4947                 return (EINVAL);
4948 
4949         if (io_off > MAXOFF32_T)
4950                 return (EFBIG);
4951         if (nfs_zone() != VTOMI(vp)->mi_zone)
4952                 return (EIO);
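             /*
              * As in nfs_putpage(), hold r_count across the i/o so that
              * threads waiting on r_cv can synchronize with it.
              */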
4953         rp = VTOR(vp);
4954         mutex_enter(&rp->r_statelock);
4955         rp->r_count++;
4956         mutex_exit(&rp->r_statelock);
4957 
4958         if (flags & B_ASYNC) {
4959                 error = nfs_async_pageio(vp, pp, io_off, io_len, flags, cr,
4960                     nfs_sync_pageio);
4961         } else
4962                 error = nfs_rdwrlbn(vp, pp, io_off, io_len, flags, cr);
4963         mutex_enter(&rp->r_statelock);
4964         rp->r_count--;
4965         cv_broadcast(&rp->r_cv);
4966         mutex_exit(&rp->r_statelock);
4967         return (error);
4968 }
4969 
4970 /* ARGSUSED */
4971 static int
4972 nfs_setsecattr(vnode_t *vp, vsecattr_t *vsecattr, int flag, cred_t *cr,
4973         caller_context_t *ct)
4974 {
4975         int error;
4976         mntinfo_t *mi;
4977 
4978         mi = VTOMI(vp);
4979 
4980         if (nfs_zone() != mi->mi_zone)
4981                 return (EIO);
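             /*
              * Recheck MI_ACL after the call: acl_setacl2() may clear it
              * (presumably when the server turns out not to support the
              * NFS_ACL protocol), in which case we fall through and
              * return ENOSYS.
              */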
4982         if (mi->mi_flags & MI_ACL) {
4983                 error = acl_setacl2(vp, vsecattr, flag, cr);
4984                 if (mi->mi_flags & MI_ACL)
4985                         return (error);
4986         }
4987 
4988         return (ENOSYS);
4989 }
4990 
4991 /* ARGSUSED */
4992 static int
4993 nfs_getsecattr(vnode_t *vp, vsecattr_t *vsecattr, int flag, cred_t *cr,
4994         caller_context_t *ct)
4995 {
4996         int error;
4997         mntinfo_t *mi;
4998 
4999         mi = VTOMI(vp);
5000 
5001         if (nfs_zone() != mi->mi_zone)
5002                 return (EIO);
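             /*
              * As in nfs_setsecattr(), recheck MI_ACL in case
              * acl_getacl2() cleared it; if so, fall back to
              * fs_fab_acl(), which fabricates an ACL from the regular
              * attributes.
              */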
5003         if (mi->mi_flags & MI_ACL) {
5004                 error = acl_getacl2(vp, vsecattr, flag, cr);
5005                 if (mi->mi_flags & MI_ACL)
5006                         return (error);
5007         }
5008 
5009         return (fs_fab_acl(vp, vsecattr, flag, cr, ct));
5010 }
5011 
5012 /* ARGSUSED */
5013 static int
5014 nfs_shrlock(vnode_t *vp, int cmd, struct shrlock *shr, int flag, cred_t *cr,
5015         caller_context_t *ct)
5016 {
5017         int error;
5018         struct shrlock nshr;
5019         struct nfs_owner nfs_owner;
5020         netobj lm_fh;
5021 
5022         if (nfs_zone() != VTOMI(vp)->mi_zone)
5023                 return (EIO);
5024 
5025         /*
5026          * check for valid cmd parameter
5027          */
5028         if (cmd != F_SHARE && cmd != F_UNSHARE && cmd != F_HASREMOTELOCKS)
5029                 return (EINVAL);
5030 
5031         /*
5032          * Check access permissions
5033          */
5034         if (cmd == F_SHARE &&
5035             (((shr->s_access & F_RDACC) && !(flag & FREAD)) ||
5036             ((shr->s_access & F_WRACC) && !(flag & FWRITE))))
5037                 return (EBADF);
5038 
5039         /*
5040          * If the filesystem is mounted using local locking, pass the
5041          * request off to the local share code.
5042          */
5043         if (VTOMI(vp)->mi_flags & MI_LLOCK)
5044                 return (fs_shrlock(vp, cmd, shr, flag, cr, ct));
5045 
5046         switch (cmd) {
5047         case F_SHARE:
5048         case F_UNSHARE:
5049                 lm_fh.n_len = sizeof (fhandle_t);
5050                 lm_fh.n_bytes = (char *)VTOFH(vp);
5051 
5052                 /*
5053                  * If we were passed an owner that is too large to fit in
5054                  * an nfs_owner, it is likely a recursive call from the
5055                  * lock manager client, so pass it straight through.  If
5056                  * it is not an nfs_owner, then simply return an error.
5057                  */
5058                 if (shr->s_own_len > sizeof (nfs_owner.lowner)) {
5059                         if (((struct nfs_owner *)shr->s_owner)->magic !=
5060                             NFS_OWNER_MAGIC)
5061                                 return (EINVAL);
5062 
5063                         if (error = lm_shrlock(vp, cmd, shr, flag, &lm_fh)) {
5064                                 error = set_errno(error);
5065                         }
5066                         return (error);
5067                 }
5068                 /*
5069                  * A remote share reservation's owner is a combination of
5070                  * a magic number, the hostname, and the local owner.
5071                  */
5072                 bzero(&nfs_owner, sizeof (nfs_owner));
5073                 nfs_owner.magic = NFS_OWNER_MAGIC;
5074                 (void) strncpy(nfs_owner.hname, uts_nodename(),
5075                     sizeof (nfs_owner.hname));
5076                 bcopy(shr->s_owner, nfs_owner.lowner, shr->s_own_len);
5077                 nshr.s_access = shr->s_access;
5078                 nshr.s_deny = shr->s_deny;
5079                 nshr.s_sysid = 0;
5080                 nshr.s_pid = ttoproc(curthread)->p_pid;
5081                 nshr.s_own_len = sizeof (nfs_owner);
5082                 nshr.s_owner = (caddr_t)&nfs_owner;
5083 
5084                 if (error = lm_shrlock(vp, cmd, &nshr, flag, &lm_fh)) {
5085                         error = set_errno(error);
5086                 }
5087 
5088                 break;
5089 
5090         case F_HASREMOTELOCKS:
5091                 /*
5092                  * NFS client can't store remote locks itself
5093                  */
5094                 shr->s_access = 0;
5095                 error = 0;
5096                 break;
5097 
5098         default:
5099                 error = EINVAL;
5100                 break;
5101         }
5102 
5103         return (error);
5104 }