--- old/usr/src/uts/common/fs/nfs/nfs_vnops.c
+++ new/usr/src/uts/common/fs/nfs/nfs_vnops.c
   1    1  /*
   2    2   * CDDL HEADER START
   3    3   *
   4    4   * The contents of this file are subject to the terms of the
   5    5   * Common Development and Distribution License (the "License").
   6    6   * You may not use this file except in compliance with the License.
   7    7   *
   8    8   * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9    9   * or http://www.opensolaris.org/os/licensing.
  10   10   * See the License for the specific language governing permissions
  11   11   * and limitations under the License.
  12   12   *
  13   13   * When distributing Covered Code, include this CDDL HEADER in each
  14   14   * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15   15   * If applicable, add the following below this CDDL HEADER, with the
  16   16   * fields enclosed by brackets "[]" replaced with your own identifying
  17   17   * information: Portions Copyright [yyyy] [name of copyright owner]
  18   18   *
  19   19   * CDDL HEADER END
  20   20   */
  21   21  /*
  22   22   * Copyright (c) 1990, 2010, Oracle and/or its affiliates. All rights reserved.
  23   23   *
  24   24   *      Copyright (c) 1983,1984,1985,1986,1987,1988,1989 AT&T.
  25   25   *      All rights reserved.
  26   26   */
  27   27  
  28   28  /*
  29   29   * Copyright (c) 2014, Joyent, Inc. All rights reserved.
  30   30   * Copyright 2015 Nexenta Systems, Inc.  All rights reserved.
  31   31   */
  32   32  
  33   33  #include <sys/param.h>
  34   34  #include <sys/types.h>
  35   35  #include <sys/systm.h>
  36   36  #include <sys/cred.h>
  37   37  #include <sys/time.h>
  38   38  #include <sys/vnode.h>
  39   39  #include <sys/vfs.h>
  40   40  #include <sys/vfs_opreg.h>
  41   41  #include <sys/file.h>
  42   42  #include <sys/filio.h>
  43   43  #include <sys/uio.h>
  44   44  #include <sys/buf.h>
  45   45  #include <sys/mman.h>
  46   46  #include <sys/pathname.h>
  47   47  #include <sys/dirent.h>
  48   48  #include <sys/debug.h>
  49   49  #include <sys/vmsystm.h>
  50   50  #include <sys/fcntl.h>
  51   51  #include <sys/flock.h>
  52   52  #include <sys/swap.h>
  53   53  #include <sys/errno.h>
  54   54  #include <sys/strsubr.h>
  55   55  #include <sys/sysmacros.h>
  56   56  #include <sys/kmem.h>
  57   57  #include <sys/cmn_err.h>
  58   58  #include <sys/pathconf.h>
  59   59  #include <sys/utsname.h>
  60   60  #include <sys/dnlc.h>
  61   61  #include <sys/acl.h>
  62   62  #include <sys/atomic.h>
  63   63  #include <sys/policy.h>
  64   64  #include <sys/sdt.h>
  65   65  
  66   66  #include <rpc/types.h>
  67   67  #include <rpc/auth.h>
  68   68  #include <rpc/clnt.h>
  69   69  
  70   70  #include <nfs/nfs.h>
  71   71  #include <nfs/nfs_clnt.h>
  72   72  #include <nfs/rnode.h>
  73   73  #include <nfs/nfs_acl.h>
  74   74  #include <nfs/lm.h>
  75   75  
  76   76  #include <vm/hat.h>
  77   77  #include <vm/as.h>
  78   78  #include <vm/page.h>
  79   79  #include <vm/pvn.h>
  80   80  #include <vm/seg.h>
  81   81  #include <vm/seg_map.h>
  82   82  #include <vm/seg_kpm.h>
  83   83  #include <vm/seg_vn.h>
  84   84  
  85   85  #include <fs/fs_subr.h>
  86   86  
  87   87  #include <sys/ddi.h>
  88   88  
  89   89  static int      nfs_rdwrlbn(vnode_t *, page_t *, u_offset_t, size_t, int,
  90   90                          cred_t *);
  91   91  static int      nfswrite(vnode_t *, caddr_t, uint_t, int, cred_t *);
  92   92  static int      nfsread(vnode_t *, caddr_t, uint_t, int, size_t *, cred_t *);
  93   93  static int      nfssetattr(vnode_t *, struct vattr *, int, cred_t *);
  94   94  static int      nfslookup_dnlc(vnode_t *, char *, vnode_t **, cred_t *);
  95   95  static int      nfslookup_otw(vnode_t *, char *, vnode_t **, cred_t *, int);
  96   96  static int      nfsrename(vnode_t *, char *, vnode_t *, char *, cred_t *,
  97   97                          caller_context_t *);
  98   98  static int      nfsreaddir(vnode_t *, rddir_cache *, cred_t *);
  99   99  static int      nfs_bio(struct buf *, cred_t *);
 100  100  static int      nfs_getapage(vnode_t *, u_offset_t, size_t, uint_t *,
 101  101                          page_t *[], size_t, struct seg *, caddr_t,
 102  102                          enum seg_rw, cred_t *);
 103  103  static void     nfs_readahead(vnode_t *, u_offset_t, caddr_t, struct seg *,
 104  104                          cred_t *);
 105  105  static int      nfs_sync_putapage(vnode_t *, page_t *, u_offset_t, size_t,
 106  106                          int, cred_t *);
 107  107  static int      nfs_sync_pageio(vnode_t *, page_t *, u_offset_t, size_t,
 108  108                          int, cred_t *);
 109  109  static void     nfs_delmap_callback(struct as *, void *, uint_t);
 110  110  
 111  111  /*
  112  112   * Error flags used to pass information about certain
  113  113   * errors which require special handling.
 114  114   */
 115  115  #define NFS_EOF                 -98
 116  116  
 117  117  /*
 118  118   * These are the vnode ops routines which implement the vnode interface to
 119  119   * the networked file system.  These routines just take their parameters,
 120  120   * make them look networkish by putting the right info into interface structs,
  121  121   * and then call the appropriate remote routine(s) to do the work.
 122  122   *
  123  123   * Note on directory name lookup caching:  If we detect a stale fhandle,
 124  124   * we purge the directory cache relative to that vnode.  This way, the
 125  125   * user won't get burned by the cache repeatedly.  See <nfs/rnode.h> for
 126  126   * more details on rnode locking.
 127  127   */
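            /*
             * A minimal sketch of the stale-fhandle handling described
             * above (illustrative only: "dvp" stands for the affected
             * directory vnode and "error" for an already-decoded NFS
             * status; dnlc_purge_vp() is the DNLC interface involved):
             *
             *      if (error == ESTALE)
             *              dnlc_purge_vp(dvp);
             */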
 128  128  
 129  129  static int      nfs_open(vnode_t **, int, cred_t *, caller_context_t *);
 130  130  static int      nfs_close(vnode_t *, int, int, offset_t, cred_t *,
 131  131                          caller_context_t *);
 132  132  static int      nfs_read(vnode_t *, struct uio *, int, cred_t *,
 133  133                          caller_context_t *);
 134  134  static int      nfs_write(vnode_t *, struct uio *, int, cred_t *,
 135  135                          caller_context_t *);
 136  136  static int      nfs_ioctl(vnode_t *, int, intptr_t, int, cred_t *, int *,
 137  137                          caller_context_t *);
 138  138  static int      nfs_getattr(vnode_t *, struct vattr *, int, cred_t *,
 139  139                          caller_context_t *);
 140  140  static int      nfs_setattr(vnode_t *, struct vattr *, int, cred_t *,
 141  141                          caller_context_t *);
 142  142  static int      nfs_access(vnode_t *, int, int, cred_t *, caller_context_t *);
 143  143  static int      nfs_accessx(void *, int, cred_t *);
 144  144  static int      nfs_readlink(vnode_t *, struct uio *, cred_t *,
 145  145                          caller_context_t *);
 146  146  static int      nfs_fsync(vnode_t *, int, cred_t *, caller_context_t *);
 147  147  static void     nfs_inactive(vnode_t *, cred_t *, caller_context_t *);
 148  148  static int      nfs_lookup(vnode_t *, char *, vnode_t **, struct pathname *,
 149  149                          int, vnode_t *, cred_t *, caller_context_t *,
 150  150                          int *, pathname_t *);
 151  151  static int      nfs_create(vnode_t *, char *, struct vattr *, enum vcexcl,
 152  152                          int, vnode_t **, cred_t *, int, caller_context_t *,
 153  153                          vsecattr_t *);
 154  154  static int      nfs_remove(vnode_t *, char *, cred_t *, caller_context_t *,
 155  155                          int);
 156  156  static int      nfs_link(vnode_t *, vnode_t *, char *, cred_t *,
 157  157                          caller_context_t *, int);
 158  158  static int      nfs_rename(vnode_t *, char *, vnode_t *, char *, cred_t *,
 159  159                          caller_context_t *, int);
 160  160  static int      nfs_mkdir(vnode_t *, char *, struct vattr *, vnode_t **,
 161  161                          cred_t *, caller_context_t *, int, vsecattr_t *);
 162  162  static int      nfs_rmdir(vnode_t *, char *, vnode_t *, cred_t *,
 163  163                          caller_context_t *, int);
 164  164  static int      nfs_symlink(vnode_t *, char *, struct vattr *, char *,
 165  165                          cred_t *, caller_context_t *, int);
 166  166  static int      nfs_readdir(vnode_t *, struct uio *, cred_t *, int *,
 167  167                          caller_context_t *, int);
 168  168  static int      nfs_fid(vnode_t *, fid_t *, caller_context_t *);
 169  169  static int      nfs_rwlock(vnode_t *, int, caller_context_t *);
 170  170  static void     nfs_rwunlock(vnode_t *, int, caller_context_t *);
 171  171  static int      nfs_seek(vnode_t *, offset_t, offset_t *, caller_context_t *);
 172  172  static int      nfs_getpage(vnode_t *, offset_t, size_t, uint_t *,
 173  173                          page_t *[], size_t, struct seg *, caddr_t,
 174  174                          enum seg_rw, cred_t *, caller_context_t *);
 175  175  static int      nfs_putpage(vnode_t *, offset_t, size_t, int, cred_t *,
 176  176                          caller_context_t *);
 177  177  static int      nfs_map(vnode_t *, offset_t, struct as *, caddr_t *, size_t,
 178  178                          uchar_t, uchar_t, uint_t, cred_t *, caller_context_t *);
 179  179  static int      nfs_addmap(vnode_t *, offset_t, struct as *, caddr_t, size_t,
 180  180                          uchar_t, uchar_t, uint_t, cred_t *, caller_context_t *);
 181  181  static int      nfs_frlock(vnode_t *, int, struct flock64 *, int, offset_t,
 182  182                          struct flk_callback *, cred_t *, caller_context_t *);
 183  183  static int      nfs_space(vnode_t *, int, struct flock64 *, int, offset_t,
 184  184                          cred_t *, caller_context_t *);
 185  185  static int      nfs_realvp(vnode_t *, vnode_t **, caller_context_t *);
 186  186  static int      nfs_delmap(vnode_t *, offset_t, struct as *, caddr_t, size_t,
 187  187                          uint_t, uint_t, uint_t, cred_t *, caller_context_t *);
 188  188  static int      nfs_pathconf(vnode_t *, int, ulong_t *, cred_t *,
 189  189                          caller_context_t *);
 190  190  static int      nfs_pageio(vnode_t *, page_t *, u_offset_t, size_t, int,
 191  191                          cred_t *, caller_context_t *);
 192  192  static int      nfs_setsecattr(vnode_t *, vsecattr_t *, int, cred_t *,
 193  193                          caller_context_t *);
 194  194  static int      nfs_getsecattr(vnode_t *, vsecattr_t *, int, cred_t *,
 195  195                          caller_context_t *);
 196  196  static int      nfs_shrlock(vnode_t *, int, struct shrlock *, int, cred_t *,
 197  197                          caller_context_t *);
 198  198  
 199  199  struct vnodeops *nfs_vnodeops;
 200  200  
 201  201  const fs_operation_def_t nfs_vnodeops_template[] = {
 202  202          VOPNAME_OPEN,           { .vop_open = nfs_open },
 203  203          VOPNAME_CLOSE,          { .vop_close = nfs_close },
 204  204          VOPNAME_READ,           { .vop_read = nfs_read },
 205  205          VOPNAME_WRITE,          { .vop_write = nfs_write },
 206  206          VOPNAME_IOCTL,          { .vop_ioctl = nfs_ioctl },
 207  207          VOPNAME_GETATTR,        { .vop_getattr = nfs_getattr },
 208  208          VOPNAME_SETATTR,        { .vop_setattr = nfs_setattr },
 209  209          VOPNAME_ACCESS,         { .vop_access = nfs_access },
 210  210          VOPNAME_LOOKUP,         { .vop_lookup = nfs_lookup },
 211  211          VOPNAME_CREATE,         { .vop_create = nfs_create },
 212  212          VOPNAME_REMOVE,         { .vop_remove = nfs_remove },
 213  213          VOPNAME_LINK,           { .vop_link = nfs_link },
 214  214          VOPNAME_RENAME,         { .vop_rename = nfs_rename },
 215  215          VOPNAME_MKDIR,          { .vop_mkdir = nfs_mkdir },
 216  216          VOPNAME_RMDIR,          { .vop_rmdir = nfs_rmdir },
 217  217          VOPNAME_READDIR,        { .vop_readdir = nfs_readdir },
 218  218          VOPNAME_SYMLINK,        { .vop_symlink = nfs_symlink },
 219  219          VOPNAME_READLINK,       { .vop_readlink = nfs_readlink },
 220  220          VOPNAME_FSYNC,          { .vop_fsync = nfs_fsync },
 221  221          VOPNAME_INACTIVE,       { .vop_inactive = nfs_inactive },
 222  222          VOPNAME_FID,            { .vop_fid = nfs_fid },
 223  223          VOPNAME_RWLOCK,         { .vop_rwlock = nfs_rwlock },
 224  224          VOPNAME_RWUNLOCK,       { .vop_rwunlock = nfs_rwunlock },
 225  225          VOPNAME_SEEK,           { .vop_seek = nfs_seek },
 226  226          VOPNAME_FRLOCK,         { .vop_frlock = nfs_frlock },
 227  227          VOPNAME_SPACE,          { .vop_space = nfs_space },
 228  228          VOPNAME_REALVP,         { .vop_realvp = nfs_realvp },
 229  229          VOPNAME_GETPAGE,        { .vop_getpage = nfs_getpage },
 230  230          VOPNAME_PUTPAGE,        { .vop_putpage = nfs_putpage },
 231  231          VOPNAME_MAP,            { .vop_map = nfs_map },
 232  232          VOPNAME_ADDMAP,         { .vop_addmap = nfs_addmap },
 233  233          VOPNAME_DELMAP,         { .vop_delmap = nfs_delmap },
 234  234          VOPNAME_DUMP,           { .vop_dump = nfs_dump },
 235  235          VOPNAME_PATHCONF,       { .vop_pathconf = nfs_pathconf },
 236  236          VOPNAME_PAGEIO,         { .vop_pageio = nfs_pageio },
 237  237          VOPNAME_SETSECATTR,     { .vop_setsecattr = nfs_setsecattr },
 238  238          VOPNAME_GETSECATTR,     { .vop_getsecattr = nfs_getsecattr },
 239  239          VOPNAME_SHRLOCK,        { .vop_shrlock = nfs_shrlock },
 240  240          VOPNAME_VNEVENT,        { .vop_vnevent = fs_vnevent_support },
 241  241          NULL,                   NULL
 242  242  };
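            /*
             * For reference, a name/function template like the one above
             * is turned into a vnodeops structure with vn_make_ops().  A
             * minimal sketch of such a registration (the call site shown
             * is illustrative; the real one lives in the client's module
             * initialization path, not in this file):
             *
             *      int error;
             *
             *      error = vn_make_ops("nfs", nfs_vnodeops_template,
             *          &nfs_vnodeops);
             *      if (error != 0)
             *              cmn_err(CE_WARN, "nfs: bad vnode ops template");
             */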
 243  243  
 244  244  /*
 245  245   * XXX:  This is referenced in modstubs.s
 246  246   */
 247  247  struct vnodeops *
 248  248  nfs_getvnodeops(void)
 249  249  {
 250  250          return (nfs_vnodeops);
 251  251  }
 252  252  
 253  253  /* ARGSUSED */
 254  254  static int
 255  255  nfs_open(vnode_t **vpp, int flag, cred_t *cr, caller_context_t *ct)
 256  256  {
 257  257          int error;
 258  258          struct vattr va;
 259  259          rnode_t *rp;
 260  260          vnode_t *vp;
 261  261  
 262  262          vp = *vpp;
 263  263          rp = VTOR(vp);
 264  264          if (nfs_zone() != VTOMI(vp)->mi_zone)
 265  265                  return (EIO);
 266  266          mutex_enter(&rp->r_statelock);
 267  267          if (rp->r_cred == NULL) {
 268  268                  crhold(cr);
 269  269                  rp->r_cred = cr;
 270  270          }
 271  271          mutex_exit(&rp->r_statelock);
 272  272  
 273  273          /*
 274  274           * If there is no cached data or if close-to-open
 275  275           * consistency checking is turned off, we can avoid
 276  276           * the over the wire getattr.  Otherwise, if the
 277  277           * file system is mounted readonly, then just verify
 278  278           * the caches are up to date using the normal mechanism.
 279  279           * Else, if the file is not mmap'd, then just mark
 280  280           * the attributes as timed out.  They will be refreshed
 281  281           * and the caches validated prior to being used.
 282  282           * Else, the file system is mounted writeable so
 283  283           * force an over the wire GETATTR in order to ensure
 284  284           * that all cached data is valid.
 285  285           */
 286  286          if (vp->v_count > 1 ||
 287  287              ((vn_has_cached_data(vp) || HAVE_RDDIR_CACHE(rp)) &&
 288  288              !(VTOMI(vp)->mi_flags & MI_NOCTO))) {
 289  289                  if (vn_is_readonly(vp))
 290  290                          error = nfs_validate_caches(vp, cr);
 291  291                  else if (rp->r_mapcnt == 0 && vp->v_count == 1) {
 292  292                          PURGE_ATTRCACHE(vp);
 293  293                          error = 0;
 294  294                  } else {
 295  295                          va.va_mask = AT_ALL;
 296  296                          error = nfs_getattr_otw(vp, &va, cr);
 297  297                  }
 298  298          } else
 299  299                  error = 0;
 300  300  
 301  301          return (error);
 302  302  }
 303  303  
 304  304  /* ARGSUSED */
 305  305  static int
 306  306  nfs_close(vnode_t *vp, int flag, int count, offset_t offset, cred_t *cr,
 307  307          caller_context_t *ct)
 308  308  {
 309  309          rnode_t *rp;
 310  310          int error;
 311  311          struct vattr va;
 312  312  
 313  313          /*
 314  314           * zone_enter(2) prevents processes from changing zones with NFS files
 315  315           * open; if we happen to get here from the wrong zone we can't do
 316  316           * anything over the wire.
 317  317           */
 318  318          if (VTOMI(vp)->mi_zone != nfs_zone()) {
 319  319                  /*
 320  320                   * We could attempt to clean up locks, except we're sure
 321  321                   * that the current process didn't acquire any locks on
  322  322                   * the file: any attempt to lock a file belonging to another zone
 323  323                   * will fail, and one can't lock an NFS file and then change
 324  324                   * zones, as that fails too.
 325  325                   *
 326  326                   * Returning an error here is the sane thing to do.  A
 327  327                   * subsequent call to VN_RELE() which translates to a
 328  328                   * nfs_inactive() will clean up state: if the zone of the
 329  329                   * vnode's origin is still alive and kicking, an async worker
 330  330                   * thread will handle the request (from the correct zone), and
 331  331                   * everything (minus the final nfs_getattr_otw() call) should
  332  332                   * be OK. If the zone is going away, nfs_async_inactive() will
 333  333                   * throw away cached pages inline.
 334  334                   */
 335  335                  return (EIO);
 336  336          }
 337  337  
 338  338          /*
 339  339           * If we are using local locking for this filesystem, then
 340  340           * release all of the SYSV style record locks.  Otherwise,
 341  341           * we are doing network locking and we need to release all
 342  342           * of the network locks.  All of the locks held by this
 343  343           * process on this file are released no matter what the
 344  344           * incoming reference count is.
 345  345           */
 346  346          if (VTOMI(vp)->mi_flags & MI_LLOCK) {
 347  347                  cleanlocks(vp, ttoproc(curthread)->p_pid, 0);
 348  348                  cleanshares(vp, ttoproc(curthread)->p_pid);
 349  349          } else
 350  350                  nfs_lockrelease(vp, flag, offset, cr);
 351  351  
 352  352          if (count > 1)
 353  353                  return (0);
 354  354  
 355  355          /*
 356  356           * If the file has been `unlinked', then purge the
  357  357           * DNLC so that this vnode will get recycled more quickly
 358  358           * and the .nfs* file on the server will get removed.
 359  359           */
 360  360          rp = VTOR(vp);
 361  361          if (rp->r_unldvp != NULL)
 362  362                  dnlc_purge_vp(vp);
 363  363  
 364  364          /*
  365  365           * If the file was open for write and there are pages:
  366  366           * if the file system was mounted using the "no-close-
  367  367           *      to-open" semantics, start an asynchronous flush
  368  368           *      of all of the pages in the file;
  369  369           * otherwise (the mount did not use "no-close-to-open"
  370  370           *      semantics), do a synchronous flush and commit of
  371  371           *      all of the dirty and uncommitted pages.
 372  372           *
 373  373           * The asynchronous flush of the pages in the "nocto" path
 374  374           * mostly just associates a cred pointer with the rnode so
 375  375           * writes which happen later will have a better chance of
 376  376           * working.  It also starts the data being written to the
 377  377           * server, but without unnecessarily delaying the application.
 378  378           */
 379  379          if ((flag & FWRITE) && vn_has_cached_data(vp)) {
 380  380                  if ((VTOMI(vp)->mi_flags & MI_NOCTO)) {
 381  381                          error = nfs_putpage(vp, (offset_t)0, 0, B_ASYNC,
 382  382                              cr, ct);
 383  383                          if (error == EAGAIN)
 384  384                                  error = 0;
 385  385                  } else
 386  386                          error = nfs_putpage(vp, (offset_t)0, 0, 0, cr, ct);
 387  387                  if (!error) {
 388  388                          mutex_enter(&rp->r_statelock);
 389  389                          error = rp->r_error;
 390  390                          rp->r_error = 0;
 391  391                          mutex_exit(&rp->r_statelock);
 392  392                  }
 393  393          } else {
 394  394                  mutex_enter(&rp->r_statelock);
 395  395                  error = rp->r_error;
 396  396                  rp->r_error = 0;
 397  397                  mutex_exit(&rp->r_statelock);
 398  398          }
 399  399  
 400  400          /*
 401  401           * If RWRITEATTR is set, then issue an over the wire GETATTR to
 402  402           * refresh the attribute cache with a set of attributes which
 403  403           * weren't returned from a WRITE.  This will enable the close-
 404  404           * to-open processing to work.
 405  405           */
 406  406          if (rp->r_flags & RWRITEATTR)
 407  407                  (void) nfs_getattr_otw(vp, &va, cr);
 408  408  
 409  409          return (error);
 410  410  }
 411  411  
 412  412  /* ARGSUSED */
 413  413  static int
 414  414  nfs_read(vnode_t *vp, struct uio *uiop, int ioflag, cred_t *cr,
 415  415          caller_context_t *ct)
 416  416  {
 417  417          rnode_t *rp;
 418  418          u_offset_t off;
 419  419          offset_t diff;
 420  420          int on;
 421  421          size_t n;
 422  422          caddr_t base;
 423  423          uint_t flags;
 424  424          int error;
 425  425          mntinfo_t *mi;
 426  426  
 427  427          rp = VTOR(vp);
 428  428          mi = VTOMI(vp);
 429  429  
 430  430          if (nfs_zone() != mi->mi_zone)
 431  431                  return (EIO);
 432  432  
 433  433          ASSERT(nfs_rw_lock_held(&rp->r_rwlock, RW_READER));
 434  434  
 435  435          if (vp->v_type != VREG)
 436  436                  return (EISDIR);
 437  437  
 438  438          if (uiop->uio_resid == 0)
 439  439                  return (0);
 440  440  
 441  441          if (uiop->uio_loffset > MAXOFF32_T)
 442  442                  return (EFBIG);
 443  443  
 444  444          if (uiop->uio_loffset < 0 ||
 445  445              uiop->uio_loffset + uiop->uio_resid > MAXOFF32_T)
 446  446                  return (EINVAL);
 447  447  
 448  448          /*
 449  449           * Bypass VM if caching has been disabled (e.g., locking) or if
 450  450           * using client-side direct I/O and the file is not mmap'd and
 451  451           * there are no cached pages.
 452  452           */
 453  453          if ((vp->v_flag & VNOCACHE) ||
 454  454              (((rp->r_flags & RDIRECTIO) || (mi->mi_flags & MI_DIRECTIO)) &&
 455  455              rp->r_mapcnt == 0 && rp->r_inmap == 0 &&
 456  456              !vn_has_cached_data(vp))) {
 457  457                  size_t bufsize;
 458  458                  size_t resid = 0;
 459  459  
 460  460                  /*
  461  461                   * Let's try to do the read in as large a chunk as
  462  462                   * we can (the filesystem (NFS client) bsize if
  463  463                   * possible/needed).  For V3 this is 32K; for V2, 8K.
 464  464                   */
 465  465                  bufsize = MIN(uiop->uio_resid, VTOMI(vp)->mi_curread);
 466  466                  base = kmem_alloc(bufsize, KM_SLEEP);
 467  467                  do {
 468  468                          n = MIN(uiop->uio_resid, bufsize);
 469  469                          error = nfsread(vp, base, uiop->uio_offset, n,
 470  470                              &resid, cr);
 471  471                          if (!error) {
 472  472                                  n -= resid;
 473  473                                  error = uiomove(base, n, UIO_READ, uiop);
 474  474                          }
 475  475                  } while (!error && uiop->uio_resid > 0 && n > 0);
 476  476                  kmem_free(base, bufsize);
 477  477                  return (error);
 478  478          }
 479  479  
 480  480          error = 0;
 481  481  
 482  482          do {
 483  483                  off = uiop->uio_loffset & MAXBMASK; /* mapping offset */
 484  484                  on = uiop->uio_loffset & MAXBOFFSET; /* Relative offset */
 485  485                  n = MIN(MAXBSIZE - on, uiop->uio_resid);
 486  486  
 487  487                  error = nfs_validate_caches(vp, cr);
 488  488                  if (error)
 489  489                          break;
 490  490  
 491  491                  mutex_enter(&rp->r_statelock);
 492  492                  while (rp->r_flags & RINCACHEPURGE) {
 493  493                          if (!cv_wait_sig(&rp->r_cv, &rp->r_statelock)) {
 494  494                                  mutex_exit(&rp->r_statelock);
 495  495                                  return (EINTR);
 496  496                          }
 497  497                  }
 498  498                  diff = rp->r_size - uiop->uio_loffset;
 499  499                  mutex_exit(&rp->r_statelock);
 500  500                  if (diff <= 0)
 501  501                          break;
 502  502                  if (diff < n)
 503  503                          n = (size_t)diff;
 504  504  
 505  505                  if (vpm_enable) {
 506  506                          /*
 507  507                           * Copy data.
 508  508                           */
 509  509                          error = vpm_data_copy(vp, off + on, n, uiop,
 510  510                              1, NULL, 0, S_READ);
 511  511                  } else {
 512  512                          base = segmap_getmapflt(segkmap, vp, off + on, n,
 513  513                              1, S_READ);
 514  514                          error = uiomove(base + on, n, UIO_READ, uiop);
 515  515                  }
 516  516  
 517  517                  if (!error) {
 518  518                          /*
  519  519                           * If we read a whole block or read to EOF,
  520  520                           * we won't need this buffer again soon.
 521  521                           */
 522  522                          mutex_enter(&rp->r_statelock);
 523  523                          if (n + on == MAXBSIZE ||
 524  524                              uiop->uio_loffset == rp->r_size)
 525  525                                  flags = SM_DONTNEED;
 526  526                          else
 527  527                                  flags = 0;
 528  528                          mutex_exit(&rp->r_statelock);
 529  529                          if (vpm_enable) {
 530  530                                  error = vpm_sync_pages(vp, off, n, flags);
 531  531                          } else {
 532  532                                  error = segmap_release(segkmap, base, flags);
 533  533                          }
 534  534                  } else {
 535  535                          if (vpm_enable) {
 536  536                                  (void) vpm_sync_pages(vp, off, n, 0);
 537  537                          } else {
 538  538                                  (void) segmap_release(segkmap, base, 0);
 539  539                          }
 540  540                  }
 541  541          } while (!error && uiop->uio_resid > 0);
 542  542  
 543  543          return (error);
 544  544  }
 545  545  
 546  546  /* ARGSUSED */
 547  547  static int
 548  548  nfs_write(vnode_t *vp, struct uio *uiop, int ioflag, cred_t *cr,
 549  549          caller_context_t *ct)
 550  550  {
 551  551          rnode_t *rp;
 552  552          u_offset_t off;
 553  553          caddr_t base;
 554  554          uint_t flags;
 555  555          int remainder;
 556  556          size_t n;
 557  557          int on;
 558  558          int error;
 559  559          int resid;
 560  560          offset_t offset;
 561  561          rlim_t limit;
 562  562          mntinfo_t *mi;
 563  563  
 564  564          rp = VTOR(vp);
 565  565  
 566  566          mi = VTOMI(vp);
 567  567          if (nfs_zone() != mi->mi_zone)
 568  568                  return (EIO);
 569  569          if (vp->v_type != VREG)
 570  570                  return (EISDIR);
 571  571  
 572  572          if (uiop->uio_resid == 0)
 573  573                  return (0);
 574  574  
 575  575          if (ioflag & FAPPEND) {
 576  576                  struct vattr va;
 577  577  
 578  578                  /*
 579  579                   * Must serialize if appending.
 580  580                   */
 581  581                  if (nfs_rw_lock_held(&rp->r_rwlock, RW_READER)) {
 582  582                          nfs_rw_exit(&rp->r_rwlock);
 583  583                          if (nfs_rw_enter_sig(&rp->r_rwlock, RW_WRITER,
 584  584                              INTR(vp)))
 585  585                                  return (EINTR);
 586  586                  }
 587  587  
 588  588                  va.va_mask = AT_SIZE;
 589  589                  error = nfsgetattr(vp, &va, cr);
 590  590                  if (error)
 591  591                          return (error);
 592  592                  uiop->uio_loffset = va.va_size;
 593  593          }
 594  594  
 595  595          if (uiop->uio_loffset > MAXOFF32_T)
 596  596                  return (EFBIG);
 597  597  
 598  598          offset = uiop->uio_loffset + uiop->uio_resid;
 599  599  
 600  600          if (uiop->uio_loffset < 0 || offset > MAXOFF32_T)
 601  601                  return (EINVAL);
 602  602  
 603  603          if (uiop->uio_llimit > (rlim64_t)MAXOFF32_T) {
 604  604                  limit = MAXOFF32_T;
 605  605          } else {
 606  606                  limit = (rlim_t)uiop->uio_llimit;
 607  607          }
 608  608  
 609  609          /*
 610  610           * Check to make sure that the process will not exceed
 611  611           * its limit on file size.  It is okay to write up to
 612  612           * the limit, but not beyond.  Thus, the write which
 613  613           * reaches the limit will be short and the next write
 614  614           * will return an error.
 615  615           */
 616  616          remainder = 0;
 617  617          if (offset > limit) {
 618  618                  remainder = offset - limit;
 619  619                  uiop->uio_resid = limit - uiop->uio_offset;
 620  620                  if (uiop->uio_resid <= 0) {
 621  621                          proc_t *p = ttoproc(curthread);
 622  622  
 623  623                          uiop->uio_resid += remainder;
 624  624                          mutex_enter(&p->p_lock);
 625  625                          (void) rctl_action(rctlproc_legacy[RLIMIT_FSIZE],
 626  626                              p->p_rctls, p, RCA_UNSAFE_SIGINFO);
 627  627                          mutex_exit(&p->p_lock);
 628  628                          return (EFBIG);
 629  629                  }
 630  630          }
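                    /*
                     * Worked example of the check above, with illustrative
                     * numbers: if limit == 1000, uio_loffset == 990 and
                     * uio_resid == 20, then offset == 1010 > limit, so
                     * remainder == 10 and uio_resid is trimmed to 10.  The
                     * write proceeds short; at "bottom" the 10 remainder
                     * bytes are added back to uio_resid so the caller sees
                     * them as unwritten, and the next write starts at the
                     * limit and fails with EFBIG.
                     */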
 631  631  
 632  632          if (nfs_rw_enter_sig(&rp->r_lkserlock, RW_READER, INTR(vp)))
 633  633                  return (EINTR);
 634  634  
 635  635          /*
 636  636           * Bypass VM if caching has been disabled (e.g., locking) or if
 637  637           * using client-side direct I/O and the file is not mmap'd and
 638  638           * there are no cached pages.
 639  639           */
 640  640          if ((vp->v_flag & VNOCACHE) ||
 641  641              (((rp->r_flags & RDIRECTIO) || (mi->mi_flags & MI_DIRECTIO)) &&
 642  642              rp->r_mapcnt == 0 && rp->r_inmap == 0 &&
 643  643              !vn_has_cached_data(vp))) {
 644  644                  size_t bufsize;
 645  645                  int count;
 646  646                  uint_t org_offset;
 647  647  
 648  648  nfs_fwrite:
 649  649                  if (rp->r_flags & RSTALE) {
 650  650                          resid = uiop->uio_resid;
 651  651                          offset = uiop->uio_loffset;
 652  652                          error = rp->r_error;
 653  653                          /*
 654  654                           * A close may have cleared r_error, if so,
 655  655                           * propagate ESTALE error return properly
 656  656                           */
 657  657                          if (error == 0)
 658  658                                  error = ESTALE;
 659  659                          goto bottom;
 660  660                  }
 661  661                  bufsize = MIN(uiop->uio_resid, mi->mi_curwrite);
 662  662                  base = kmem_alloc(bufsize, KM_SLEEP);
 663  663                  do {
 664  664                          resid = uiop->uio_resid;
 665  665                          offset = uiop->uio_loffset;
 666  666                          count = MIN(uiop->uio_resid, bufsize);
 667  667                          org_offset = uiop->uio_offset;
 668  668                          error = uiomove(base, count, UIO_WRITE, uiop);
 669  669                          if (!error) {
 670  670                                  error = nfswrite(vp, base, org_offset,
 671  671                                      count, cr);
 672  672                          }
 673  673                  } while (!error && uiop->uio_resid > 0);
 674  674                  kmem_free(base, bufsize);
 675  675                  goto bottom;
 676  676          }
 677  677  
 678  678          do {
 679  679                  off = uiop->uio_loffset & MAXBMASK; /* mapping offset */
 680  680                  on = uiop->uio_loffset & MAXBOFFSET; /* Relative offset */
 681  681                  n = MIN(MAXBSIZE - on, uiop->uio_resid);
 682  682  
 683  683                  resid = uiop->uio_resid;
 684  684                  offset = uiop->uio_loffset;
 685  685  
 686  686                  if (rp->r_flags & RSTALE) {
 687  687                          error = rp->r_error;
 688  688                          /*
 689  689                           * A close may have cleared r_error, if so,
 690  690                           * propagate ESTALE error return properly
 691  691                           */
 692  692                          if (error == 0)
 693  693                                  error = ESTALE;
 694  694                          break;
 695  695                  }
 696  696  
 697  697                  /*
 698  698                   * Don't create dirty pages faster than they
 699  699                   * can be cleaned so that the system doesn't
 700  700                   * get imbalanced.  If the async queue is
 701  701                   * maxed out, then wait for it to drain before
 702  702                   * creating more dirty pages.  Also, wait for
 703  703                   * any threads doing pagewalks in the vop_getattr
 704  704                   * entry points so that they don't block for
 705  705                   * long periods.
 706  706                   */
 707  707                  mutex_enter(&rp->r_statelock);
 708  708                  while ((mi->mi_max_threads != 0 &&
 709  709                      rp->r_awcount > 2 * mi->mi_max_threads) ||
 710  710                      rp->r_gcount > 0) {
 711  711                          if (INTR(vp)) {
 712  712                                  klwp_t *lwp = ttolwp(curthread);
 713  713  
 714  714                                  if (lwp != NULL)
 715  715                                          lwp->lwp_nostop++;
 716  716                                  if (!cv_wait_sig(&rp->r_cv, &rp->r_statelock)) {
 717  717                                          mutex_exit(&rp->r_statelock);
 718  718                                          if (lwp != NULL)
 719  719                                                  lwp->lwp_nostop--;
 720  720                                          error = EINTR;
 721  721                                          goto bottom;
 722  722                                  }
 723  723                                  if (lwp != NULL)
 724  724                                          lwp->lwp_nostop--;
 725  725                          } else
 726  726                                  cv_wait(&rp->r_cv, &rp->r_statelock);
 727  727                  }
 728  728                  mutex_exit(&rp->r_statelock);
 729  729  
 730  730                  /*
 731  731                   * Touch the page and fault it in if it is not in core
 732  732                   * before segmap_getmapflt or vpm_data_copy can lock it.
  733  733                   * This avoids a deadlock when the buffer is mmap'd
  734  734                   * to the same file that we are writing to.
 735  735                   */
 736  736                  uio_prefaultpages((long)n, uiop);
 737  737  
 738  738                  if (vpm_enable) {
 739  739                          /*
 740  740                           * It will use kpm mappings, so no need to
 741  741                           * pass an address.
 742  742                           */
 743  743                          error = writerp(rp, NULL, n, uiop, 0);
 744  744                  } else  {
 745  745                          if (segmap_kpm) {
 746  746                                  int pon = uiop->uio_loffset & PAGEOFFSET;
 747  747                                  size_t pn = MIN(PAGESIZE - pon,
 748  748                                      uiop->uio_resid);
 749  749                                  int pagecreate;
 750  750  
 751  751                                  mutex_enter(&rp->r_statelock);
 752  752                                  pagecreate = (pon == 0) && (pn == PAGESIZE ||
 753  753                                      uiop->uio_loffset + pn >= rp->r_size);
 754  754                                  mutex_exit(&rp->r_statelock);
 755  755  
 756  756                                  base = segmap_getmapflt(segkmap, vp, off + on,
 757  757                                      pn, !pagecreate, S_WRITE);
 758  758  
 759  759                                  error = writerp(rp, base + pon, n, uiop,
 760  760                                      pagecreate);
 761  761  
 762  762                          } else {
 763  763                                  base = segmap_getmapflt(segkmap, vp, off + on,
 764  764                                      n, 0, S_READ);
 765  765                                  error = writerp(rp, base + on, n, uiop, 0);
 766  766                          }
 767  767                  }
 768  768  
 769  769                  if (!error) {
 770  770                          if (mi->mi_flags & MI_NOAC)
 771  771                                  flags = SM_WRITE;
 772  772                          else if (n + on == MAXBSIZE || IS_SWAPVP(vp)) {
 773  773                                  /*
 774  774                                   * Have written a whole block.
 775  775                                   * Start an asynchronous write
 776  776                                   * and mark the buffer to
 777  777                                   * indicate that it won't be
 778  778                                   * needed again soon.
 779  779                                   */
 780  780                                  flags = SM_WRITE | SM_ASYNC | SM_DONTNEED;
 781  781                          } else
 782  782                                  flags = 0;
 783  783                          if ((ioflag & (FSYNC|FDSYNC)) ||
 784  784                              (rp->r_flags & ROUTOFSPACE)) {
 785  785                                  flags &= ~SM_ASYNC;
 786  786                                  flags |= SM_WRITE;
 787  787                          }
 788  788                          if (vpm_enable) {
 789  789                                  error = vpm_sync_pages(vp, off, n, flags);
 790  790                          } else {
 791  791                                  error = segmap_release(segkmap, base, flags);
 792  792                          }
 793  793                  } else {
 794  794                          if (vpm_enable) {
 795  795                                  (void) vpm_sync_pages(vp, off, n, 0);
 796  796                          } else {
 797  797                                  (void) segmap_release(segkmap, base, 0);
 798  798                          }
 799  799                          /*
 800  800                           * In the event that we got an access error while
  801  801                           * faulting in a page for a write-only file, just
 802  802                           * force a write.
 803  803                           */
 804  804                          if (error == EACCES)
 805  805                                  goto nfs_fwrite;
 806  806                  }
 807  807          } while (!error && uiop->uio_resid > 0);
 808  808  
 809  809  bottom:
 810  810          if (error) {
 811  811                  uiop->uio_resid = resid + remainder;
 812  812                  uiop->uio_loffset = offset;
 813  813          } else
 814  814                  uiop->uio_resid += remainder;
 815  815  
 816  816          nfs_rw_exit(&rp->r_lkserlock);
 817  817  
 818  818          return (error);
 819  819  }
 820  820  
 821  821  /*
 822  822   * Flags are composed of {B_ASYNC, B_INVAL, B_FREE, B_DONTNEED}
 823  823   */
 824  824  static int
 825  825  nfs_rdwrlbn(vnode_t *vp, page_t *pp, u_offset_t off, size_t len,
 826  826          int flags, cred_t *cr)
 827  827  {
 828  828          struct buf *bp;
 829  829          int error;
 830  830  
 831  831          ASSERT(nfs_zone() == VTOMI(vp)->mi_zone);
 832  832          bp = pageio_setup(pp, len, vp, flags);
 833  833          ASSERT(bp != NULL);
 834  834  
 835  835          /*
 836  836           * pageio_setup should have set b_addr to 0.  This
 837  837           * is correct since we want to do I/O on a page
 838  838           * boundary.  bp_mapin will use this addr to calculate
 839  839           * an offset, and then set b_addr to the kernel virtual
 840  840           * address it allocated for us.
 841  841           */
 842  842          ASSERT(bp->b_un.b_addr == 0);
 843  843  
 844  844          bp->b_edev = 0;
 845  845          bp->b_dev = 0;
 846  846          bp->b_lblkno = lbtodb(off);
 847  847          bp->b_file = vp;
 848  848          bp->b_offset = (offset_t)off;
 849  849          bp_mapin(bp);
 850  850  
 851  851          error = nfs_bio(bp, cr);
 852  852  
 853  853          bp_mapout(bp);
 854  854          pageio_done(bp);
 855  855  
 856  856          return (error);
 857  857  }
 858  858  
 859  859  /*
 860  860   * Write to file.  Writes to remote server in largest size
 861  861   * chunks that the server can handle.  Write is synchronous.
 862  862   */
 863  863  static int
 864  864  nfswrite(vnode_t *vp, caddr_t base, uint_t offset, int count, cred_t *cr)
 865  865  {
 866  866          rnode_t *rp;
 867  867          mntinfo_t *mi;
 868  868          struct nfswriteargs wa;
 869  869          struct nfsattrstat ns;
 870  870          int error;
 871  871          int tsize;
 872  872          int douprintf;
 873  873  
 874  874          douprintf = 1;
 875  875  
 876  876          rp = VTOR(vp);
 877  877          mi = VTOMI(vp);
 878  878  
 879  879          ASSERT(nfs_zone() == mi->mi_zone);
 880  880  
 881  881          wa.wa_args = &wa.wa_args_buf;
 882  882          wa.wa_fhandle = *VTOFH(vp);
 883  883  
 884  884          do {
 885  885                  tsize = MIN(mi->mi_curwrite, count);
 886  886                  wa.wa_data = base;
 887  887                  wa.wa_begoff = offset;
 888  888                  wa.wa_totcount = tsize;
 889  889                  wa.wa_count = tsize;
 890  890                  wa.wa_offset = offset;
 891  891  
 892  892                  if (mi->mi_io_kstats) {
 893  893                          mutex_enter(&mi->mi_lock);
 894  894                          kstat_runq_enter(KSTAT_IO_PTR(mi->mi_io_kstats));
 895  895                          mutex_exit(&mi->mi_lock);
 896  896                  }
 897  897                  wa.wa_mblk = NULL;
 898  898                  do {
 899  899                          error = rfs2call(mi, RFS_WRITE,
 900  900                              xdr_writeargs, (caddr_t)&wa,
 901  901                              xdr_attrstat, (caddr_t)&ns, cr,
 902  902                              &douprintf, &ns.ns_status, 0, NULL);
 903  903                  } while (error == ENFS_TRYAGAIN);
 904  904                  if (mi->mi_io_kstats) {
 905  905                          mutex_enter(&mi->mi_lock);
 906  906                          kstat_runq_exit(KSTAT_IO_PTR(mi->mi_io_kstats));
 907  907                          mutex_exit(&mi->mi_lock);
 908  908                  }
 909  909  
 910  910                  if (!error) {
 911  911                          error = geterrno(ns.ns_status);
 912  912                          /*
 913  913                           * Can't check for stale fhandle and purge caches
 914  914                           * here because pages are held by nfs_getpage.
 915  915                           * Just mark the attribute cache as timed out
 916  916                           * and set RWRITEATTR to indicate that the file
 917  917                           * was modified with a WRITE operation.
 918  918                           */
 919  919                          if (!error) {
 920  920                                  count -= tsize;
 921  921                                  base += tsize;
 922  922                                  offset += tsize;
 923  923                                  if (mi->mi_io_kstats) {
 924  924                                          mutex_enter(&mi->mi_lock);
 925  925                                          KSTAT_IO_PTR(mi->mi_io_kstats)->
 926  926                                              writes++;
 927  927                                          KSTAT_IO_PTR(mi->mi_io_kstats)->
 928  928                                              nwritten += tsize;
 929  929                                          mutex_exit(&mi->mi_lock);
 930  930                                  }
 931  931                                  lwp_stat_update(LWP_STAT_OUBLK, 1);
 932  932                                  mutex_enter(&rp->r_statelock);
 933  933                                  PURGE_ATTRCACHE_LOCKED(rp);
 934  934                                  rp->r_flags |= RWRITEATTR;
 935  935                                  mutex_exit(&rp->r_statelock);
 936  936                          }
 937  937                  }
 938  938          } while (!error && count);
 939  939  
 940  940          return (error);
 941  941  }
 942  942  
 943  943  /*
 944  944   * Read from a file.  Reads data in largest chunks our interface can handle.
 945  945   */
 946  946  static int
 947  947  nfsread(vnode_t *vp, caddr_t base, uint_t offset,
 948  948      int count, size_t *residp, cred_t *cr)
 949  949  {
 950  950          mntinfo_t *mi;
 951  951          struct nfsreadargs ra;
 952  952          struct nfsrdresult rr;
 953  953          int tsize;
 954  954          int error;
 955  955          int douprintf;
 956  956          failinfo_t fi;
 957  957          rnode_t *rp;
 958  958          struct vattr va;
 959  959          hrtime_t t;
 960  960  
 961  961          rp = VTOR(vp);
 962  962          mi = VTOMI(vp);
 963  963  
 964  964          ASSERT(nfs_zone() == mi->mi_zone);
 965  965  
 966  966          douprintf = 1;
 967  967  
 968  968          ra.ra_fhandle = *VTOFH(vp);
 969  969  
 970  970          fi.vp = vp;
 971  971          fi.fhp = (caddr_t)&ra.ra_fhandle;
 972  972          fi.copyproc = nfscopyfh;
 973  973          fi.lookupproc = nfslookup;
 974  974          fi.xattrdirproc = acl_getxattrdir2;
 975  975  
 976  976          do {
 977  977                  if (mi->mi_io_kstats) {
 978  978                          mutex_enter(&mi->mi_lock);
 979  979                          kstat_runq_enter(KSTAT_IO_PTR(mi->mi_io_kstats));
 980  980                          mutex_exit(&mi->mi_lock);
 981  981                  }
 982  982  
 983  983                  do {
 984  984                          tsize = MIN(mi->mi_curread, count);
 985  985                          rr.rr_data = base;
 986  986                          ra.ra_offset = offset;
 987  987                          ra.ra_totcount = tsize;
 988  988                          ra.ra_count = tsize;
 989  989                          ra.ra_data = base;
 990  990                          t = gethrtime();
 991  991                          error = rfs2call(mi, RFS_READ,
 992  992                              xdr_readargs, (caddr_t)&ra,
 993  993                              xdr_rdresult, (caddr_t)&rr, cr,
 994  994                              &douprintf, &rr.rr_status, 0, &fi);
 995  995                  } while (error == ENFS_TRYAGAIN);
 996  996  
 997  997                  if (mi->mi_io_kstats) {
 998  998                          mutex_enter(&mi->mi_lock);
 999  999                          kstat_runq_exit(KSTAT_IO_PTR(mi->mi_io_kstats));
1000 1000                          mutex_exit(&mi->mi_lock);
1001 1001                  }
1002 1002  
1003 1003                  if (!error) {
1004 1004                          error = geterrno(rr.rr_status);
1005 1005                          if (!error) {
1006 1006                                  count -= rr.rr_count;
1007 1007                                  base += rr.rr_count;
1008 1008                                  offset += rr.rr_count;
1009 1009                                  if (mi->mi_io_kstats) {
1010 1010                                          mutex_enter(&mi->mi_lock);
1011 1011                                          KSTAT_IO_PTR(mi->mi_io_kstats)->reads++;
1012 1012                                          KSTAT_IO_PTR(mi->mi_io_kstats)->nread +=
1013 1013                                              rr.rr_count;
1014 1014                                          mutex_exit(&mi->mi_lock);
1015 1015                                  }
1016 1016                                  lwp_stat_update(LWP_STAT_INBLK, 1);
1017 1017                          }
1018 1018                  }
1019 1019          } while (!error && count && rr.rr_count == tsize);
1020 1020  
1021 1021          *residp = count;
1022 1022  
1023 1023          if (!error) {
1024 1024                  /*
1025 1025                   * Since no error occurred, we have the current
1026 1026                   * attributes and we need to do a cache check and then
1027 1027                   * potentially update the cached attributes.  We can't
1028 1028                   * use the normal attribute check and cache mechanisms
1029 1029                   * because they might cause a cache flush which would
1030 1030                   * deadlock.  Instead, we just check the cache to see
 1031 1031                   * if the attributes have changed.  If they have, then we
1032 1032                   * just mark the attributes as out of date.  The next
1033 1033                   * time that the attributes are checked, they will be
1034 1034                   * out of date, new attributes will be fetched, and
1035 1035                   * the page cache will be flushed.  If the attributes
1036 1036                   * weren't changed, then we just update the cached
1037 1037                   * attributes with these attributes.
1038 1038                   */
1039 1039                  /*
1040 1040                   * If NFS_ACL is supported on the server, then the
 1041 1041                   * attributes returned by the server may have minimal
1042 1042                   * permissions sometimes denying access to users having
1043 1043                   * proper access.  To get the proper attributes, mark
1044 1044                   * the attributes as expired so that they will be
 1045 1045                   * fetched again via the NFS_ACL GETATTR2 procedure.
1046 1046                   */
1047 1047                  error = nattr_to_vattr(vp, &rr.rr_attr, &va);
1048 1048                  mutex_enter(&rp->r_statelock);
1049 1049                  if (error || !CACHE_VALID(rp, va.va_mtime, va.va_size) ||
1050 1050                      (mi->mi_flags & MI_ACL)) {
1051 1051                          mutex_exit(&rp->r_statelock);
1052 1052                          PURGE_ATTRCACHE(vp);
1053 1053                  } else {
1054 1054                          if (rp->r_mtime <= t) {
1055 1055                                  nfs_attrcache_va(vp, &va);
1056 1056                          }
1057 1057                          mutex_exit(&rp->r_statelock);
1058 1058                  }
1059 1059          }
1060 1060  
1061 1061          return (error);
1062 1062  }
1063 1063  
1064 1064  /* ARGSUSED */
1065 1065  static int
1066 1066  nfs_ioctl(vnode_t *vp, int cmd, intptr_t arg, int flag, cred_t *cr, int *rvalp,
1067 1067          caller_context_t *ct)
1068 1068  {
1069 1069  
1070 1070          if (nfs_zone() != VTOMI(vp)->mi_zone)
1071 1071                  return (EIO);
1072 1072          switch (cmd) {
1073 1073                  case _FIODIRECTIO:
1074 1074                          return (nfs_directio(vp, (int)arg, cr));
1075 1075                  default:
1076 1076                          return (ENOTTY);
1077 1077          }
1078 1078  }
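            /*
             * What reaches the _FIODIRECTIO case above is the advice set
             * by userland, e.g. through directio(3C).  A minimal sketch
             * of such a caller (the path name is hypothetical; DIRECTIO_ON
             * is the standard advice value):
             *
             *      #include <sys/types.h>
             *      #include <sys/fcntl.h>
             *      #include <sys/filio.h>
             *
             *      int fd = open("/net/srv/export/f", O_RDWR);
             *      if (ioctl(fd, _FIODIRECTIO, DIRECTIO_ON) < 0)
             *              perror("_FIODIRECTIO");
             */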
1079 1079  
1080 1080  /* ARGSUSED */
1081 1081  static int
1082 1082  nfs_getattr(vnode_t *vp, struct vattr *vap, int flags, cred_t *cr,
1083 1083          caller_context_t *ct)
1084 1084  {
1085 1085          int error;
1086 1086          rnode_t *rp;
1087 1087  
1088 1088          if (nfs_zone() != VTOMI(vp)->mi_zone)
1089 1089                  return (EIO);
1090 1090          /*
1091 1091           * If it has been specified that the return value will
1092 1092           * just be used as a hint, and we are only being asked
1093 1093           * for size, fsid or rdevid, then return the client's
1094 1094           * notion of these values without checking to make sure
1095 1095           * that the attribute cache is up to date.
1096 1096           * The whole point is to avoid an over the wire GETATTR
1097 1097           * call.
1098 1098           */
1099 1099          rp = VTOR(vp);
1100 1100          if (flags & ATTR_HINT) {
1101 1101                  if (vap->va_mask ==
1102 1102                      (vap->va_mask & (AT_SIZE | AT_FSID | AT_RDEV))) {
1103 1103                          mutex_enter(&rp->r_statelock);
 1104 1104                          if (vap->va_mask & AT_SIZE)
 1105 1105                                  vap->va_size = rp->r_size;
 1106 1106                          if (vap->va_mask & AT_FSID)
 1107 1107                                  vap->va_fsid = rp->r_attr.va_fsid;
 1108 1108                          if (vap->va_mask & AT_RDEV)
1109 1109                                  vap->va_rdev = rp->r_attr.va_rdev;
1110 1110                          mutex_exit(&rp->r_statelock);
1111 1111                          return (0);
1112 1112                  }
1113 1113          }
1114 1114  
1115 1115          /*
1116 1116           * Only need to flush pages if asking for the mtime
 1117 1117           * and if there are any dirty pages or any outstanding
1118 1118           * asynchronous (write) requests for this file.
1119 1119           */
1120 1120          if (vap->va_mask & AT_MTIME) {
1121 1121                  if (vn_has_cached_data(vp) &&
1122 1122                      ((rp->r_flags & RDIRTY) || rp->r_awcount > 0)) {
1123 1123                          mutex_enter(&rp->r_statelock);
1124 1124                          rp->r_gcount++;
1125 1125                          mutex_exit(&rp->r_statelock);
1126 1126                          error = nfs_putpage(vp, (offset_t)0, 0, 0, cr, ct);
1127 1127                          mutex_enter(&rp->r_statelock);
1128 1128                          if (error == ENOSPC || error == EDQUOT) {
1129 1129                                  if (!rp->r_error)
1130 1130                                          rp->r_error = error;
1131 1131                          }
1132 1132                          if (--rp->r_gcount == 0)
1133 1133                                  cv_broadcast(&rp->r_cv);
1134 1134                          mutex_exit(&rp->r_statelock);
1135 1135                  }
1136 1136          }
1137 1137  
1138 1138          return (nfsgetattr(vp, vap, cr));
1139 1139  }
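
The ATTR_HINT fast path above tests individual bits of va_mask, which is why
the membership tests must use & rather than |: OR-ing the mask with a nonzero
constant can never yield zero, so such a test is always true.  A
self-contained sketch (mask values are illustrative, mirroring sys/vnode.h):

#include <stdio.h>

#define AT_FSID         0x0010
#define AT_SIZE         0x0080
#define AT_RDEV         0x0800

int
main(void)
{
        unsigned int va_mask = AT_SIZE | AT_FSID;       /* no AT_RDEV */

        /* OR with a nonzero constant: nonzero for every possible mask */
        printf("| test: %d\n", (va_mask | AT_RDEV) != 0);       /* 1 */
        /* AND: nonzero only when the bit was actually requested */
        printf("& test: %d\n", (va_mask & AT_RDEV) != 0);       /* 0 */
        return (0);
}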
1140 1140  
1141 1141  /*ARGSUSED4*/
1142 1142  static int
1143 1143  nfs_setattr(vnode_t *vp, struct vattr *vap, int flags, cred_t *cr,
1144 1144                  caller_context_t *ct)
1145 1145  {
1146 1146          int error;
1147 1147          uint_t mask;
1148 1148          struct vattr va;
1149 1149  
1150 1150          mask = vap->va_mask;
1151 1151  
1152 1152          if (mask & AT_NOSET)
1153 1153                  return (EINVAL);
1154 1154  
1155 1155          if ((mask & AT_SIZE) &&
1156 1156              vap->va_type == VREG &&
1157 1157              vap->va_size > MAXOFF32_T)
1158 1158                  return (EFBIG);
1159 1159  
1160 1160          if (nfs_zone() != VTOMI(vp)->mi_zone)
1161 1161                  return (EIO);
1162 1162  
1163 1163          va.va_mask = AT_UID | AT_MODE;
1164 1164  
1165 1165          error = nfsgetattr(vp, &va, cr);
1166 1166          if (error)
1167 1167                  return (error);
1168 1168  
1169 1169          error = secpolicy_vnode_setattr(cr, vp, vap, &va, flags, nfs_accessx,
1170 1170              vp);
1171 1171  
1172 1172          if (error)
1173 1173                  return (error);
1174 1174  
1175 1175          error = nfssetattr(vp, vap, flags, cr);
1176 1176  
1177 1177          if (error == 0 && (mask & AT_SIZE)) {
1178 1178                  if (vap->va_size == 0) {
1179 1179                          vnevent_truncate(vp, ct);
1180 1180                  } else {
1181 1181                          vnevent_resize(vp, ct);
1182 1182                  }
1183 1183          }
1184 1184  
1185 1185          return (error);
1186 1186  }
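
NFS Version 2 carries file sizes in 32 bits, which is what the EFBIG guard
above enforces.  A sketch of what an application sees (hypothetical path on
a "vers=2" mount; assumes a 64-bit off_t, e.g. a 64-bit compile, so the
second constant is representable):

#include <sys/types.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
        int fd = open("/net/server/export/f", O_RDWR | O_CREAT, 0644);

        if (fd == -1) {
                perror("open");
                return (1);
        }
        /* 0x7fffffff (MAXOFF32_T) is the largest size v2 can express */
        if (ftruncate(fd, 0x7fffffffLL) != 0)
                perror("at MAXOFF32_T");
        if (ftruncate(fd, 0x80000000LL) != 0)
                perror("past MAXOFF32_T");      /* expect EFBIG */
        (void) close(fd);
        return (0);
}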
1187 1187  
1188 1188  static int
1189 1189  nfssetattr(vnode_t *vp, struct vattr *vap, int flags, cred_t *cr)
1190 1190  {
1191 1191          int error;
1192 1192          uint_t mask;
1193 1193          struct nfssaargs args;
1194 1194          struct nfsattrstat ns;
1195 1195          int douprintf;
1196 1196          rnode_t *rp;
1197 1197          struct vattr va;
1198 1198          mode_t omode;
1199 1199          mntinfo_t *mi;
1200 1200          vsecattr_t *vsp;
1201 1201          hrtime_t t;
1202 1202  
1203 1203          mask = vap->va_mask;
1204 1204  
1205 1205          ASSERT(nfs_zone() == VTOMI(vp)->mi_zone);
1206 1206  
1207 1207          rp = VTOR(vp);
1208 1208  
1209 1209          /*
1210 1210           * Only need to flush pages if there are any pages and
1211 1211           * if the file is marked as dirty in some fashion.  The
1212 1212           * file must be flushed so that we can accurately
1213 1213           * determine the size of the file and the cached data
1214 1214           * after the SETATTR returns.  A file is considered to
1215 1215           * be dirty if it is either marked with RDIRTY, has
1216 1216           * outstanding i/o's active, or is mmap'd.  In this
1217 1217           * last case, we can't tell whether there are dirty
1218 1218           * pages, so we flush just to be sure.
1219 1219           */
1220 1220          if (vn_has_cached_data(vp) &&
1221 1221              ((rp->r_flags & RDIRTY) ||
1222 1222              rp->r_count > 0 ||
1223 1223              rp->r_mapcnt > 0)) {
1224 1224                  ASSERT(vp->v_type != VCHR);
1225 1225                  error = nfs_putpage(vp, (offset_t)0, 0, 0, cr, NULL);
1226 1226                  if (error == ENOSPC || error == EDQUOT) {
1227 1227                          mutex_enter(&rp->r_statelock);
1228 1228                          if (!rp->r_error)
1229 1229                                  rp->r_error = error;
1230 1230                          mutex_exit(&rp->r_statelock);
1231 1231                  }
1232 1232          }
1233 1233  
1234 1234          /*
1235 1235           * If the system call was utime(2) or utimes(2) and the
1236 1236           * application did not specify the times, then set the
1237 1237           * mtime nanosecond field to 1 billion.  This will get
1238 1238           * translated from 1 billion nanoseconds to 1 million
1239 1239           * microseconds in the over the wire request.  The
1240 1240           * server will use 1 million in the microsecond field
1241 1241           * to tell whether both the mtime and atime should be
1242 1242           * set to the server's current time.
1243 1243           *
1244 1244           * This is an overload of the protocol and should be
1245 1245           * documented in the NFS Version 2 protocol specification.
1246 1246           */
1247 1247          if ((mask & AT_MTIME) && !(flags & ATTR_UTIME)) {
1248 1248                  vap->va_mtime.tv_nsec = 1000000000;
1249 1249                  if (NFS_TIME_T_OK(vap->va_mtime.tv_sec) &&
1250 1250                      NFS_TIME_T_OK(vap->va_atime.tv_sec)) {
1251 1251                          error = vattr_to_sattr(vap, &args.saa_sa);
1252 1252                  } else {
1253 1253                          /*
1254 1254                           * Use server times. vap time values will not be used.
1255 1255                           * To ensure no time overflow, make sure vap has
1256 1256                           * valid values, but retain the original values.
1257 1257                           */
1258 1258                          timestruc_t     mtime = vap->va_mtime;
1259 1259                          timestruc_t     atime = vap->va_atime;
1260 1260                          time_t          now;
1261 1261  
1262 1262                          now = gethrestime_sec();
1263 1263                          if (NFS_TIME_T_OK(now)) {
1264 1264                                  /* Just in case server does not know of this */
1265 1265                                  vap->va_mtime.tv_sec = now;
1266 1266                                  vap->va_atime.tv_sec = now;
1267 1267                          } else {
1268 1268                                  vap->va_mtime.tv_sec = 0;
1269 1269                                  vap->va_atime.tv_sec = 0;
1270 1270                          }
1271 1271                          error = vattr_to_sattr(vap, &args.saa_sa);
1272 1272                          /* set vap times back on */
1273 1273                          vap->va_mtime = mtime;
1274 1274                          vap->va_atime = atime;
1275 1275                  }
1276 1276          } else {
1277 1277                  /* Either do not set times or use the client specified times */
1278 1278                  error = vattr_to_sattr(vap, &args.saa_sa);
1279 1279          }
1280 1280          if (error) {
1281 1281                  /* req time field(s) overflow - return immediately */
1282 1282                  return (error);
1283 1283          }
1284 1284          args.saa_fh = *VTOFH(vp);
1285 1285  
1286 1286          va.va_mask = AT_MODE;
1287 1287          error = nfsgetattr(vp, &va, cr);
1288 1288          if (error)
1289 1289                  return (error);
1290 1290          omode = va.va_mode;
1291 1291  
1292 1292          mi = VTOMI(vp);
1293 1293  
1294 1294          douprintf = 1;
1295 1295  
1296 1296          t = gethrtime();
1297 1297  
1298 1298          error = rfs2call(mi, RFS_SETATTR,
1299 1299              xdr_saargs, (caddr_t)&args,
1300 1300              xdr_attrstat, (caddr_t)&ns, cr,
1301 1301              &douprintf, &ns.ns_status, 0, NULL);
1302 1302  
1303 1303          /*
1304 1304           * Purge the access cache and ACL cache if changing either the
1305 1305           * owner of the file, the group owner, or the mode.  These may
1306 1306           * change the access permissions of the file, so purge old
1307 1307           * information and start over again.
1308 1308           */
1309 1309          if ((mask & (AT_UID | AT_GID | AT_MODE)) && (mi->mi_flags & MI_ACL)) {
1310 1310                  (void) nfs_access_purge_rp(rp);
1311 1311                  if (rp->r_secattr != NULL) {
1312 1312                          mutex_enter(&rp->r_statelock);
1313 1313                          vsp = rp->r_secattr;
1314 1314                          rp->r_secattr = NULL;
1315 1315                          mutex_exit(&rp->r_statelock);
1316 1316                          if (vsp != NULL)
1317 1317                                  nfs_acl_free(vsp);
1318 1318                  }
1319 1319          }
1320 1320  
1321 1321          if (!error) {
1322 1322                  error = geterrno(ns.ns_status);
1323 1323                  if (!error) {
1324 1324                          /*
1325 1325                           * If changing the size of the file, invalidate
1326 1326                           * any local cached data which is no longer part
1327 1327                           * of the file.  We also possibly invalidate the
1328 1328                           * last page in the file.  We could use
1329 1329                           * pvn_vpzero(), but this would mark the page as
1330 1330                           * modified and require it to be written back to
1331 1331                           * the server for no particularly good reason.
1332 1332                           * This way, if we access it, then we bring it
1333 1333                           * back in.  A read should be cheaper than a
1334 1334                           * write.
1335 1335                           */
1336 1336                          if (mask & AT_SIZE) {
1337 1337                                  nfs_invalidate_pages(vp,
1338 1338                                      (vap->va_size & PAGEMASK), cr);
1339 1339                          }
1340 1340                          (void) nfs_cache_fattr(vp, &ns.ns_attr, &va, t, cr);
1341 1341                          /*
1342 1342                           * If NFS_ACL is supported on the server, then the
1343 1343                           * attributes returned by server may have minimal
1344 1344                           * permissions sometimes denying access to users having
1345 1345                           * proper access.  To get the proper attributes, mark
1346 1346                           * the attributes as expired so that they will be
1347 1347                           * refetched via the NFS_ACL GETATTR2 procedure.
1348 1348                           */
1349 1349                          if (mi->mi_flags & MI_ACL) {
1350 1350                                  PURGE_ATTRCACHE(vp);
1351 1351                          }
1352 1352                          /*
1353 1353                           * This next check attempts to deal with NFS
1354 1354                           * servers which can not handle increasing
1355 1355                           * the size of the file via setattr.  Most
1356 1356                           * of these servers do not return an error,
1357 1357                           * but do not change the size of the file.
1358 1358                           * Hence, this check and then attempt to set
1359 1359                           * the file size by writing 1 byte at the
1360 1360                           * offset of the end of the file that we need.
1361 1361                           */
1362 1362                          if ((mask & AT_SIZE) &&
1363 1363                              ns.ns_attr.na_size < (uint32_t)vap->va_size) {
1364 1364                                  char zb = '\0';
1365 1365  
1366 1366                                  error = nfswrite(vp, &zb,
1367 1367                                      vap->va_size - sizeof (zb),
1368 1368                                      sizeof (zb), cr);
1369 1369                          }
1370 1370                          /*
1371 1371                           * Some servers will change the mode to clear the setuid
1372 1372                           * and setgid bits when changing the uid or gid.  The
1373 1373                           * client needs to compensate appropriately.
1374 1374                           */
1375 1375                          if (mask & (AT_UID | AT_GID)) {
1376 1376                                  int terror;
1377 1377  
1378 1378                                  va.va_mask = AT_MODE;
1379 1379                                  terror = nfsgetattr(vp, &va, cr);
1380 1380                                  if (!terror &&
1381 1381                                      (((mask & AT_MODE) &&
1382 1382                                      va.va_mode != vap->va_mode) ||
1383 1383                                      (!(mask & AT_MODE) &&
1384 1384                                      va.va_mode != omode))) {
1385 1385                                          va.va_mask = AT_MODE;
1386 1386                                          if (mask & AT_MODE)
1387 1387                                                  va.va_mode = vap->va_mode;
1388 1388                                          else
1389 1389                                                  va.va_mode = omode;
1390 1390                                          (void) nfssetattr(vp, &va, 0, cr);
1391 1391                                  }
1392 1392                          }
1393 1393                  } else {
1394 1394                          PURGE_ATTRCACHE(vp);
1395 1395                          PURGE_STALE_FH(error, vp, cr);
1396 1396                  }
1397 1397          } else {
1398 1398                  PURGE_ATTRCACHE(vp);
1399 1399          }
1400 1400  
1401 1401          return (error);
1402 1402  }
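
A sketch of the utime overload described above.  The conversion to the wire
format divides nanoseconds down to the 32-bit microseconds field, so the
otherwise impossible tv_nsec value of 1 billion arrives as exactly 1000000,
the sentinel the server checks for "set both times to the server's time"
(assuming the usual nsec-to-usec division in vattr_to_sattr()):

#include <stdio.h>

int
main(void)
{
        /* sentinel planted by the client when no times were supplied */
        long tv_nsec = 1000000000L;
        unsigned int wire_usec = (unsigned int)(tv_nsec / 1000);

        printf("useconds on the wire: %u%s\n", wire_usec,
            wire_usec == 1000000 ? " (server sets both times)" : "");
        return (0);
}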
1403 1403  
1404 1404  static int
1405 1405  nfs_accessx(void *vp, int mode, cred_t *cr)
1406 1406  {
1407 1407          ASSERT(nfs_zone() == VTOMI((vnode_t *)vp)->mi_zone);
1408 1408          return (nfs_access(vp, mode, 0, cr, NULL));
1409 1409  }
1410 1410  
1411 1411  /* ARGSUSED */
1412 1412  static int
1413 1413  nfs_access(vnode_t *vp, int mode, int flags, cred_t *cr, caller_context_t *ct)
1414 1414  {
1415 1415          struct vattr va;
1416 1416          int error;
1417 1417          mntinfo_t *mi;
1418 1418          int shift = 0;
1419 1419  
1420 1420          mi = VTOMI(vp);
1421 1421  
1422 1422          if (nfs_zone() != mi->mi_zone)
1423 1423                  return (EIO);
1424 1424          if (mi->mi_flags & MI_ACL) {
1425 1425                  error = acl_access2(vp, mode, flags, cr);
1426 1426                  if (mi->mi_flags & MI_ACL)
1427 1427                          return (error);
1428 1428          }
1429 1429  
1430 1430          va.va_mask = AT_MODE | AT_UID | AT_GID;
1431 1431          error = nfsgetattr(vp, &va, cr);
1432 1432          if (error)
1433 1433                  return (error);
1434 1434  
1435 1435          /*
1436 1436           * Disallow write attempts on read-only
1437 1437           * file systems, unless the file is a
1438 1438           * device node.
1439 1439           */
1440 1440          if ((mode & VWRITE) && vn_is_readonly(vp) && !IS_DEVVP(vp))
1441 1441                  return (EROFS);
1442 1442  
1443 1443          /*
1444 1444           * Disallow attempts to access mandatory lock files.
1445 1445           */
1446 1446          if ((mode & (VWRITE | VREAD | VEXEC)) &&
1447 1447              MANDLOCK(vp, va.va_mode))
1448 1448                  return (EACCES);
1449 1449  
1450 1450          /*
1451 1451           * Access check is based on only
1452 1452           * one of owner, group, public.
1453 1453           * If not owner, then check group.
1454 1454           * If not a member of the group,
1455 1455           * then check public access.
1456 1456           */
1457 1457          if (crgetuid(cr) != va.va_uid) {
1458 1458                  shift += 3;
1459 1459                  if (!groupmember(va.va_gid, cr))
1460 1460                          shift += 3;
1461 1461          }
1462 1462  
1463 1463          return (secpolicy_vnode_access2(cr, vp, va.va_uid,
1464 1464              va.va_mode << shift, mode));
1465 1465  }
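
The shift computed above selects which rwx triple of va_mode applies to the
caller before secpolicy_vnode_access2() compares it against the requested
mode.  A standalone sketch of the same selection (constants mirror the
sys/vnode.h values):

#include <stdio.h>

#define VREAD   00400
#define VWRITE  00200
#define VEXEC   00100

static int
granted(unsigned int va_mode, int is_owner, int in_group, int want)
{
        int shift = 0;

        if (!is_owner) {
                shift += 3;             /* not owner: try the group triple */
                if (!in_group)
                        shift += 3;     /* not in group: fall back to other */
        }
        /* the shift moves the selected triple into the owner position */
        return (((va_mode << shift) & want) == want);
}

int
main(void)
{
        /* mode 0754: owner rwx, group r-x, other r-- */
        printf("group, wants w: %d\n", granted(0754, 0, 1, VWRITE)); /* 0 */
        printf("other, wants r: %d\n", granted(0754, 0, 0, VREAD));  /* 1 */
        return (0);
}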
1466 1466  
1467 1467  static int nfs_do_symlink_cache = 1;
1468 1468  
1469 1469  /* ARGSUSED */
1470 1470  static int
1471 1471  nfs_readlink(vnode_t *vp, struct uio *uiop, cred_t *cr, caller_context_t *ct)
1472 1472  {
1473 1473          int error;
1474 1474          struct nfsrdlnres rl;
1475 1475          rnode_t *rp;
1476 1476          int douprintf;
1477 1477          failinfo_t fi;
1478 1478  
1479 1479          /*
1480 1480           * We want to be consistent with UFS semantics so we will return
1481 1481           * EINVAL instead of ENXIO. This violates the XNFS spec and
1482 1482           * RFC 1094, which are wrong anyway. BUGID 1138002.
1483 1483           */
1484 1484          if (vp->v_type != VLNK)
1485 1485                  return (EINVAL);
1486 1486  
1487 1487          if (nfs_zone() != VTOMI(vp)->mi_zone)
1488 1488                  return (EIO);
1489 1489  
1490 1490          rp = VTOR(vp);
1491 1491          if (nfs_do_symlink_cache && rp->r_symlink.contents != NULL) {
1492 1492                  error = nfs_validate_caches(vp, cr);
1493 1493                  if (error)
1494 1494                          return (error);
1495 1495                  mutex_enter(&rp->r_statelock);
1496 1496                  if (rp->r_symlink.contents != NULL) {
1497 1497                          error = uiomove(rp->r_symlink.contents,
1498 1498                              rp->r_symlink.len, UIO_READ, uiop);
1499 1499                          mutex_exit(&rp->r_statelock);
1500 1500                          return (error);
1501 1501                  }
1502 1502                  mutex_exit(&rp->r_statelock);
1503 1503          }
1504 1504  
1506 1506          rl.rl_data = kmem_alloc(NFS_MAXPATHLEN, KM_SLEEP);
1507 1507  
1508 1508          fi.vp = vp;
1509 1509          fi.fhp = NULL;          /* no need to update, filehandle not copied */
1510 1510          fi.copyproc = nfscopyfh;
1511 1511          fi.lookupproc = nfslookup;
1512 1512          fi.xattrdirproc = acl_getxattrdir2;
1513 1513  
1514 1514          douprintf = 1;
1515 1515  
1516 1516          error = rfs2call(VTOMI(vp), RFS_READLINK,
1517 1517              xdr_readlink, (caddr_t)VTOFH(vp),
1518 1518              xdr_rdlnres, (caddr_t)&rl, cr,
1519 1519              &douprintf, &rl.rl_status, 0, &fi);
1520 1520  
1521 1521          if (error) {
1523 1523                  kmem_free((void *)rl.rl_data, NFS_MAXPATHLEN);
1524 1524                  return (error);
1525 1525          }
1526 1526  
1527 1527          error = geterrno(rl.rl_status);
1528 1528          if (!error) {
1529 1529                  error = uiomove(rl.rl_data, (int)rl.rl_count, UIO_READ, uiop);
1530 1530                  if (nfs_do_symlink_cache && rp->r_symlink.contents == NULL) {
1531 1531                          mutex_enter(&rp->r_statelock);
1532 1532                          if (rp->r_symlink.contents == NULL) {
1533 1533                                  rp->r_symlink.contents = rl.rl_data;
1534 1534                                  rp->r_symlink.len = (int)rl.rl_count;
1535 1535                                  rp->r_symlink.size = NFS_MAXPATHLEN;
1536 1536                                  mutex_exit(&rp->r_statelock);
1537 1537                          } else {
1538 1538                                  mutex_exit(&rp->r_statelock);
1540 1540                                  kmem_free((void *)rl.rl_data,
1541 1541                                      NFS_MAXPATHLEN);
1542 1542                          }
1543 1543                  } else {
1545 1545                          kmem_free((void *)rl.rl_data, NFS_MAXPATHLEN);
1546 1546                  }
1547 1547          } else {
1548 1548                  PURGE_STALE_FH(error, vp, cr);
1550 1550                  kmem_free((void *)rl.rl_data, NFS_MAXPATHLEN);
1551 1551          }
1552 1552  
1553 1553          /*
1554 1554           * Conform to UFS semantics (see comment above)
1555 1555           */
1556 1556          return (error == ENXIO ? EINVAL : error);
1557 1557  }
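
The symlink cache above treats the unlocked test of r_symlink.contents as a
hint only: the decision to use or install the cached contents is made again
under r_statelock, and the loser of an install race frees its own buffer.  A
generic sketch of the pattern with hypothetical userland types:

#include <pthread.h>
#include <stdlib.h>

struct symcache {
        pthread_mutex_t lock;
        char            *contents;      /* NULL while nothing is cached */
        size_t          len;
};

/* called after an over-the-wire READLINK filled 'data' */
static void
symcache_fill(struct symcache *sc, char *data, size_t len)
{
        (void) pthread_mutex_lock(&sc->lock);
        if (sc->contents == NULL) {
                sc->contents = data;            /* won the race: install */
                sc->len = len;
                (void) pthread_mutex_unlock(&sc->lock);
        } else {
                (void) pthread_mutex_unlock(&sc->lock);
                free(data);                     /* lost: discard our copy */
        }
}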
1558 1558  
1559 1559  /*
1560 1560   * Flush local dirty pages to stable storage on the server.
1561 1561   *
1562 1562   * If FNODSYNC is specified, then there is nothing to do because
1563 1563   * metadata changes are not cached on the client before being
1564 1564   * sent to the server.
1565 1565   */
1566 1566  /* ARGSUSED */
1567 1567  static int
1568 1568  nfs_fsync(vnode_t *vp, int syncflag, cred_t *cr, caller_context_t *ct)
1569 1569  {
1570 1570          int error;
1571 1571  
1572 1572          if ((syncflag & FNODSYNC) || IS_SWAPVP(vp))
1573 1573                  return (0);
1574 1574  
1575 1575          if (nfs_zone() != VTOMI(vp)->mi_zone)
1576 1576                  return (EIO);
1577 1577  
1578 1578          error = nfs_putpage(vp, (offset_t)0, 0, 0, cr, ct);
1579 1579          if (!error)
1580 1580                  error = VTOR(vp)->r_error;
1581 1581          return (error);
1582 1582  }
1583 1583  
1585 1585  /*
1586 1586   * Weirdness: if the file was removed or the target of a rename
1587 1587   * operation while it was open, it got renamed instead.  Here we
1588 1588   * remove the renamed file.
1589 1589   */
1590 1590  /* ARGSUSED */
1591 1591  static void
1592 1592  nfs_inactive(vnode_t *vp, cred_t *cr, caller_context_t *ct)
1593 1593  {
1594 1594          rnode_t *rp;
1595 1595  
1596 1596          ASSERT(vp != DNLC_NO_VNODE);
1597 1597  
1598 1598          /*
1599 1599           * If this is coming from the wrong zone, we let someone in the right
1600 1600           * zone take care of it asynchronously.  We can get here due to
1601 1601           * VN_RELE() being called from pageout() or fsflush().  This call may
1602 1602           * potentially turn into an expensive no-op if, for instance, v_count
1603 1603           * gets incremented in the meantime, but it's still correct.
1604 1604           */
1605 1605          if (nfs_zone() != VTOMI(vp)->mi_zone) {
1606 1606                  nfs_async_inactive(vp, cr, nfs_inactive);
1607 1607                  return;
1608 1608          }
1609 1609  
1610 1610          rp = VTOR(vp);
1611 1611  redo:
1612 1612          if (rp->r_unldvp != NULL) {
1613 1613                  /*
1614 1614                   * Save the vnode pointer for the directory where the
1615 1615                   * unlinked-open file got renamed, then set it to NULL
1616 1616                   * to prevent another thread from getting here before
1617 1617                   * we're done with the remove.  While we have the
1618 1618                   * statelock, make local copies of the pertinent rnode
1619 1619                   * fields.  If we weren't to do this in an atomic way,
1620 1620                   * the unl* fields could become inconsistent with respect
1621 1621                   * to each other due to a race condition between this
1622 1622                   * code and nfs_remove().  See bug report 1034328.
1623 1623                   */
1624 1624                  mutex_enter(&rp->r_statelock);
1625 1625                  if (rp->r_unldvp != NULL) {
1626 1626                          vnode_t *unldvp;
1627 1627                          char *unlname;
1628 1628                          cred_t *unlcred;
1629 1629                          struct nfsdiropargs da;
1630 1630                          enum nfsstat status;
1631 1631                          int douprintf;
1632 1632                          int error;
1633 1633  
1634 1634                          unldvp = rp->r_unldvp;
1635 1635                          rp->r_unldvp = NULL;
1636 1636                          unlname = rp->r_unlname;
1637 1637                          rp->r_unlname = NULL;
1638 1638                          unlcred = rp->r_unlcred;
1639 1639                          rp->r_unlcred = NULL;
1640 1640                          mutex_exit(&rp->r_statelock);
1641 1641  
1642 1642                          /*
1643 1643                           * If there are any dirty pages left, then flush
1644 1644                           * them.  This is unfortunate because they just
1645 1645                           * may get thrown away during the remove operation,
1646 1646                           * but we have to do this for correctness.
1647 1647                           */
1648 1648                          if (vn_has_cached_data(vp) &&
1649 1649                              ((rp->r_flags & RDIRTY) || rp->r_count > 0)) {
1650 1650                                  ASSERT(vp->v_type != VCHR);
1651 1651                                  error = nfs_putpage(vp, (offset_t)0, 0, 0,
1652 1652                                      cr, ct);
1653 1653                                  if (error) {
1654 1654                                          mutex_enter(&rp->r_statelock);
1655 1655                                          if (!rp->r_error)
1656 1656                                                  rp->r_error = error;
1657 1657                                          mutex_exit(&rp->r_statelock);
1658 1658                                  }
1659 1659                          }
1660 1660  
1661 1661                          /*
1662 1662                           * Do the remove operation on the renamed file
1663 1663                           */
1664 1664                          setdiropargs(&da, unlname, unldvp);
1665 1665  
1666 1666                          douprintf = 1;
1667 1667  
1668 1668                          (void) rfs2call(VTOMI(unldvp), RFS_REMOVE,
1669 1669                              xdr_diropargs, (caddr_t)&da,
1670 1670                              xdr_enum, (caddr_t)&status, unlcred,
1671 1671                              &douprintf, &status, 0, NULL);
1672 1672  
1673 1673                          if (HAVE_RDDIR_CACHE(VTOR(unldvp)))
1674 1674                                  nfs_purge_rddir_cache(unldvp);
1675 1675                          PURGE_ATTRCACHE(unldvp);
1676 1676  
1677 1677                          /*
1678 1678                           * Release stuff held for the remove
1679 1679                           */
1680 1680                          VN_RELE(unldvp);
1681 1681                          kmem_free(unlname, MAXNAMELEN);
1682 1682                          crfree(unlcred);
1683 1683                          goto redo;
1684 1684                  }
1685 1685                  mutex_exit(&rp->r_statelock);
1686 1686          }
1687 1687  
1688 1688          rp_addfree(rp, cr);
1689 1689  }
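
The r_unl* fields above are captured and cleared in a single critical
section so a racing nfs_remove() can never observe them half-cleared, and
the over-the-wire remove then runs with no lock held.  A minimal sketch of
that snapshot-and-clear step (hypothetical types standing in for
r_unldvp/r_unlname/r_unlcred):

#include <pthread.h>
#include <stdlib.h>

struct unlinked {
        pthread_mutex_t lock;
        char            *dir;
        char            *name;
        char            *cred;
};

static void
finish_remove(struct unlinked *u)
{
        char *dir, *name, *cred;

        /* capture and clear all three fields atomically */
        (void) pthread_mutex_lock(&u->lock);
        dir = u->dir;   u->dir = NULL;
        name = u->name; u->name = NULL;
        cred = u->cred; u->cred = NULL;
        (void) pthread_mutex_unlock(&u->lock);

        if (dir != NULL) {
                /* the slow remove itself would happen here, lock-free */
                free(name);
                free(cred);
                free(dir);
        }
}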
1690 1690  
1691 1691  /*
1692 1692   * Remote file system operations having to do with directory manipulation.
1693 1693   */
1694 1694  
1695 1695  /* ARGSUSED */
1696 1696  static int
1697 1697  nfs_lookup(vnode_t *dvp, char *nm, vnode_t **vpp, struct pathname *pnp,
1698 1698          int flags, vnode_t *rdir, cred_t *cr, caller_context_t *ct,
1699 1699          int *direntflags, pathname_t *realpnp)
1700 1700  {
1701 1701          int error;
1702 1702          vnode_t *vp;
1703 1703          vnode_t *avp = NULL;
1704 1704          rnode_t *drp;
1705 1705  
1706 1706          if (nfs_zone() != VTOMI(dvp)->mi_zone)
1707 1707                  return (EPERM);
1708 1708  
1709 1709          drp = VTOR(dvp);
1710 1710  
1711 1711          /*
1712 1712           * Are we looking up extended attributes?  If so, "dvp" is
1713 1713           * the file or directory for which we want attributes, and
1714 1714           * we need a lookup of the hidden attribute directory
1715 1715           * before we lookup the rest of the path.
1716 1716           */
1717 1717          if (flags & LOOKUP_XATTR) {
1718 1718                  bool_t cflag = ((flags & CREATE_XATTR_DIR) != 0);
1719 1719                  mntinfo_t *mi;
1720 1720  
1721 1721                  mi = VTOMI(dvp);
1722 1722                  if (!(mi->mi_flags & MI_EXTATTR))
1723 1723                          return (EINVAL);
1724 1724  
1725 1725                  if (nfs_rw_enter_sig(&drp->r_rwlock, RW_READER, INTR(dvp)))
1726 1726                          return (EINTR);
1727 1727  
1728 1728                  (void) nfslookup_dnlc(dvp, XATTR_DIR_NAME, &avp, cr);
1729 1729                  if (avp == NULL)
1730 1730                          error = acl_getxattrdir2(dvp, &avp, cflag, cr, 0);
1731 1731                  else
1732 1732                          error = 0;
1733 1733  
1734 1734                  nfs_rw_exit(&drp->r_rwlock);
1735 1735  
1736 1736                  if (error) {
1737 1737                          if (mi->mi_flags & MI_EXTATTR)
1738 1738                                  return (error);
1739 1739                          return (EINVAL);
1740 1740                  }
1741 1741                  dvp = avp;
1742 1742                  drp = VTOR(dvp);
1743 1743          }
1744 1744  
1745 1745          if (nfs_rw_enter_sig(&drp->r_rwlock, RW_READER, INTR(dvp))) {
1746 1746                  error = EINTR;
1747 1747                  goto out;
1748 1748          }
1749 1749  
1750 1750          error = nfslookup(dvp, nm, vpp, pnp, flags, rdir, cr, 0);
1751 1751  
1752 1752          nfs_rw_exit(&drp->r_rwlock);
1753 1753  
1754 1754          /*
1755 1755           * If vnode is a device, create special vnode.
1756 1756           */
1757 1757          if (!error && IS_DEVVP(*vpp)) {
1758 1758                  vp = *vpp;
1759 1759                  *vpp = specvp(vp, vp->v_rdev, vp->v_type, cr);
1760 1760                  VN_RELE(vp);
1761 1761          }
1762 1762  
1763 1763  out:
1764 1764          if (avp != NULL)
1765 1765                  VN_RELE(avp);
1766 1766  
1767 1767          return (error);
1768 1768  }
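
LOOKUP_XATTR is the path taken for named attributes; from userland it is
driven by attropen(3C) (or openat(2) with O_XATTR).  A sketch (path
hypothetical; per the MI_EXTATTR check above, expect EINVAL when the mount
does not support extended attributes):

#include <sys/types.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
        /* open (creating) the named attribute "myattr" of the file */
        int afd = attropen("/net/server/export/f", "myattr",
            O_RDWR | O_CREAT, 0644);

        if (afd == -1) {
                perror("attropen");
                return (1);
        }
        (void) write(afd, "x", 1);
        (void) close(afd);
        return (0);
}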
1769 1769  
1770 1770  static int nfs_lookup_neg_cache = 1;
1771 1771  
1772 1772  #ifdef DEBUG
1773 1773  static int nfs_lookup_dnlc_hits = 0;
1774 1774  static int nfs_lookup_dnlc_misses = 0;
1775 1775  static int nfs_lookup_dnlc_neg_hits = 0;
1776 1776  static int nfs_lookup_dnlc_disappears = 0;
1777 1777  static int nfs_lookup_dnlc_lookups = 0;
1778 1778  #endif
1779 1779  
1780 1780  /* ARGSUSED */
1781 1781  int
1782 1782  nfslookup(vnode_t *dvp, char *nm, vnode_t **vpp, struct pathname *pnp,
1783 1783          int flags, vnode_t *rdir, cred_t *cr, int rfscall_flags)
1784 1784  {
1785 1785          int error;
1786 1786  
1787 1787          ASSERT(nfs_zone() == VTOMI(dvp)->mi_zone);
1788 1788  
1789 1789          /*
1790 1790           * If lookup is for "", just return dvp.  Don't need
1791 1791           * to send it over the wire, look it up in the dnlc,
1792 1792           * or perform any access checks.
1793 1793           */
1794 1794          if (*nm == '\0') {
1795 1795                  VN_HOLD(dvp);
1796 1796                  *vpp = dvp;
1797 1797                  return (0);
1798 1798          }
1799 1799  
1800 1800          /*
1801 1801           * Can't do lookups in non-directories.
1802 1802           */
1803 1803          if (dvp->v_type != VDIR)
1804 1804                  return (ENOTDIR);
1805 1805  
1806 1806          /*
1807 1807           * If we're called with RFSCALL_SOFT, it's important that
1808 1808           * the only rfscall is one we make directly; if we permit
1809 1809           * an access call because we're looking up "." or validating
1810 1810           * a dnlc hit, we'll deadlock because that rfscall will not
1811 1811           * have RFSCALL_SOFT set.
1812 1812           */
1813 1813          if (rfscall_flags & RFSCALL_SOFT)
1814 1814                  goto callit;
1815 1815  
1816 1816          /*
1817 1817           * If lookup is for ".", just return dvp.  Don't need
1818 1818           * to send it over the wire or look it up in the dnlc,
1819 1819           * just need to check access.
1820 1820           */
1821 1821          if (strcmp(nm, ".") == 0) {
1822 1822                  error = nfs_access(dvp, VEXEC, 0, cr, NULL);
1823 1823                  if (error)
1824 1824                          return (error);
1825 1825                  VN_HOLD(dvp);
1826 1826                  *vpp = dvp;
1827 1827                  return (0);
1828 1828          }
1829 1829  
1830 1830          /*
1831 1831           * Lookup this name in the DNLC.  If there was a valid entry,
1832 1832           * then return the results of the lookup.
1833 1833           */
1834 1834          error = nfslookup_dnlc(dvp, nm, vpp, cr);
1835 1835          if (error || *vpp != NULL)
1836 1836                  return (error);
1837 1837  
1838 1838  callit:
1839 1839          error = nfslookup_otw(dvp, nm, vpp, cr, rfscall_flags);
1840 1840  
1841 1841          return (error);
1842 1842  }
1843 1843  
1844 1844  static int
1845 1845  nfslookup_dnlc(vnode_t *dvp, char *nm, vnode_t **vpp, cred_t *cr)
1846 1846  {
1847 1847          int error;
1848 1848          vnode_t *vp;
1849 1849  
1850 1850          ASSERT(*nm != '\0');
1851 1851          ASSERT(nfs_zone() == VTOMI(dvp)->mi_zone);
1852 1852  
1853 1853          /*
1854 1854           * Lookup this name in the DNLC.  If successful, then validate
1855 1855           * the caches and then recheck the DNLC.  The DNLC is rechecked
1856 1856           * just in case this entry got invalidated during the call
1857 1857           * to nfs_validate_caches.
1858 1858           *
1859 1859           * An assumption is being made that it is safe to claim that
1860 1860           * a file exists even though it may no longer exist on the
1861 1861           * server.  Any operation sent to the server will fail with ESTALE.
1862 1862           */
1863 1863  #ifdef DEBUG
1864 1864          nfs_lookup_dnlc_lookups++;
1865 1865  #endif
1866 1866          vp = dnlc_lookup(dvp, nm);
1867 1867          if (vp != NULL) {
1868 1868                  VN_RELE(vp);
1869 1869                  if (vp == DNLC_NO_VNODE && !vn_is_readonly(dvp)) {
1870 1870                          PURGE_ATTRCACHE(dvp);
1871 1871                  }
1872 1872                  error = nfs_validate_caches(dvp, cr);
1873 1873                  if (error)
1874 1874                          return (error);
1875 1875                  vp = dnlc_lookup(dvp, nm);
1876 1876                  if (vp != NULL) {
1877 1877                          error = nfs_access(dvp, VEXEC, 0, cr, NULL);
1878 1878                          if (error) {
1879 1879                                  VN_RELE(vp);
1880 1880                                  return (error);
1881 1881                          }
1882 1882                          if (vp == DNLC_NO_VNODE) {
1883 1883                                  VN_RELE(vp);
1884 1884  #ifdef DEBUG
1885 1885                                  nfs_lookup_dnlc_neg_hits++;
1886 1886  #endif
1887 1887                                  return (ENOENT);
1888 1888                          }
1889 1889                          *vpp = vp;
1890 1890  #ifdef DEBUG
1891 1891                          nfs_lookup_dnlc_hits++;
1892 1892  #endif
1893 1893                          return (0);
1894 1894                  }
1895 1895  #ifdef DEBUG
1896 1896                  nfs_lookup_dnlc_disappears++;
1897 1897  #endif
1898 1898          }
1899 1899  #ifdef DEBUG
1900 1900          else
1901 1901                  nfs_lookup_dnlc_misses++;
1902 1902  #endif
1903 1903  
1904 1904          *vpp = NULL;
1905 1905  
1906 1906          return (0);
1907 1907  }
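
Negative caching above stores the sentinel DNLC_NO_VNODE for names known not
to exist, so a later hit can return ENOENT without an over-the-wire LOOKUP.
A toy illustration of the convention (not the kernel DNLC):

#include <errno.h>
#include <stdio.h>
#include <string.h>

struct vn { int unused; };              /* stand-in for vnode_t */
static struct vn no_vnode;              /* plays the DNLC_NO_VNODE role */
#define NO_VNODE        (&no_vnode)

struct entry { const char *name; struct vn *vp; };

static int
cache_lookup(const struct entry *tab, int n, const char *nm, struct vn **vpp)
{
        int i;

        for (i = 0; i < n; i++) {
                if (strcmp(tab[i].name, nm) == 0) {
                        if (tab[i].vp == NO_VNODE)
                                return (ENOENT);        /* negative hit */
                        *vpp = tab[i].vp;
                        return (0);                     /* positive hit */
                }
        }
        *vpp = NULL;                    /* miss: caller goes over the wire */
        return (0);
}

int
main(void)
{
        struct vn f;
        struct entry tab[] = { { "exists", &f }, { "gone", NO_VNODE } };
        struct vn *vp;

        printf("gone -> %d\n", cache_lookup(tab, 2, "gone", &vp));
        return (0);
}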
1908 1908  
1909 1909  static int
1910 1910  nfslookup_otw(vnode_t *dvp, char *nm, vnode_t **vpp, cred_t *cr,
1911 1911          int rfscall_flags)
1912 1912  {
1913 1913          int error;
1914 1914          struct nfsdiropargs da;
1915 1915          struct nfsdiropres dr;
1916 1916          int douprintf;
1917 1917          failinfo_t fi;
1918 1918          hrtime_t t;
1919 1919  
1920 1920          ASSERT(*nm != '\0');
1921 1921          ASSERT(dvp->v_type == VDIR);
1922 1922          ASSERT(nfs_zone() == VTOMI(dvp)->mi_zone);
1923 1923  
1924 1924          setdiropargs(&da, nm, dvp);
1925 1925  
1926 1926          fi.vp = dvp;
1927 1927          fi.fhp = NULL;          /* no need to update, filehandle not copied */
1928 1928          fi.copyproc = nfscopyfh;
1929 1929          fi.lookupproc = nfslookup;
1930 1930          fi.xattrdirproc = acl_getxattrdir2;
1931 1931  
1932 1932          douprintf = 1;
1933 1933  
1934 1934          t = gethrtime();
1935 1935  
1936 1936          error = rfs2call(VTOMI(dvp), RFS_LOOKUP,
1937 1937              xdr_diropargs, (caddr_t)&da,
1938 1938              xdr_diropres, (caddr_t)&dr, cr,
1939 1939              &douprintf, &dr.dr_status, rfscall_flags, &fi);
1940 1940  
1941 1941          if (!error) {
1942 1942                  error = geterrno(dr.dr_status);
1943 1943                  if (!error) {
1944 1944                          *vpp = makenfsnode(&dr.dr_fhandle, &dr.dr_attr,
1945 1945                              dvp->v_vfsp, t, cr, VTOR(dvp)->r_path, nm);
1946 1946                          /*
1947 1947                           * If NFS_ACL is supported on the server, then the
1948 1948                           * attributes returned by server may have minimal
1949 1949                           * permissions sometimes denying access to users having
1950 1950                           * proper access.  To get the proper attributes, mark
1951 1951                           * the attributes as expired so that they will be
1952 1952                           * refetched via the NFS_ACL GETATTR2 procedure.
1953 1953                           */
1954 1954                          if (VTOMI(*vpp)->mi_flags & MI_ACL) {
1955 1955                                  PURGE_ATTRCACHE(*vpp);
1956 1956                          }
1957 1957                          if (!(rfscall_flags & RFSCALL_SOFT))
1958 1958                                  dnlc_update(dvp, nm, *vpp);
1959 1959                  } else {
1960 1960                          PURGE_STALE_FH(error, dvp, cr);
1961 1961                          if (error == ENOENT && nfs_lookup_neg_cache)
1962 1962                                  dnlc_enter(dvp, nm, DNLC_NO_VNODE);
1963 1963                  }
1964 1964          }
1965 1965  
1966 1966          return (error);
1967 1967  }
1968 1968  
1969 1969  /* ARGSUSED */
1970 1970  static int
1971 1971  nfs_create(vnode_t *dvp, char *nm, struct vattr *va, enum vcexcl exclusive,
1972 1972          int mode, vnode_t **vpp, cred_t *cr, int lfaware, caller_context_t *ct,
1973 1973          vsecattr_t *vsecp)
1974 1974  {
1975 1975          int error;
1976 1976          struct nfscreatargs args;
1977 1977          struct nfsdiropres dr;
1978 1978          int douprintf;
1979 1979          vnode_t *vp;
1980 1980          rnode_t *rp;
1981 1981          struct vattr vattr;
1982 1982          rnode_t *drp;
1983 1983          vnode_t *tempvp;
1984 1984          hrtime_t t;
1985 1985  
1986 1986          drp = VTOR(dvp);
1987 1987  
1988 1988          if (nfs_zone() != VTOMI(dvp)->mi_zone)
1989 1989                  return (EPERM);
1990 1990          if (nfs_rw_enter_sig(&drp->r_rwlock, RW_WRITER, INTR(dvp)))
1991 1991                  return (EINTR);
1992 1992  
1993 1993          /*
1994 1994           * We make a copy of the attributes because the caller does not
1995 1995           * expect us to change what va points to.
1996 1996           */
1997 1997          vattr = *va;
1998 1998  
1999 1999          /*
2000 2000           * If the pathname is "", just use dvp.  Don't need
2001 2001           * to send it over the wire, look it up in the dnlc,
2002 2002           * or perform any access checks.
2003 2003           */
2004 2004          if (*nm == '\0') {
2005 2005                  error = 0;
2006 2006                  VN_HOLD(dvp);
2007 2007                  vp = dvp;
2008 2008          /*
2009 2009           * If the pathname is ".", just use dvp.  Don't need
2010 2010           * to send it over the wire or look it up in the dnlc,
2011 2011           * just need to check access.
2012 2012           */
2013 2013          } else if (strcmp(nm, ".") == 0) {
2014 2014                  error = nfs_access(dvp, VEXEC, 0, cr, ct);
2015 2015                  if (error) {
2016 2016                          nfs_rw_exit(&drp->r_rwlock);
2017 2017                          return (error);
2018 2018                  }
2019 2019                  VN_HOLD(dvp);
2020 2020                  vp = dvp;
2021 2021          /*
2022 2022           * We need to go over the wire, just to be sure whether the
2023 2023           * file exists or not.  Using the DNLC can be dangerous in
2024 2024           * this case when making a decision regarding existence.
2025 2025           */
2026 2026          } else {
2027 2027                  error = nfslookup_otw(dvp, nm, &vp, cr, 0);
2028 2028          }
2029 2029          if (!error) {
2030 2030                  if (exclusive == EXCL)
2031 2031                          error = EEXIST;
2032 2032                  else if (vp->v_type == VDIR && (mode & VWRITE))
2033 2033                          error = EISDIR;
2034 2034                  else {
2035 2035                          /*
2036 2036                           * If vnode is a device, create special vnode.
2037 2037                           */
2038 2038                          if (IS_DEVVP(vp)) {
2039 2039                                  tempvp = vp;
2040 2040                                  vp = specvp(vp, vp->v_rdev, vp->v_type, cr);
2041 2041                                  VN_RELE(tempvp);
2042 2042                          }
2043 2043                          if (!(error = VOP_ACCESS(vp, mode, 0, cr, ct))) {
2044 2044                                  if ((vattr.va_mask & AT_SIZE) &&
2045 2045                                      vp->v_type == VREG) {
2046 2046                                          vattr.va_mask = AT_SIZE;
2047 2047                                          error = nfssetattr(vp, &vattr, 0, cr);
2048 2048  
2049 2049                                          if (!error) {
2050 2050                                                  /*
2051 2051                                                   * Existing file was truncated;
2052 2052                                                   * emit a create event.
2053 2053                                                   */
2054 2054                                                  vnevent_create(vp, ct);
2055 2055                                          }
2056 2056                                  }
2057 2057                          }
2058 2058                  }
2059 2059                  nfs_rw_exit(&drp->r_rwlock);
2060 2060                  if (error) {
2061 2061                          VN_RELE(vp);
2062 2062                  } else {
2063 2063                          *vpp = vp;
2064 2064                  }
2065 2065                  return (error);
2066 2066          }
2067 2067  
2068 2068          ASSERT(vattr.va_mask & AT_TYPE);
2069 2069          if (vattr.va_type == VREG) {
2070 2070                  ASSERT(vattr.va_mask & AT_MODE);
2071 2071                  if (MANDMODE(vattr.va_mode)) {
2072 2072                          nfs_rw_exit(&drp->r_rwlock);
2073 2073                          return (EACCES);
2074 2074                  }
2075 2075          }
2076 2076  
2077 2077          dnlc_remove(dvp, nm);
2078 2078  
2079 2079          setdiropargs(&args.ca_da, nm, dvp);
2080 2080  
2081 2081          /*
2082 2082           * Decide what the group-id of the created file should be.
2083 2083           * Set it in attribute list as advisory...then do a setattr
2084 2084           * if the server didn't get it right the first time.
2085 2085           */
2086 2086          error = setdirgid(dvp, &vattr.va_gid, cr);
2087 2087          if (error) {
2088 2088                  nfs_rw_exit(&drp->r_rwlock);
2089 2089                  return (error);
2090 2090          }
2091 2091          vattr.va_mask |= AT_GID;
2092 2092  
2093 2093          /*
2094 2094           * This is a completely gross hack to make mknod
2095 2095           * work over the wire until we can whack the protocol
2096 2096           */
2097 2097  #define IFCHR           0020000         /* character special */
2098 2098  #define IFBLK           0060000         /* block special */
2099 2099  #define IFSOCK          0140000         /* socket */
2100 2100  
2101 2101          /*
2102 2102           * dev_t is uint_t in 5.x and short in 4.x. 4.x supports 8 bit
2103 2103           * majors and 8 bit minors; 5.x supports 14 bit majors and 18
2104 2104           * bits in the minor number.  If the 5.x major and minor
2105 2105           * numbers are both <= 8 bits long, compress the device
2106 2106           * number before sending it. Otherwise, the 4.x server will not
2107 2107           * create the device with the correct device number and nothing
2108 2108           * can be done about this (see the sketch after this function).
2109 2109           */
2110 2110          if (vattr.va_type == VCHR || vattr.va_type == VBLK) {
2111 2111                  dev_t d = vattr.va_rdev;
2112 2112                  dev32_t dev32;
2113 2113  
2114 2114                  if (vattr.va_type == VCHR)
2115 2115                          vattr.va_mode |= IFCHR;
2116 2116                  else
2117 2117                          vattr.va_mode |= IFBLK;
2118 2118  
2119 2119                  (void) cmpldev(&dev32, d);
2120 2120                  if (dev32 & ~((SO4_MAXMAJ << L_BITSMINOR32) | SO4_MAXMIN))
2121 2121                          vattr.va_size = (u_offset_t)dev32;
2122 2122                  else
2123 2123                          vattr.va_size = (u_offset_t)nfsv2_cmpdev(d);
2124 2124  
2125 2125                  vattr.va_mask |= AT_MODE|AT_SIZE;
2126 2126          } else if (vattr.va_type == VFIFO) {
2127 2127                  vattr.va_mode |= IFCHR;         /* xtra kludge for namedpipe */
2128 2128                  vattr.va_size = (u_offset_t)NFS_FIFO_DEV;       /* blech */
2129 2129                  vattr.va_mask |= AT_MODE|AT_SIZE;
2130 2130          } else if (vattr.va_type == VSOCK) {
2131 2131                  vattr.va_mode |= IFSOCK;
2132 2132                  /*
2133 2133                   * To avoid triggering bugs in the servers, set AT_SIZE
2134 2134                   * (all other RFS_CREATE calls set this).
2135 2135                   */
2136 2136                  vattr.va_size = 0;
2137 2137                  vattr.va_mask |= AT_MODE|AT_SIZE;
2138 2138          }
2139 2139  
2140 2140          args.ca_sa = &args.ca_sa_buf;
2141 2141          error = vattr_to_sattr(&vattr, args.ca_sa);
2142 2142          if (error) {
2143 2143                  /* req time field(s) overflow - return immediately */
2144 2144                  nfs_rw_exit(&drp->r_rwlock);
2145 2145                  return (error);
2146 2146          }
2147 2147  
2148 2148          douprintf = 1;
2149 2149  
2150 2150          t = gethrtime();
2151 2151  
2152 2152          error = rfs2call(VTOMI(dvp), RFS_CREATE,
2153 2153              xdr_creatargs, (caddr_t)&args,
2154 2154              xdr_diropres, (caddr_t)&dr, cr,
2155 2155              &douprintf, &dr.dr_status, 0, NULL);
2156 2156  
2157 2157          PURGE_ATTRCACHE(dvp);   /* mod time changed */
2158 2158  
2159 2159          if (!error) {
2160 2160                  error = geterrno(dr.dr_status);
2161 2161                  if (!error) {
2162 2162                          if (HAVE_RDDIR_CACHE(drp))
2163 2163                                  nfs_purge_rddir_cache(dvp);
2164 2164                          vp = makenfsnode(&dr.dr_fhandle, &dr.dr_attr,
2165 2165                              dvp->v_vfsp, t, cr, NULL, NULL);
2166 2166                          /*
2167 2167                           * If NFS_ACL is supported on the server, then the
2168 2168                           * attributes returned by server may have minimal
2169 2169                           * permissions sometimes denying access to users having
2170 2170                           * proper access.  To get the proper attributes, mark
2171 2171                           * the attributes as expired so that they will be
2172 2172                           * refetched via the NFS_ACL GETATTR2 procedure.
2173 2173                           */
2174 2174                          if (VTOMI(vp)->mi_flags & MI_ACL) {
2175 2175                                  PURGE_ATTRCACHE(vp);
2176 2176                          }
2177 2177                          dnlc_update(dvp, nm, vp);
2178 2178                          rp = VTOR(vp);
2179 2179                          if (vattr.va_size == 0) {
2180 2180                                  mutex_enter(&rp->r_statelock);
2181 2181                                  rp->r_size = 0;
2182 2182                                  mutex_exit(&rp->r_statelock);
2183 2183                                  if (vn_has_cached_data(vp)) {
2184 2184                                          ASSERT(vp->v_type != VCHR);
2185 2185                                          nfs_invalidate_pages(vp,
2186 2186                                              (u_offset_t)0, cr);
2187 2187                                  }
2188 2188                          }
2189 2189  
2190 2190                          /*
2191 2191                           * Make sure the gid was set correctly.
2192 2192                           * If not, try to set it (but don't lose
2193 2193                           * any sleep over it).
2194 2194                           */
2195 2195                          if (vattr.va_gid != rp->r_attr.va_gid) {
2196 2196                                  vattr.va_mask = AT_GID;
2197 2197                                  (void) nfssetattr(vp, &vattr, 0, cr);
2198 2198                          }
2199 2199  
2200 2200                          /*
2201 2201                           * If vnode is a device create special vnode
2202 2202                           */
2203 2203                          if (IS_DEVVP(vp)) {
2204 2204                                  *vpp = specvp(vp, vp->v_rdev, vp->v_type, cr);
2205 2205                                  VN_RELE(vp);
2206 2206                          } else
2207 2207                                  *vpp = vp;
2208 2208                  } else {
2209 2209                          PURGE_STALE_FH(error, dvp, cr);
2210 2210                  }
2211 2211          }
2212 2212  
2213 2213          nfs_rw_exit(&drp->r_rwlock);
2214 2214  
2215 2215          return (error);
2216 2216  }
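
A sketch of the device-number compression discussed above: 5.x packs a
14-bit major above an 18-bit minor, while the 4.x wire format packs an 8-bit
major above an 8-bit minor, so only devices whose numbers fit in 8+8 bits
survive the trip intact (constants illustrative):

#include <stdio.h>

#define L_BITSMINOR32   18              /* 5.x minor field width */
#define SO4_BITSMINOR   8               /* 4.x minor field width */
#define SO4_MAXMAJ      0xff
#define SO4_MAXMIN      0xff

int
main(void)
{
        unsigned int maj = 42, min = 7;
        unsigned int dev32 = (maj << L_BITSMINOR32) | min;

        /* same test as above: any bit outside 8+8 means "cannot compress" */
        if (dev32 & ~((SO4_MAXMAJ << L_BITSMINOR32) | SO4_MAXMIN)) {
                (void) puts("does not fit: send the 5.x dev32 as-is");
        } else {
                unsigned int old = (maj << SO4_BITSMINOR) | min;
                printf("compressed 4.x dev = 0x%x\n", old);
        }
        return (0);
}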
2217 2217  
2218 2218  /*
2219 2219   * Weirdness: if the vnode to be removed is open
2220 2220   * we rename it instead of removing it and nfs_inactive
2221 2221   * will remove the new name.
2222 2222   */
2223 2223  /* ARGSUSED */
2224 2224  static int
2225 2225  nfs_remove(vnode_t *dvp, char *nm, cred_t *cr, caller_context_t *ct, int flags)
2226 2226  {
2227 2227          int error;
2228 2228          struct nfsdiropargs da;
2229 2229          enum nfsstat status;
2230 2230          vnode_t *vp;
2231 2231          char *tmpname;
2232 2232          int douprintf;
2233 2233          rnode_t *rp;
2234 2234          rnode_t *drp;
2235 2235  
2236 2236          if (nfs_zone() != VTOMI(dvp)->mi_zone)
2237 2237                  return (EPERM);
2238 2238          drp = VTOR(dvp);
2239 2239          if (nfs_rw_enter_sig(&drp->r_rwlock, RW_WRITER, INTR(dvp)))
2240 2240                  return (EINTR);
2241 2241  
2242 2242          error = nfslookup(dvp, nm, &vp, NULL, 0, NULL, cr, 0);
2243 2243          if (error) {
2244 2244                  nfs_rw_exit(&drp->r_rwlock);
2245 2245                  return (error);
2246 2246          }
2247 2247  
2248 2248          if (vp->v_type == VDIR && secpolicy_fs_linkdir(cr, dvp->v_vfsp)) {
2249 2249                  VN_RELE(vp);
2250 2250                  nfs_rw_exit(&drp->r_rwlock);
2251 2251                  return (EPERM);
2252 2252          }
2253 2253  
2254 2254          /*
2255 2255           * First just remove the entry from the name cache, as it
2256 2256           * is most likely the only entry for this vp.
2257 2257           */
2258 2258          dnlc_remove(dvp, nm);
2259 2259  
2260 2260          /*
2261 2261           * If the file has a v_count > 1 then there may be more than one
2262 2262           * entry in the name cache due to multiple links or an open file,
2263 2263           * but we don't have the real reference count so flush all
2264 2264           * possible entries.
2265 2265           */
2266 2266          if (vp->v_count > 1)
2267 2267                  dnlc_purge_vp(vp);
2268 2268  
2269 2269          /*
2270 2270           * Now we have the real reference count on the vnode
2271 2271           */
2272 2272          rp = VTOR(vp);
2273 2273          mutex_enter(&rp->r_statelock);
2274 2274          if (vp->v_count > 1 &&
2275 2275              (rp->r_unldvp == NULL || strcmp(nm, rp->r_unlname) == 0)) {
2276 2276                  mutex_exit(&rp->r_statelock);
2277 2277                  tmpname = newname();
2278 2278                  error = nfsrename(dvp, nm, dvp, tmpname, cr, ct);
2279 2279                  if (error)
2280 2280                          kmem_free(tmpname, MAXNAMELEN);
2281 2281                  else {
2282 2282                          mutex_enter(&rp->r_statelock);
2283 2283                          if (rp->r_unldvp == NULL) {
2284 2284                                  VN_HOLD(dvp);
2285 2285                                  rp->r_unldvp = dvp;
2286 2286                                  if (rp->r_unlcred != NULL)
2287 2287                                          crfree(rp->r_unlcred);
2288 2288                                  crhold(cr);
2289 2289                                  rp->r_unlcred = cr;
2290 2290                                  rp->r_unlname = tmpname;
2291 2291                          } else {
2292 2292                                  kmem_free(rp->r_unlname, MAXNAMELEN);
2293 2293                                  rp->r_unlname = tmpname;
2294 2294                          }
2295 2295                          mutex_exit(&rp->r_statelock);
2296 2296                  }
2297 2297          } else {
2298 2298                  mutex_exit(&rp->r_statelock);
2299 2299                  /*
2300 2300                   * We need to flush any dirty pages which happen to
2301 2301                   * be hanging around before removing the file.  This
2302 2302                   * shouldn't happen very often, and mostly happens on
2303 2303                   * file systems mounted "nocto".
2304 2304                   */
2305 2305                  if (vn_has_cached_data(vp) &&
2306 2306                      ((rp->r_flags & RDIRTY) || rp->r_count > 0)) {
2307 2307                          error = nfs_putpage(vp, (offset_t)0, 0, 0, cr, ct);
2308 2308                          if (error == ENOSPC || error == EDQUOT) {
2309 2309                                  mutex_enter(&rp->r_statelock);
2310 2310                                  if (!rp->r_error)
2311 2311                                          rp->r_error = error;
2312 2312                                  mutex_exit(&rp->r_statelock);
2313 2313                          }
2314 2314                  }
2315 2315  
2316 2316                  setdiropargs(&da, nm, dvp);
2317 2317  
2318 2318                  douprintf = 1;
2319 2319  
2320 2320                  error = rfs2call(VTOMI(dvp), RFS_REMOVE,
2321 2321                      xdr_diropargs, (caddr_t)&da,
2322 2322                      xdr_enum, (caddr_t)&status, cr,
2323 2323                      &douprintf, &status, 0, NULL);
2324 2324  
2325 2325                  /*
2326 2326                   * The xattr dir may be gone after last attr is removed,
2327 2327                   * so flush it from dnlc.
2328 2328                   */
2329 2329                  if (dvp->v_flag & V_XATTRDIR)
2330 2330                          dnlc_purge_vp(dvp);
2331 2331  
2332 2332                  PURGE_ATTRCACHE(dvp);   /* mod time changed */
2333 2333                  PURGE_ATTRCACHE(vp);    /* link count changed */
2334 2334  
2335 2335                  if (!error) {
2336 2336                          error = geterrno(status);
2337 2337                          if (!error) {
2338 2338                                  if (HAVE_RDDIR_CACHE(drp))
2339 2339                                          nfs_purge_rddir_cache(dvp);
2340 2340                          } else {
2341 2341                                  PURGE_STALE_FH(error, dvp, cr);
2342 2342                          }
2343 2343                  }
2344 2344          }
2345 2345  
2346 2346          if (error == 0) {
2347 2347                  vnevent_remove(vp, dvp, nm, ct);
2348 2348          }
2349 2349          VN_RELE(vp);
2350 2350  
2351 2351          nfs_rw_exit(&drp->r_rwlock);
2352 2352  
2353 2353          return (error);
2354 2354  }
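/*
 * The r_unl* bookkeeping above (shared with nfsrename() below) is the
 * "unlink open file" pattern: a file that is still open when its last
 * name is removed is instead renamed to a fresh temporary name, and the
 * rnode remembers the directory, credential, and name needed to finish
 * the remove on last close.  A condensed sketch of that bookkeeping;
 * record_unlinked() is a hypothetical helper named only for illustration.
 */
static void
record_unlinked(rnode_t *rp, vnode_t *dvp, char *tmpname, cred_t *cr)
{
	mutex_enter(&rp->r_statelock);
	if (rp->r_unldvp == NULL) {
		VN_HOLD(dvp);		/* hold the parent until cleanup */
		rp->r_unldvp = dvp;
		if (rp->r_unlcred != NULL)
			crfree(rp->r_unlcred);
		crhold(cr);
		rp->r_unlcred = cr;
		rp->r_unlname = tmpname;
	} else {
		/* renamed before; just remember the newest temporary name */
		kmem_free(rp->r_unlname, MAXNAMELEN);
		rp->r_unlname = tmpname;
	}
	mutex_exit(&rp->r_statelock);
}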
2355 2355  
2356 2356  /* ARGSUSED */
2357 2357  static int
2358 2358  nfs_link(vnode_t *tdvp, vnode_t *svp, char *tnm, cred_t *cr,
2359 2359          caller_context_t *ct, int flags)
2360 2360  {
2361 2361          int error;
2362 2362          struct nfslinkargs args;
2363 2363          enum nfsstat status;
2364 2364          vnode_t *realvp;
2365 2365          int douprintf;
2366 2366          rnode_t *tdrp;
2367 2367  
2368 2368          if (nfs_zone() != VTOMI(tdvp)->mi_zone)
2369 2369                  return (EPERM);
2370 2370          if (VOP_REALVP(svp, &realvp, ct) == 0)
2371 2371                  svp = realvp;
2372 2372  
2373 2373          args.la_from = VTOFH(svp);
2374 2374          setdiropargs(&args.la_to, tnm, tdvp);
2375 2375  
2376 2376          tdrp = VTOR(tdvp);
2377 2377          if (nfs_rw_enter_sig(&tdrp->r_rwlock, RW_WRITER, INTR(tdvp)))
2378 2378                  return (EINTR);
2379 2379  
2380 2380          dnlc_remove(tdvp, tnm);
2381 2381  
2382 2382          douprintf = 1;
2383 2383  
2384 2384          error = rfs2call(VTOMI(svp), RFS_LINK,
2385 2385              xdr_linkargs, (caddr_t)&args,
2386 2386              xdr_enum, (caddr_t)&status, cr,
2387 2387              &douprintf, &status, 0, NULL);
2388 2388  
2389 2389          PURGE_ATTRCACHE(tdvp);  /* mod time changed */
2390 2390          PURGE_ATTRCACHE(svp);   /* link count changed */
2391 2391  
2392 2392          if (!error) {
2393 2393                  error = geterrno(status);
2394 2394                  if (!error) {
2395 2395                          if (HAVE_RDDIR_CACHE(tdrp))
2396 2396                                  nfs_purge_rddir_cache(tdvp);
2397 2397                  }
2398 2398          }
2399 2399  
2400 2400          nfs_rw_exit(&tdrp->r_rwlock);
2401 2401  
2402 2402          if (!error) {
2403 2403                  /*
2404 2404                   * Notify the source file of this link operation.
2405 2405                   */
2406 2406                  vnevent_link(svp, ct);
2407 2407          }
2408 2408          return (error);
2409 2409  }
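/*
 * nfs_link() ends with the cache maintenance sequence common to the
 * directory-modifying operations in this file: unconditionally purge
 * the attribute caches the RPC invalidates, then, on success, discard
 * any cached readdir results for the directory.  A condensed sketch of
 * the directory side of that sequence; post_dirop() is a hypothetical
 * helper named only for illustration.
 */
static int
post_dirop(vnode_t *dvp, enum nfsstat status, int error)
{
	rnode_t *drp = VTOR(dvp);

	PURGE_ATTRCACHE(dvp);		/* mod time changed */
	if (!error) {
		error = geterrno(status);
		if (!error && HAVE_RDDIR_CACHE(drp))
			nfs_purge_rddir_cache(dvp);
	}
	return (error);
}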
2410 2410  
2411 2411  /* ARGSUSED */
2412 2412  static int
2413 2413  nfs_rename(vnode_t *odvp, char *onm, vnode_t *ndvp, char *nnm, cred_t *cr,
2414 2414          caller_context_t *ct, int flags)
2415 2415  {
2416 2416          vnode_t *realvp;
2417 2417  
2418 2418          if (nfs_zone() != VTOMI(odvp)->mi_zone)
2419 2419                  return (EPERM);
2420 2420          if (VOP_REALVP(ndvp, &realvp, ct) == 0)
2421 2421                  ndvp = realvp;
2422 2422  
2423 2423          return (nfsrename(odvp, onm, ndvp, nnm, cr, ct));
2424 2424  }
2425 2425  
2426 2426  /*
2427 2427   * nfsrename does the real work of renaming in NFS Version 2.
2428 2428   */
2429 2429  static int
2430 2430  nfsrename(vnode_t *odvp, char *onm, vnode_t *ndvp, char *nnm, cred_t *cr,
2431 2431      caller_context_t *ct)
2432 2432  {
2433 2433          int error;
2434 2434          enum nfsstat status;
2435 2435          struct nfsrnmargs args;
2436 2436          int douprintf;
2437 2437          vnode_t *nvp = NULL;
2438 2438          vnode_t *ovp = NULL;
2439 2439          char *tmpname;
2440 2440          rnode_t *rp;
2441 2441          rnode_t *odrp;
2442 2442          rnode_t *ndrp;
2443 2443  
2444 2444          ASSERT(nfs_zone() == VTOMI(odvp)->mi_zone);
2445 2445          if (strcmp(onm, ".") == 0 || strcmp(onm, "..") == 0 ||
2446 2446              strcmp(nnm, ".") == 0 || strcmp(nnm, "..") == 0)
2447 2447                  return (EINVAL);
2448 2448  
2449 2449          odrp = VTOR(odvp);
2450 2450          ndrp = VTOR(ndvp);
2451 2451          if ((intptr_t)odrp < (intptr_t)ndrp) {
2452 2452                  if (nfs_rw_enter_sig(&odrp->r_rwlock, RW_WRITER, INTR(odvp)))
2453 2453                          return (EINTR);
2454 2454                  if (nfs_rw_enter_sig(&ndrp->r_rwlock, RW_WRITER, INTR(ndvp))) {
2455 2455                          nfs_rw_exit(&odrp->r_rwlock);
2456 2456                          return (EINTR);
2457 2457                  }
2458 2458          } else {
2459 2459                  if (nfs_rw_enter_sig(&ndrp->r_rwlock, RW_WRITER, INTR(ndvp)))
2460 2460                          return (EINTR);
2461 2461                  if (nfs_rw_enter_sig(&odrp->r_rwlock, RW_WRITER, INTR(odvp))) {
2462 2462                          nfs_rw_exit(&ndrp->r_rwlock);
2463 2463                          return (EINTR);
2464 2464                  }
2465 2465          }
2466 2466  
2467 2467          /*
2468 2468           * Lookup the target file.  If it exists, it needs to be
2469 2469           * checked to see whether it is a mount point and whether
2470 2470           * it is active (open).
2471 2471           */
2472 2472          error = nfslookup(ndvp, nnm, &nvp, NULL, 0, NULL, cr, 0);
2473 2473          if (!error) {
2474 2474                  /*
2475 2475                   * If this file has been mounted on, then just
2476 2476                   * return busy because renaming to it would remove
2477 2477                   * the mounted file system from the name space.
2478 2478                   */
2479 2479                  if (vn_mountedvfs(nvp) != NULL) {
2480 2480                          VN_RELE(nvp);
2481 2481                          nfs_rw_exit(&odrp->r_rwlock);
2482 2482                          nfs_rw_exit(&ndrp->r_rwlock);
2483 2483                          return (EBUSY);
2484 2484                  }
2485 2485  
2486 2486                  /*
2487 2487                   * Purge the name cache of all references to this vnode
2488 2488                   * so that we can check the reference count to infer
2489 2489                   * whether it is active or not.
2490 2490                   */
2491 2491                  /*
2492 2492                   * First just remove the entry from the name cache, as it
2493 2493                   * is most likely the only entry for this vp.
2494 2494                   */
2495 2495                  dnlc_remove(ndvp, nnm);
2496 2496                  /*
2497 2497                   * If the file has a v_count > 1 then there may be more
2498 2498                   * than one entry in the name cache due to multiple links
2499 2499                   * or an open file, but we don't have the real reference
2500 2500                   * count, so flush all possible entries.
2501 2501                   */
2502 2502                  if (nvp->v_count > 1)
2503 2503                          dnlc_purge_vp(nvp);
2504 2504  
2505 2505                  /*
2506 2506                   * If the vnode is active and is not a directory,
2507 2507                   * arrange to rename it to a
2508 2508                   * temporary file so that it will continue to be
2509 2509                   * accessible.  This implements the "unlink-open-file"
2510 2510                   * semantics for the target of a rename operation.
2511 2511                   * Before doing this though, make sure that the
2512 2512                   * source and target files are not already the same.
2513 2513                   */
2514 2514                  if (nvp->v_count > 1 && nvp->v_type != VDIR) {
2515 2515                          /*
2516 2516                           * Lookup the source name.
2517 2517                           */
2518 2518                          error = nfslookup(odvp, onm, &ovp, NULL, 0, NULL,
2519 2519                              cr, 0);
2520 2520  
2521 2521                          /*
2522 2522                           * The source name *should* already exist.
2523 2523                           */
2524 2524                          if (error) {
2525 2525                                  VN_RELE(nvp);
2526 2526                                  nfs_rw_exit(&odrp->r_rwlock);
2527 2527                                  nfs_rw_exit(&ndrp->r_rwlock);
2528 2528                                  return (error);
2529 2529                          }
2530 2530  
2531 2531                          /*
2532 2532                           * Compare the two vnodes.  If they are the same,
2533 2533                           * just release all held vnodes and return success.
2534 2534                           */
2535 2535                          if (ovp == nvp) {
2536 2536                                  VN_RELE(ovp);
2537 2537                                  VN_RELE(nvp);
2538 2538                                  nfs_rw_exit(&odrp->r_rwlock);
2539 2539                                  nfs_rw_exit(&ndrp->r_rwlock);
2540 2540                                  return (0);
2541 2541                          }
2542 2542  
2543 2543                          /*
2544 2544                           * Can't mix and match directories and non-
2545 2545                           * directories in rename operations.  We already
2546 2546                           * know that the target is not a directory.  If
2547 2547                           * the source is a directory, return an error.
2548 2548                           */
2549 2549                          if (ovp->v_type == VDIR) {
2550 2550                                  VN_RELE(ovp);
2551 2551                                  VN_RELE(nvp);
2552 2552                                  nfs_rw_exit(&odrp->r_rwlock);
2553 2553                                  nfs_rw_exit(&ndrp->r_rwlock);
2554 2554                                  return (ENOTDIR);
2555 2555                          }
2556 2556  
2557 2557                          /*
2558 2558                           * The target file exists, is not the same as
2559 2559                           * the source file, and is active.  Link it
2560 2560                           * to a temporary filename to avoid having
2561 2561                           * the server removing the file completely.
2562 2562                           */
2563 2563                          tmpname = newname();
2564 2564                          error = nfs_link(ndvp, nvp, tmpname, cr, NULL, 0);
2565 2565                          if (error == EOPNOTSUPP) {
2566 2566                                  error = nfs_rename(ndvp, nnm, ndvp, tmpname,
2567 2567                                      cr, NULL, 0);
2568 2568                          }
2569 2569                          if (error) {
2570 2570                                  kmem_free(tmpname, MAXNAMELEN);
2571 2571                                  VN_RELE(ovp);
2572 2572                                  VN_RELE(nvp);
2573 2573                                  nfs_rw_exit(&odrp->r_rwlock);
2574 2574                                  nfs_rw_exit(&ndrp->r_rwlock);
2575 2575                                  return (error);
2576 2576                          }
2577 2577                          rp = VTOR(nvp);
2578 2578                          mutex_enter(&rp->r_statelock);
2579 2579                          if (rp->r_unldvp == NULL) {
2580 2580                                  VN_HOLD(ndvp);
2581 2581                                  rp->r_unldvp = ndvp;
2582 2582                                  if (rp->r_unlcred != NULL)
2583 2583                                          crfree(rp->r_unlcred);
2584 2584                                  crhold(cr);
2585 2585                                  rp->r_unlcred = cr;
2586 2586                                  rp->r_unlname = tmpname;
2587 2587                          } else {
2588 2588                                  kmem_free(rp->r_unlname, MAXNAMELEN);
2589 2589                                  rp->r_unlname = tmpname;
2590 2590                          }
2591 2591                          mutex_exit(&rp->r_statelock);
2592 2592                  }
2593 2593          }
2594 2594  
2595 2595          if (ovp == NULL) {
2596 2596                  /*
2597 2597                   * When renaming directories to be a subdirectory of a
2598 2598                   * different parent, the dnlc entry for ".." will no
2599 2599                   * longer be valid, so it must be removed.
2600 2600                   *
2601 2601                   * We do a lookup here to determine whether we are renaming
2602 2602                   * a directory, and we also need to check whether we are
2603 2603                   * renaming an unlinked file.  This lookup might already
2604 2604                   * have been done in previous code, so we check ovp == NULL
2605 2605                   * to avoid doing it twice.
2606 2606                   */
2607 2607  
2608 2608                  error = nfslookup(odvp, onm, &ovp, NULL, 0, NULL, cr, 0);
2609 2609  
2610 2610                  /*
2611 2611                   * The source name *should* already exist.
2612 2612                   */
2613 2613                  if (error) {
2614 2614                          nfs_rw_exit(&odrp->r_rwlock);
2615 2615                          nfs_rw_exit(&ndrp->r_rwlock);
2616 2616                          if (nvp) {
2617 2617                                  VN_RELE(nvp);
2618 2618                          }
2619 2619                          return (error);
2620 2620                  }
2621 2621                  ASSERT(ovp != NULL);
2622 2622          }
2623 2623  
2624 2624          dnlc_remove(odvp, onm);
2625 2625          dnlc_remove(ndvp, nnm);
2626 2626  
2627 2627          setdiropargs(&args.rna_from, onm, odvp);
2628 2628          setdiropargs(&args.rna_to, nnm, ndvp);
2629 2629  
2630 2630          douprintf = 1;
2631 2631  
2632 2632          error = rfs2call(VTOMI(odvp), RFS_RENAME,
2633 2633              xdr_rnmargs, (caddr_t)&args,
2634 2634              xdr_enum, (caddr_t)&status, cr,
2635 2635              &douprintf, &status, 0, NULL);
2636 2636  
2637 2637          PURGE_ATTRCACHE(odvp);  /* mod time changed */
2638 2638          PURGE_ATTRCACHE(ndvp);  /* mod time changed */
2639 2639  
2640 2640          if (!error) {
2641 2641                  error = geterrno(status);
2642 2642                  if (!error) {
2643 2643                          if (HAVE_RDDIR_CACHE(odrp))
2644 2644                                  nfs_purge_rddir_cache(odvp);
2645 2645                          if (HAVE_RDDIR_CACHE(ndrp))
2646 2646                                  nfs_purge_rddir_cache(ndvp);
2647 2647                          /*
2648 2648                           * when renaming directories to be a subdirectory of a
2649 2649                           * different parent, the dnlc entry for ".." will no
2650 2650                           * longer be valid, so it must be removed
2651 2651                           */
2652 2652                          rp = VTOR(ovp);
2653 2653                          if (ndvp != odvp) {
2654 2654                                  if (ovp->v_type == VDIR) {
2655 2655                                          dnlc_remove(ovp, "..");
2656 2656                                          if (HAVE_RDDIR_CACHE(rp))
2657 2657                                                  nfs_purge_rddir_cache(ovp);
2658 2658                                  }
2659 2659                          }
2660 2660  
2661 2661                          /*
2662 2662                           * If we are renaming the unlinked file, update the
2663 2663                           * r_unldvp and r_unlname as needed.
2664 2664                           */
2665 2665                          mutex_enter(&rp->r_statelock);
2666 2666                          if (rp->r_unldvp != NULL) {
2667 2667                                  if (strcmp(rp->r_unlname, onm) == 0) {
2668 2668                                          (void) strncpy(rp->r_unlname,
2669 2669                                              nnm, MAXNAMELEN);
2670 2670                                          rp->r_unlname[MAXNAMELEN - 1] = '\0';
2671 2671  
2672 2672                                          if (ndvp != rp->r_unldvp) {
2673 2673                                                  VN_RELE(rp->r_unldvp);
2674 2674                                                  rp->r_unldvp = ndvp;
2675 2675                                                  VN_HOLD(ndvp);
2676 2676                                          }
2677 2677                                  }
2678 2678                          }
2679 2679                          mutex_exit(&rp->r_statelock);
2680 2680                  } else {
2681 2681                          /*
2682 2682                           * System V defines rename to return EEXIST, not
2683 2683                           * ENOTEMPTY if the target directory is not empty.
2684 2684                           * Over the wire, the error is NFSERR_ENOTEMPTY
2685 2685                           * which geterrno maps to ENOTEMPTY.
2686 2686                           */
2687 2687                          if (error == ENOTEMPTY)
2688 2688                                  error = EEXIST;
2689 2689                  }
2690 2690          }
2691 2691  
2692 2692          if (error == 0) {
2693 2693                  if (nvp)
2694 2694                          vnevent_rename_dest(nvp, ndvp, nnm, ct);
2695 2695  
2696 2696                  ASSERT(ovp != NULL);
2697 2697                  vnevent_rename_src(ovp, odvp, onm, ct);
2698 2698                  vnevent_rename_dest_dir(ndvp, ovp, nnm, ct);
2699 2699          }
2700 2700  
2701 2701          if (nvp) {
2702 2702                  VN_RELE(nvp);
2703 2703          }
2704 2704          VN_RELE(ovp);
2705 2705  
2706 2706          nfs_rw_exit(&odrp->r_rwlock);
2707 2707          nfs_rw_exit(&ndrp->r_rwlock);
2708 2708  
2709 2709          return (error);
2710 2710  }
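/*
 * Two concurrent renames between the same pair of directories could
 * otherwise take the two parents' r_rwlocks in opposite orders and
 * deadlock; the address comparison at the top of nfsrename() above
 * imposes a single global acquisition order.  The same logic in
 * isolation, as a hypothetical helper named only for illustration:
 */
static int
lock_parents(rnode_t *odrp, vnode_t *odvp, rnode_t *ndrp, vnode_t *ndvp)
{
	/* always take the lower-addressed rnode's lock first */
	if ((intptr_t)odrp < (intptr_t)ndrp) {
		if (nfs_rw_enter_sig(&odrp->r_rwlock, RW_WRITER, INTR(odvp)))
			return (EINTR);
		if (nfs_rw_enter_sig(&ndrp->r_rwlock, RW_WRITER, INTR(ndvp))) {
			nfs_rw_exit(&odrp->r_rwlock);
			return (EINTR);
		}
	} else {
		if (nfs_rw_enter_sig(&ndrp->r_rwlock, RW_WRITER, INTR(ndvp)))
			return (EINTR);
		if (nfs_rw_enter_sig(&odrp->r_rwlock, RW_WRITER, INTR(odvp))) {
			nfs_rw_exit(&ndrp->r_rwlock);
			return (EINTR);
		}
	}
	return (0);
}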
2711 2711  
2712 2712  /* ARGSUSED */
2713 2713  static int
2714 2714  nfs_mkdir(vnode_t *dvp, char *nm, struct vattr *va, vnode_t **vpp, cred_t *cr,
2715 2715          caller_context_t *ct, int flags, vsecattr_t *vsecp)
2716 2716  {
2717 2717          int error;
2718 2718          struct nfscreatargs args;
2719 2719          struct nfsdiropres dr;
2720 2720          int douprintf;
2721 2721          rnode_t *drp;
2722 2722          hrtime_t t;
2723 2723  
2724 2724          if (nfs_zone() != VTOMI(dvp)->mi_zone)
2725 2725                  return (EPERM);
2726 2726  
2727 2727          setdiropargs(&args.ca_da, nm, dvp);
2728 2728  
2729 2729          /*
2730 2730           * Decide what the group-id and set-gid bit of the created directory
2731 2731           * should be.  May have to do a setattr to get the gid right.
2732 2732           */
2733 2733          error = setdirgid(dvp, &va->va_gid, cr);
2734 2734          if (error)
2735 2735                  return (error);
2736 2736          error = setdirmode(dvp, &va->va_mode, cr);
2737 2737          if (error)
2738 2738                  return (error);
2739 2739          va->va_mask |= AT_MODE|AT_GID;
2740 2740  
2741 2741          args.ca_sa = &args.ca_sa_buf;
2742 2742          error = vattr_to_sattr(va, args.ca_sa);
2743 2743          if (error) {
2744 2744                  /* req time field(s) overflow - return immediately */
2745 2745                  return (error);
2746 2746          }
2747 2747  
2748 2748          drp = VTOR(dvp);
2749 2749          if (nfs_rw_enter_sig(&drp->r_rwlock, RW_WRITER, INTR(dvp)))
2750 2750                  return (EINTR);
2751 2751  
2752 2752          dnlc_remove(dvp, nm);
2753 2753  
2754 2754          douprintf = 1;
2755 2755  
2756 2756          t = gethrtime();
2757 2757  
2758 2758          error = rfs2call(VTOMI(dvp), RFS_MKDIR,
2759 2759              xdr_creatargs, (caddr_t)&args,
2760 2760              xdr_diropres, (caddr_t)&dr, cr,
2761 2761              &douprintf, &dr.dr_status, 0, NULL);
2762 2762  
2763 2763          PURGE_ATTRCACHE(dvp);   /* mod time changed */
2764 2764  
2765 2765          if (!error) {
2766 2766                  error = geterrno(dr.dr_status);
2767 2767                  if (!error) {
2768 2768                          if (HAVE_RDDIR_CACHE(drp))
2769 2769                                  nfs_purge_rddir_cache(dvp);
2770 2770                          /*
2771 2771                           * The attributes returned by RFS_MKDIR cannot
2772 2772                           * be depended upon, so mark the attribute cache
2773 2773                           * as purged.  A subsequent GETATTR will get the
2774 2774                           * correct attributes from the server.
2775 2775                           */
2776 2776                          *vpp = makenfsnode(&dr.dr_fhandle, &dr.dr_attr,
2777 2777                              dvp->v_vfsp, t, cr, NULL, NULL);
2778 2778                          PURGE_ATTRCACHE(*vpp);
2779 2779                          dnlc_update(dvp, nm, *vpp);
2780 2780  
2781 2781                          /*
2782 2782                           * Make sure the gid was set correctly.
2783 2783                           * If not, try to set it (but don't lose
2784 2784                           * any sleep over it).
2785 2785                           */
2786 2786                          if (va->va_gid != VTOR(*vpp)->r_attr.va_gid) {
2787 2787                                  va->va_mask = AT_GID;
2788 2788                                  (void) nfssetattr(*vpp, va, 0, cr);
2789 2789                          }
2790 2790                  } else {
2791 2791                          PURGE_STALE_FH(error, dvp, cr);
2792 2792                  }
2793 2793          }
2794 2794  
2795 2795          nfs_rw_exit(&drp->r_rwlock);
2796 2796  
2797 2797          return (error);
2798 2798  }
2799 2799  
2800 2800  /* ARGSUSED */
2801 2801  static int
2802 2802  nfs_rmdir(vnode_t *dvp, char *nm, vnode_t *cdir, cred_t *cr,
2803 2803          caller_context_t *ct, int flags)
2804 2804  {
2805 2805          int error;
2806 2806          enum nfsstat status;
2807 2807          struct nfsdiropargs da;
2808 2808          vnode_t *vp;
2809 2809          int douprintf;
2810 2810          rnode_t *drp;
2811 2811  
2812 2812          if (nfs_zone() != VTOMI(dvp)->mi_zone)
2813 2813                  return (EPERM);
2814 2814          drp = VTOR(dvp);
2815 2815          if (nfs_rw_enter_sig(&drp->r_rwlock, RW_WRITER, INTR(dvp)))
2816 2816                  return (EINTR);
2817 2817  
2818 2818          /*
2819 2819           * Attempt to prevent a rmdir(".") from succeeding.
2820 2820           */
2821 2821          error = nfslookup(dvp, nm, &vp, NULL, 0, NULL, cr, 0);
2822 2822          if (error) {
2823 2823                  nfs_rw_exit(&drp->r_rwlock);
2824 2824                  return (error);
2825 2825          }
2826 2826  
2827 2827          if (vp == cdir) {
2828 2828                  VN_RELE(vp);
2829 2829                  nfs_rw_exit(&drp->r_rwlock);
2830 2830                  return (EINVAL);
2831 2831          }
2832 2832  
2833 2833          setdiropargs(&da, nm, dvp);
2834 2834  
2835 2835          /*
2836 2836           * First just remove the entry from the name cache, as it
2837 2837           * is most likely an entry for this vp.
2838 2838           */
2839 2839          dnlc_remove(dvp, nm);
2840 2840  
2841 2841          /*
2842 2842           * If the vnode reference count is greater than one, then
2843 2843           * there may be additional references in the DNLC which will
2844 2844           * need to be purged.  First, try removing the entry for
2845 2845           * the parent directory and see if that removes the additional
2846 2846           * reference(s).  If that doesn't do it, then use dnlc_purge_vp
2847 2847           * to completely remove any references to the directory which
2848 2848           * might still exist in the DNLC.
2849 2849           */
2850 2850          if (vp->v_count > 1) {
2851 2851                  dnlc_remove(vp, "..");
2852 2852                  if (vp->v_count > 1)
2853 2853                          dnlc_purge_vp(vp);
2854 2854          }
2855 2855  
2856 2856          douprintf = 1;
2857 2857  
2858 2858          error = rfs2call(VTOMI(dvp), RFS_RMDIR,
2859 2859              xdr_diropargs, (caddr_t)&da,
2860 2860              xdr_enum, (caddr_t)&status, cr,
2861 2861              &douprintf, &status, 0, NULL);
2862 2862  
2863 2863          PURGE_ATTRCACHE(dvp);   /* mod time changed */
2864 2864  
2865 2865          if (error) {
2866 2866                  VN_RELE(vp);
2867 2867                  nfs_rw_exit(&drp->r_rwlock);
2868 2868                  return (error);
2869 2869          }
2870 2870  
2871 2871          error = geterrno(status);
2872 2872          if (!error) {
2873 2873                  if (HAVE_RDDIR_CACHE(drp))
2874 2874                          nfs_purge_rddir_cache(dvp);
2875 2875                  if (HAVE_RDDIR_CACHE(VTOR(vp)))
2876 2876                          nfs_purge_rddir_cache(vp);
2877 2877          } else {
2878 2878                  PURGE_STALE_FH(error, dvp, cr);
2879 2879                  /*
2880 2880                   * System V defines rmdir to return EEXIST, not
2881 2881                   * ENOTEMPTY if the directory is not empty.  Over
2882 2882                   * the wire, the error is NFSERR_ENOTEMPTY which
2883 2883                   * geterrno maps to ENOTEMPTY.
2884 2884                   */
2885 2885                  if (error == ENOTEMPTY)
2886 2886                          error = EEXIST;
2887 2887          }
2888 2888  
2889 2889          if (error == 0) {
2890 2890                  vnevent_rmdir(vp, dvp, nm, ct);
2891 2891          }
2892 2892          VN_RELE(vp);
2893 2893  
2894 2894          nfs_rw_exit(&drp->r_rwlock);
2895 2895  
2896 2896          return (error);
2897 2897  }
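/*
 * The DNLC purging in nfs_rmdir() escalates from cheap to expensive:
 * dropping the (dvp, nm) entry usually releases the only cached hold,
 * dropping the directory's own ".." entry releases the hold that entry
 * keeps on vp, and only if references still remain is the full
 * dnlc_purge_vp() scan paid.  The same escalation in isolation, as a
 * hypothetical helper named only for illustration:
 */
static void
purge_dir_dnlc(vnode_t *dvp, char *nm, vnode_t *vp)
{
	dnlc_remove(dvp, nm);		/* cheapest: the entry itself */
	if (vp->v_count > 1) {
		dnlc_remove(vp, "..");	/* next: vp's own ".." entry */
		if (vp->v_count > 1)
			dnlc_purge_vp(vp);	/* last resort: full purge */
	}
}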
2898 2898  
2899 2899  /* ARGSUSED */
2900 2900  static int
2901 2901  nfs_symlink(vnode_t *dvp, char *lnm, struct vattr *tva, char *tnm, cred_t *cr,
2902 2902          caller_context_t *ct, int flags)
2903 2903  {
2904 2904          int error;
2905 2905          struct nfsslargs args;
2906 2906          enum nfsstat status;
2907 2907          int douprintf;
2908 2908          rnode_t *drp;
2909 2909  
2910 2910          if (nfs_zone() != VTOMI(dvp)->mi_zone)
2911 2911                  return (EPERM);
2912 2912          setdiropargs(&args.sla_from, lnm, dvp);
2913 2913          args.sla_sa = &args.sla_sa_buf;
2914 2914          error = vattr_to_sattr(tva, args.sla_sa);
2915 2915          if (error) {
2916 2916                  /* req time field(s) overflow - return immediately */
2917 2917                  return (error);
2918 2918          }
2919 2919          args.sla_tnm = tnm;
2920 2920  
2921 2921          drp = VTOR(dvp);
2922 2922          if (nfs_rw_enter_sig(&drp->r_rwlock, RW_WRITER, INTR(dvp)))
2923 2923                  return (EINTR);
2924 2924  
2925 2925          dnlc_remove(dvp, lnm);
2926 2926  
2927 2927          douprintf = 1;
2928 2928  
2929 2929          error = rfs2call(VTOMI(dvp), RFS_SYMLINK,
2930 2930              xdr_slargs, (caddr_t)&args,
2931 2931              xdr_enum, (caddr_t)&status, cr,
2932 2932              &douprintf, &status, 0, NULL);
2933 2933  
2934 2934          PURGE_ATTRCACHE(dvp);   /* mod time changed */
2935 2935  
2936 2936          if (!error) {
2937 2937                  error = geterrno(status);
2938 2938                  if (!error) {
2939 2939                          if (HAVE_RDDIR_CACHE(drp))
2940 2940                                  nfs_purge_rddir_cache(dvp);
2941 2941                  } else {
2942 2942                          PURGE_STALE_FH(error, dvp, cr);
2943 2943                  }
2944 2944          }
2945 2945  
2946 2946          nfs_rw_exit(&drp->r_rwlock);
2947 2947  
2948 2948          return (error);
2949 2949  }
2950 2950  
2951 2951  #ifdef DEBUG
2952 2952  static int nfs_readdir_cache_hits = 0;
2953 2953  static int nfs_readdir_cache_shorts = 0;
2954 2954  static int nfs_readdir_cache_waits = 0;
2955 2955  static int nfs_readdir_cache_misses = 0;
2956 2956  static int nfs_readdir_readahead = 0;
2957 2957  #endif
2958 2958  
2959 2959  static int nfs_shrinkreaddir = 0;
2960 2960  
2961 2961  /*
2962 2962   * Read directory entries.
2963 2963   * There are some weird things to look out for here.  The uio_offset
2964 2964   * field is either 0 or it is the offset returned from a previous
2965 2965   * readdir.  It is an opaque value used by the server to find the
2966 2966   * correct directory block to read. The count field is the number
2967 2967   * of blocks to read on the server.  This is advisory only; the server
2968 2968   * may return only one block's worth of entries.  Entries may be compressed
2969 2969   * on the server.
2970 2970   */
2971 2971  /* ARGSUSED */
2972 2972  static int
2973 2973  nfs_readdir(vnode_t *vp, struct uio *uiop, cred_t *cr, int *eofp,
2974 2974          caller_context_t *ct, int flags)
2975 2975  {
2976 2976          int error;
2977 2977          size_t count;
2978 2978          rnode_t *rp;
2979 2979          rddir_cache *rdc;
2980 2980          rddir_cache *nrdc;
2981 2981          rddir_cache *rrdc;
2982 2982  #ifdef DEBUG
2983 2983          int missed;
2984 2984  #endif
2985 2985          rddir_cache srdc;
2986 2986          avl_index_t where;
2987 2987  
2988 2988          rp = VTOR(vp);
2989 2989  
2990 2990          ASSERT(nfs_rw_lock_held(&rp->r_rwlock, RW_READER));
2991 2991          if (nfs_zone() != VTOMI(vp)->mi_zone)
2992 2992                  return (EIO);
2993 2993          /*
2994 2994           * Make sure that the directory cache is valid.
2995 2995           */
2996 2996          if (HAVE_RDDIR_CACHE(rp)) {
2997 2997                  if (nfs_disable_rddir_cache) {
2998 2998                          /*
2999 2999                           * Setting nfs_disable_rddir_cache in /etc/system
3000 3000                           * allows interoperability with servers that do not
3001 3001                           * properly update the attributes of directories.
3002 3002                           * Any cached information gets purged before an
3003 3003                           * access is made to it.
3004 3004                           */
3005 3005                          nfs_purge_rddir_cache(vp);
3006 3006                  } else {
3007 3007                          error = nfs_validate_caches(vp, cr);
3008 3008                          if (error)
3009 3009                                  return (error);
3010 3010                  }
3011 3011          }
3012 3012  
3013 3013          /*
3014 3014           * UGLINESS: SunOS 3.2 servers apparently cannot always handle an
3015 3015           * RFS_READDIR request with rda_count set to more than 0x400. So
3016 3016           * we reduce the request size here purely for compatibility.
3017 3017           *
3018 3018           * In general, this is no longer required.  However, if a server
3019 3019           * is discovered which cannot handle requests larger than 1024,
3020 3020           * nfs_shrinkreaddir can be set to 1 to enable this backwards
3021 3021           * compatibility.
3022 3022           *
3023 3023           * In any case, the request size is limited to NFS_MAXDATA bytes.
3024 3024           */
3025 3025          count = MIN(uiop->uio_iov->iov_len,
3026 3026              nfs_shrinkreaddir ? 0x400 : NFS_MAXDATA);
3027 3027  
3028 3028          nrdc = NULL;
3029 3029  #ifdef DEBUG
3030 3030          missed = 0;
3031 3031  #endif
3032 3032  top:
3033 3033          /*
3034 3034           * Short-circuit the last readdir, which always returns 0 bytes.
3035 3035           * This can be done after the directory has been read through
3036 3036           * completely at least once.  This will set r_direof, which
3037 3037           * can be used to find the value of the last cookie.
3038 3038           */
3039 3039          mutex_enter(&rp->r_statelock);
3040 3040          if (rp->r_direof != NULL &&
3041 3041              uiop->uio_offset == rp->r_direof->nfs_ncookie) {
3042 3042                  mutex_exit(&rp->r_statelock);
3043 3043  #ifdef DEBUG
3044 3044                  nfs_readdir_cache_shorts++;
3045 3045  #endif
3046 3046                  if (eofp)
3047 3047                          *eofp = 1;
3048 3048                  if (nrdc != NULL)
3049 3049                          rddir_cache_rele(nrdc);
3050 3050                  return (0);
3051 3051          }
3052 3052          /*
3053 3053           * Look for a cache entry.  Cache entries are identified
3054 3054           * by the NFS cookie value and the byte count requested.
3055 3055           */
3056 3056          srdc.nfs_cookie = uiop->uio_offset;
3057 3057          srdc.buflen = count;
3058 3058          rdc = avl_find(&rp->r_dir, &srdc, &where);
3059 3059          if (rdc != NULL) {
3060 3060                  rddir_cache_hold(rdc);
3061 3061                  /*
3062 3062                   * If the cache entry is in the process of being
3063 3063                   * filled in, wait until this completes.  The
3064 3064                   * RDDIRWAIT bit is set to indicate that someone
3065 3065                   * is waiting; when the thread currently
3066 3066                   * filling the entry is done, it should do a
3067 3067                   * cv_broadcast to wake up all of the threads
3068 3068                   * waiting for it to finish.
3069 3069                   */
3070 3070                  if (rdc->flags & RDDIR) {
3071 3071                          nfs_rw_exit(&rp->r_rwlock);
3072 3072                          rdc->flags |= RDDIRWAIT;
3073 3073  #ifdef DEBUG
3074 3074                          nfs_readdir_cache_waits++;
3075 3075  #endif
3076 3076                          if (!cv_wait_sig(&rdc->cv, &rp->r_statelock)) {
3077 3077                                  /*
3078 3078                                   * We got interrupted; probably
3079 3079                                   * the user typed ^C or an alarm
3080 3080                                   * fired.  We free the new entry
3081 3081                                   * if we allocated one.
3082 3082                                   */
3083 3083                                  mutex_exit(&rp->r_statelock);
3084 3084                                  (void) nfs_rw_enter_sig(&rp->r_rwlock,
3085 3085                                      RW_READER, FALSE);
3086 3086                                  rddir_cache_rele(rdc);
3087 3087                                  if (nrdc != NULL)
3088 3088                                          rddir_cache_rele(nrdc);
3089 3089                                  return (EINTR);
3090 3090                          }
3091 3091                          mutex_exit(&rp->r_statelock);
3092 3092                          (void) nfs_rw_enter_sig(&rp->r_rwlock,
3093 3093                              RW_READER, FALSE);
3094 3094                          rddir_cache_rele(rdc);
3095 3095                          goto top;
3096 3096                  }
3097 3097                  /*
3098 3098                   * Check to see if a readdir is required to
3099 3099                   * fill the entry.  If so, mark this entry
3100 3100                   * as being filled, remove our reference,
3101 3101                   * and branch to the code to fill the entry.
3102 3102                   */
3103 3103                  if (rdc->flags & RDDIRREQ) {
3104 3104                          rdc->flags &= ~RDDIRREQ;
3105 3105                          rdc->flags |= RDDIR;
3106 3106                          if (nrdc != NULL)
3107 3107                                  rddir_cache_rele(nrdc);
3108 3108                          nrdc = rdc;
3109 3109                          mutex_exit(&rp->r_statelock);
3110 3110                          goto bottom;
3111 3111                  }
3112 3112  #ifdef DEBUG
3113 3113                  if (!missed)
3114 3114                          nfs_readdir_cache_hits++;
3115 3115  #endif
3116 3116                  /*
3117 3117                   * If an error occurred while attempting
3118 3118                   * to fill the cache entry, just return it.
3119 3119                   */
3120 3120                  if (rdc->error) {
3121 3121                          error = rdc->error;
3122 3122                          mutex_exit(&rp->r_statelock);
3123 3123                          rddir_cache_rele(rdc);
3124 3124                          if (nrdc != NULL)
3125 3125                                  rddir_cache_rele(nrdc);
3126 3126                          return (error);
3127 3127                  }
3128 3128  
3129 3129                  /*
3130 3130                   * The cache entry is complete and good;
3131 3131                   * copy out the dirent structs to the calling
3132 3132                   * thread.
3133 3133                   */
3134 3134                  error = uiomove(rdc->entries, rdc->entlen, UIO_READ, uiop);
3135 3135  
3136 3136                  /*
3137 3137                   * If no error occurred during the copyout,
3138 3138                   * update the offset in the uio struct to
3139 3139                   * contain the value of the next cookie
3140 3140                   * and set the eof value appropriately.
3141 3141                   */
3142 3142                  if (!error) {
3143 3143                          uiop->uio_offset = rdc->nfs_ncookie;
3144 3144                          if (eofp)
3145 3145                                  *eofp = rdc->eof;
3146 3146                  }
3147 3147  
3148 3148                  /*
3149 3149                   * Decide whether to do readahead.  Don't if we
3150 3150                   * have already read to the end of the directory.
3151 3151                   */
3152 3152                  if (rdc->eof) {
3153 3153                          rp->r_direof = rdc;
3154 3154                          mutex_exit(&rp->r_statelock);
3155 3155                          rddir_cache_rele(rdc);
3156 3156                          if (nrdc != NULL)
3157 3157                                  rddir_cache_rele(nrdc);
3158 3158                          return (error);
3159 3159                  }
3160 3160  
3161 3161                  /*
3162 3162                   * Check to see whether we found an entry
3163 3163                   * for the readahead.  If so, we don't need
3164 3164                   * to do anything further, so free the new
3165 3165                   * entry if one was allocated.  Otherwise,
3166 3166                   * allocate a new entry, add it to the cache,
3167 3167                   * and then initiate an asynchronous readdir
3168 3168                   * operation to fill it.
3169 3169                   */
3170 3170                  srdc.nfs_cookie = rdc->nfs_ncookie;
3171 3171                  srdc.buflen = count;
3172 3172                  rrdc = avl_find(&rp->r_dir, &srdc, &where);
3173 3173                  if (rrdc != NULL) {
3174 3174                          if (nrdc != NULL)
3175 3175                                  rddir_cache_rele(nrdc);
3176 3176                  } else {
3177 3177                          if (nrdc != NULL)
3178 3178                                  rrdc = nrdc;
3179 3179                          else {
3180 3180                                  rrdc = rddir_cache_alloc(KM_NOSLEEP);
3181 3181                          }
3182 3182                          if (rrdc != NULL) {
3183 3183                                  rrdc->nfs_cookie = rdc->nfs_ncookie;
3184 3184                                  rrdc->buflen = count;
3185 3185                                  avl_insert(&rp->r_dir, rrdc, where);
3186 3186                                  rddir_cache_hold(rrdc);
3187 3187                                  mutex_exit(&rp->r_statelock);
3188 3188                                  rddir_cache_rele(rdc);
3189 3189  #ifdef DEBUG
3190 3190                                  nfs_readdir_readahead++;
3191 3191  #endif
3192 3192                                  nfs_async_readdir(vp, rrdc, cr, nfsreaddir);
3193 3193                                  return (error);
3194 3194                          }
3195 3195                  }
3196 3196  
3197 3197                  mutex_exit(&rp->r_statelock);
3198 3198                  rddir_cache_rele(rdc);
3199 3199                  return (error);
3200 3200          }
3201 3201  
3202 3202          /*
3203 3203           * Didn't find an entry in the cache.  Construct a new empty
3204 3204           * entry and link it into the cache.  Other processes attempting
3205 3205           * to access this entry will need to wait until it is filled in.
3206 3206           *
3207 3207           * Since kmem_alloc may block, another pass through the cache
3208 3208           * will need to be taken to make sure that another process
3209 3209           * hasn't already added an entry to the cache for this request.
3210 3210           */
3211 3211          if (nrdc == NULL) {
3212 3212                  mutex_exit(&rp->r_statelock);
3213 3213                  nrdc = rddir_cache_alloc(KM_SLEEP);
3214 3214                  nrdc->nfs_cookie = uiop->uio_offset;
3215 3215                  nrdc->buflen = count;
3216 3216                  goto top;
3217 3217          }
3218 3218  
3219 3219          /*
3220 3220           * Add this entry to the cache.
3221 3221           */
3222 3222          avl_insert(&rp->r_dir, nrdc, where);
3223 3223          rddir_cache_hold(nrdc);
3224 3224          mutex_exit(&rp->r_statelock);
3225 3225  
3226 3226  bottom:
3227 3227  #ifdef DEBUG
3228 3228          missed = 1;
3229 3229          nfs_readdir_cache_misses++;
3230 3230  #endif
3231 3231          /*
3232 3232           * Do the readdir.
3233 3233           */
3234 3234          error = nfsreaddir(vp, nrdc, cr);
3235 3235  
3236 3236          /*
3237 3237           * If this operation failed, just return the error which occurred.
3238 3238           */
3239 3239          if (error != 0)
3240 3240                  return (error);
3241 3241  
3242 3242          /*
3243 3243           * Since the RPC operation will have taken some time and blocked
3244 3244           * this process, another pass through the cache will need to be
3245 3245           * taken to find the correct cache entry.  It is possible that
3246 3246           * the correct cache entry will not be there (although one was
3247 3247           * added) because the directory changed during the RPC operation
3248 3248           * and the readdir cache was flushed.  In this case, just start
3249 3249           * over.  It is hoped that this will not happen too often... :-)
3250 3250           */
3251 3251          nrdc = NULL;
3252 3252          goto top;
3253 3253          /* NOTREACHED */
3254 3254  }
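/*
 * Readdir cache entries are keyed by both the NFS cookie and the
 * requested buffer length, so the same directory read with different
 * buffer sizes occupies distinct entries.  A lookup is just a
 * stack-allocated search key plus avl_find(), as in nfs_readdir()
 * above; condensed here into a hypothetical helper for illustration:
 */
static rddir_cache *
find_rddir_entry(rnode_t *rp, offset_t cookie, size_t count)
{
	rddir_cache srdc;
	avl_index_t where;
	rddir_cache *rdc;

	ASSERT(MUTEX_HELD(&rp->r_statelock));
	srdc.nfs_cookie = cookie;
	srdc.buflen = count;
	rdc = avl_find(&rp->r_dir, &srdc, &where);
	if (rdc != NULL)
		rddir_cache_hold(rdc);	/* caller must rddir_cache_rele() */
	return (rdc);
}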
3255 3255  
3256 3256  static int
3257 3257  nfsreaddir(vnode_t *vp, rddir_cache *rdc, cred_t *cr)
3258 3258  {
3259 3259          int error;
3260 3260          struct nfsrddirargs rda;
3261 3261          struct nfsrddirres rd;
3262 3262          rnode_t *rp;
3263 3263          mntinfo_t *mi;
3264 3264          uint_t count;
3265 3265          int douprintf;
3266 3266          failinfo_t fi, *fip;
3267 3267  
3268 3268          ASSERT(nfs_zone() == VTOMI(vp)->mi_zone);
3269 3269          count = rdc->buflen;
3270 3270  
3271 3271          rp = VTOR(vp);
3272 3272          mi = VTOMI(vp);
3273 3273  
3274 3274          rda.rda_fh = *VTOFH(vp);
3275 3275          rda.rda_offset = rdc->nfs_cookie;
3276 3276  
3277 3277          /*
3278 3278           * NFS client failover support:
3279 3279           * suppress failover unless we have a zero cookie.
3280 3280           */
3281 3281          if (rdc->nfs_cookie == (off_t)0) {
3282 3282                  fi.vp = vp;
3283 3283                  fi.fhp = (caddr_t)&rda.rda_fh;
3284 3284                  fi.copyproc = nfscopyfh;
3285 3285                  fi.lookupproc = nfslookup;
3286 3286                  fi.xattrdirproc = acl_getxattrdir2;
3287 3287                  fip = &fi;
3288 3288          } else {
3289 3289                  fip = NULL;
3290 3290          }
3291 3291  
3292 3292          rd.rd_entries = kmem_alloc(rdc->buflen, KM_SLEEP);
3293 3293          rd.rd_size = count;
3294 3294          rd.rd_offset = rda.rda_offset;
3295 3295  
3296 3296          douprintf = 1;
3297 3297  
3298 3298          if (mi->mi_io_kstats) {
3299 3299                  mutex_enter(&mi->mi_lock);
3300 3300                  kstat_runq_enter(KSTAT_IO_PTR(mi->mi_io_kstats));
3301 3301                  mutex_exit(&mi->mi_lock);
3302 3302          }
3303 3303  
3304 3304          do {
3305 3305                  rda.rda_count = MIN(count, mi->mi_curread);
3306 3306                  error = rfs2call(mi, RFS_READDIR,
3307 3307                      xdr_rddirargs, (caddr_t)&rda,
3308 3308                      xdr_getrddirres, (caddr_t)&rd, cr,
3309 3309                      &douprintf, &rd.rd_status, 0, fip);
3310 3310          } while (error == ENFS_TRYAGAIN);
3311 3311  
3312 3312          if (mi->mi_io_kstats) {
3313 3313                  mutex_enter(&mi->mi_lock);
3314 3314                  kstat_runq_exit(KSTAT_IO_PTR(mi->mi_io_kstats));
3315 3315                  mutex_exit(&mi->mi_lock);
3316 3316          }
3317 3317  
3318 3318          /*
3319 3319           * Since we are actually doing a READDIR RPC, we must have
3320 3320           * exclusive access to the cache entry being filled.  Thus,
3321 3321           * it is safe to update all fields except for the flags
3322 3322           * field.  The r_statelock in the rnode must be held to
3323 3323           * prevent two different threads from simultaneously
3324 3324           * attempting to update the flags field.  This can happen
3325 3325           * if we are turning off RDDIR and the other thread is
3326 3326           * trying to set RDDIRWAIT.
3327 3327           */
3328 3328          ASSERT(rdc->flags & RDDIR);
3329 3329          if (!error) {
3330 3330                  error = geterrno(rd.rd_status);
3331 3331                  if (!error) {
3332 3332                          rdc->nfs_ncookie = rd.rd_offset;
3333 3333                          rdc->eof = rd.rd_eof ? 1 : 0;
3334 3334                          rdc->entlen = rd.rd_size;
3335 3335                          ASSERT(rdc->entlen <= rdc->buflen);
3336 3336  #ifdef DEBUG
3337 3337                          rdc->entries = rddir_cache_buf_alloc(rdc->buflen,
3338 3338                              KM_SLEEP);
3339 3339  #else
3340 3340                          rdc->entries = kmem_alloc(rdc->buflen, KM_SLEEP);
3341 3341  #endif
3342 3342                          bcopy(rd.rd_entries, rdc->entries, rdc->entlen);
3343 3343                          rdc->error = 0;
3344 3344                          if (mi->mi_io_kstats) {
3345 3345                                  mutex_enter(&mi->mi_lock);
3346 3346                                  KSTAT_IO_PTR(mi->mi_io_kstats)->reads++;
3347 3347                                  KSTAT_IO_PTR(mi->mi_io_kstats)->nread +=
3348 3348                                      rd.rd_size;
3349 3349                                  mutex_exit(&mi->mi_lock);
3350 3350                          }
3351 3351                  } else {
3352 3352                          PURGE_STALE_FH(error, vp, cr);
3353 3353                  }
3354 3354          }
3355 3355          if (error) {
3356 3356                  rdc->entries = NULL;
3357 3357                  rdc->error = error;
3358 3358          }
3359 3359          kmem_free(rd.rd_entries, rdc->buflen);
3360 3360  
3361 3361          mutex_enter(&rp->r_statelock);
3362 3362          rdc->flags &= ~RDDIR;
3363 3363          if (rdc->flags & RDDIRWAIT) {
3364 3364                  rdc->flags &= ~RDDIRWAIT;
3365 3365                  cv_broadcast(&rdc->cv);
3366 3366          }
3367 3367          if (error)
3368 3368                  rdc->flags |= RDDIRREQ;
3369 3369          mutex_exit(&rp->r_statelock);
3370 3370  
3371 3371          rddir_cache_rele(rdc);
3372 3372  
3373 3373          return (error);
3374 3374  }
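/*
 * The tail of nfsreaddir() is the producer half of the RDDIR /
 * RDDIRWAIT handshake: the filling thread owns the entry while RDDIR
 * is set, waiters set RDDIRWAIT and sleep on rdc->cv under
 * r_statelock, and the filler clears RDDIR and broadcasts if anyone
 * signed up.  The handshake in isolation, as a hypothetical helper
 * named only for illustration:
 */
static void
rddir_fill_done(rnode_t *rp, rddir_cache *rdc, int error)
{
	mutex_enter(&rp->r_statelock);
	rdc->flags &= ~RDDIR;
	if (rdc->flags & RDDIRWAIT) {
		rdc->flags &= ~RDDIRWAIT;
		cv_broadcast(&rdc->cv);
	}
	if (error)
		rdc->flags |= RDDIRREQ;	/* let a later reader retry */
	mutex_exit(&rp->r_statelock);
}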
3375 3375  
3376 3376  #ifdef DEBUG
3377 3377  static int nfs_bio_do_stop = 0;
3378 3378  #endif
3379 3379  
3380 3380  static int
3381 3381  nfs_bio(struct buf *bp, cred_t *cr)
3382 3382  {
3383 3383          rnode_t *rp = VTOR(bp->b_vp);
3384 3384          int count;
3385 3385          int error;
3386 3386          cred_t *cred;
3387 3387          uint_t offset;
3388 3388  
3389 3389          DTRACE_IO1(start, struct buf *, bp);
3390 3390  
3391 3391          ASSERT(nfs_zone() == VTOMI(bp->b_vp)->mi_zone);
3392 3392          offset = dbtob(bp->b_blkno);
3393 3393  
3394 3394          if (bp->b_flags & B_READ) {
3395 3395                  mutex_enter(&rp->r_statelock);
3396 3396                  if (rp->r_cred != NULL) {
3397 3397                          cred = rp->r_cred;
3398 3398                          crhold(cred);
3399 3399                  } else {
3400 3400                          rp->r_cred = cr;
3401 3401                          crhold(cr);
3402 3402                          cred = cr;
3403 3403                          crhold(cred);
3404 3404                  }
3405 3405                  mutex_exit(&rp->r_statelock);
3406 3406          read_again:
3407 3407                  error = bp->b_error = nfsread(bp->b_vp, bp->b_un.b_addr,
3408 3408                      offset, bp->b_bcount, &bp->b_resid, cred);
3409 3409  
3410 3410                  crfree(cred);
3411 3411                  if (!error) {
3412 3412                          if (bp->b_resid) {
3413 3413                                  /*
3414 3414                                   * Didn't get it all because we hit EOF;
3415 3415                                   * zero all the memory beyond the EOF.
3416 3416                                   */
3418 3418                                  bzero(bp->b_un.b_addr +
3419 3419                                      bp->b_bcount - bp->b_resid, bp->b_resid);
3420 3420                          }
3421 3421                          mutex_enter(&rp->r_statelock);
3422 3422                          if (bp->b_resid == bp->b_bcount &&
3423 3423                              offset >= rp->r_size) {
3424 3424                                  /*
3425 3425                                   * We didn't read anything at all as we are
3426 3426                                   * past EOF.  Return an error indicator back
3427 3427                                   * but don't destroy the pages (yet).
3428 3428                                   */
3429 3429                                  error = NFS_EOF;
3430 3430                          }
3431 3431                          mutex_exit(&rp->r_statelock);
3432 3432                  } else if (error == EACCES) {
3433 3433                          mutex_enter(&rp->r_statelock);
3434 3434                          if (cred != cr) {
3435 3435                                  if (rp->r_cred != NULL)
3436 3436                                          crfree(rp->r_cred);
3437 3437                                  rp->r_cred = cr;
3438 3438                                  crhold(cr);
3439 3439                                  cred = cr;
3440 3440                                  crhold(cred);
3441 3441                                  mutex_exit(&rp->r_statelock);
3442 3442                                  goto read_again;
3443 3443                          }
3444 3444                          mutex_exit(&rp->r_statelock);
3445 3445                  }
3446 3446          } else {
3447 3447                  if (!(rp->r_flags & RSTALE)) {
3448 3448                          mutex_enter(&rp->r_statelock);
3449 3449                          if (rp->r_cred != NULL) {
3450 3450                                  cred = rp->r_cred;
3451 3451                                  crhold(cred);
3452 3452                          } else {
3453 3453                                  rp->r_cred = cr;
3454 3454                                  crhold(cr);
3455 3455                                  cred = cr;
3456 3456                                  crhold(cred);
3457 3457                          }
3458 3458                          mutex_exit(&rp->r_statelock);
3459 3459                  write_again:
3460 3460                          mutex_enter(&rp->r_statelock);
3461 3461                          count = MIN(bp->b_bcount, rp->r_size - offset);
3462 3462                          mutex_exit(&rp->r_statelock);
3463 3463                          if (count < 0)
3464 3464                                  cmn_err(CE_PANIC, "nfs_bio: write count < 0");
3465 3465  #ifdef DEBUG
3466 3466                          if (count == 0) {
3467 3467                                  zcmn_err(getzoneid(), CE_WARN,
3468 3468                                      "nfs_bio: zero length write at %d",
3469 3469                                      offset);
3470 3470                                  nfs_printfhandle(&rp->r_fh);
3471 3471                                  if (nfs_bio_do_stop)
3472 3472                                          debug_enter("nfs_bio");
3473 3473                          }
3474 3474  #endif
3475 3475                          error = nfswrite(bp->b_vp, bp->b_un.b_addr, offset,
3476 3476                              count, cred);
3477 3477                          if (error == EACCES) {
3478 3478                                  mutex_enter(&rp->r_statelock);
3479 3479                                  if (cred != cr) {
3480 3480                                          if (rp->r_cred != NULL)
3481 3481                                                  crfree(rp->r_cred);
3482 3482                                          rp->r_cred = cr;
3483 3483                                          crhold(cr);
3484 3484                                          crfree(cred);
3485 3485                                          cred = cr;
3486 3486                                          crhold(cred);
3487 3487                                          mutex_exit(&rp->r_statelock);
3488 3488                                          goto write_again;
3489 3489                                  }
3490 3490                                  mutex_exit(&rp->r_statelock);
3491 3491                          }
3492 3492                          bp->b_error = error;
3493 3493                          if (error && error != EINTR) {
3494 3494                                  /*
3495 3495                                   * Don't print EDQUOT errors on the console.
3496 3496                                   * Don't print asynchronous EACCES errors.
3497 3497                                   * Don't print EFBIG errors.
3498 3498                                   * Print all other write errors.
3499 3499                                   */
3500 3500                                  if (error != EDQUOT && error != EFBIG &&
3501 3501                                      (error != EACCES ||
3502 3502                                      !(bp->b_flags & B_ASYNC)))
3503 3503                                          nfs_write_error(bp->b_vp, error, cred);
3504 3504                                  /*
3505 3505                                   * Update r_error and r_flags as appropriate.
3506 3506                                   * If the error was ESTALE, then mark the
3507 3507                                   * rnode as not being writeable and save
3508 3508                                   * the error status.  Otherwise, save any
3509 3509                                   * errors which occur from asynchronous
3510 3510                                   * page invalidations.  Any errors occurring
3511 3511                                   * from other operations should be saved
3512 3512                                   * by the caller.
3513 3513                                   */
3514 3514                                  mutex_enter(&rp->r_statelock);
3515 3515                                  if (error == ESTALE) {
3516 3516                                          rp->r_flags |= RSTALE;
3517 3517                                          if (!rp->r_error)
3518 3518                                                  rp->r_error = error;
3519 3519                                  } else if (!rp->r_error &&
3520 3520                                      (bp->b_flags &
3521 3521                                      (B_INVAL|B_FORCE|B_ASYNC)) ==
3522 3522                                      (B_INVAL|B_FORCE|B_ASYNC)) {
3523 3523                                          rp->r_error = error;
3524 3524                                  }
3525 3525                                  mutex_exit(&rp->r_statelock);
3526 3526                          }
3527 3527                          crfree(cred);
3528 3528                  } else {
3529 3529                          error = rp->r_error;
3530 3530                          /*
3531 3531                           * A close may have cleared r_error; if so,
3532 3532                           * propagate the ESTALE error return properly.
3533 3533                           */
3534 3534                          if (error == 0)
3535 3535                                  error = ESTALE;
3536 3536                  }
3537 3537          }
3538 3538  
3539 3539          if (error != 0 && error != NFS_EOF)
3540 3540                  bp->b_flags |= B_ERROR;
3541 3541  
3542 3542          DTRACE_IO1(done, struct buf *, bp);
3543 3543  
3544 3544          return (error);
3545 3545  }
3546 3546  
3547 3547  /* ARGSUSED */
3548 3548  static int
3549 3549  nfs_fid(vnode_t *vp, fid_t *fidp, caller_context_t *ct)
3550 3550  {
3551 3551          struct nfs_fid *fp;
3552 3552          rnode_t *rp;
3553 3553  
3554 3554          rp = VTOR(vp);
3555 3555  
3556 3556          if (fidp->fid_len < (sizeof (struct nfs_fid) - sizeof (short))) {
3557 3557                  fidp->fid_len = sizeof (struct nfs_fid) - sizeof (short);
3558 3558                  return (ENOSPC);
3559 3559          }
3560 3560          fp = (struct nfs_fid *)fidp;
3561 3561          fp->nf_pad = 0;
3562 3562          fp->nf_len = sizeof (struct nfs_fid) - sizeof (short);
3563 3563          bcopy(rp->r_fh.fh_buf, fp->nf_data, NFS_FHSIZE);
3564 3564          return (0);
3565 3565  }
3566 3566  
3567 3567  /* ARGSUSED2 */
3568 3568  static int
3569 3569  nfs_rwlock(vnode_t *vp, int write_lock, caller_context_t *ctp)
3570 3570  {
3571 3571          rnode_t *rp = VTOR(vp);
3572 3572  
3573 3573          if (!write_lock) {
3574 3574                  (void) nfs_rw_enter_sig(&rp->r_rwlock, RW_READER, FALSE);
3575 3575                  return (V_WRITELOCK_FALSE);
3576 3576          }
3577 3577  
3578 3578          if ((rp->r_flags & RDIRECTIO) || (VTOMI(vp)->mi_flags & MI_DIRECTIO)) {
3579 3579                  (void) nfs_rw_enter_sig(&rp->r_rwlock, RW_READER, FALSE);
3580 3580                  if (rp->r_mapcnt == 0 && !vn_has_cached_data(vp))
3581 3581                          return (V_WRITELOCK_FALSE);
3582 3582                  nfs_rw_exit(&rp->r_rwlock);
3583 3583          }
3584 3584  
3585 3585          (void) nfs_rw_enter_sig(&rp->r_rwlock, RW_WRITER, FALSE);
3586 3586          return (V_WRITELOCK_TRUE);
3587 3587  }
3588 3588  
3589 3589  /* ARGSUSED */
3590 3590  static void
3591 3591  nfs_rwunlock(vnode_t *vp, int write_lock, caller_context_t *ctp)
3592 3592  {
3593 3593          rnode_t *rp = VTOR(vp);
3594 3594  
3595 3595          nfs_rw_exit(&rp->r_rwlock);
3596 3596  }
3597 3597  
3598 3598  /* ARGSUSED */
3599 3599  static int
3600 3600  nfs_seek(vnode_t *vp, offset_t ooff, offset_t *noffp, caller_context_t *ct)
3601 3601  {
3602 3602  
3603 3603          /*
3604 3604           * Because we stuff the readdir cookie into the offset field,
3605 3605           * someone may attempt to do an lseek with the cookie, which
3606 3606           * we want to succeed.
3607 3607           */
3608 3608          if (vp->v_type == VDIR)
3609 3609                  return (0);
3610 3610          if (*noffp < 0 || *noffp > MAXOFF32_T)
3611 3611                  return (EINVAL);
3612 3612          return (0);
3613 3613  }
3614 3614  
3615 3615  /*
3616 3616   * Number of NFS_MAXDATA blocks to read ahead,
3617 3617   * optimized for 100Base-T.
3618 3618   */
3619 3619  static int nfs_nra = 4;
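           /*
            * A tuning sketch (illustrative value, assuming the conventional
            * /etc/system module:variable mechanism; nothing in this file
            * defines that syntax):
            *
            *      set nfs:nfs_nra = 8
            *
            * Larger values favor streaming reads on fast links at the cost
            * of more async readahead work queued per file.
            */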
3620 3620  
3621 3621  #ifdef DEBUG
3622 3622  static int nfs_lostpage = 0;    /* number of times we lost original page */
3623 3623  #endif
3624 3624  
3625 3625  /*
3626 3626   * Return all the pages from [off..off+len) in file
3627 3627   */
3628 3628  /* ARGSUSED */
3629 3629  static int
3630 3630  nfs_getpage(vnode_t *vp, offset_t off, size_t len, uint_t *protp,
3631 3631          page_t *pl[], size_t plsz, struct seg *seg, caddr_t addr,
3632 3632          enum seg_rw rw, cred_t *cr, caller_context_t *ct)
3633 3633  {
3634 3634          rnode_t *rp;
3635 3635          int error;
3636 3636          mntinfo_t *mi;
3637 3637  
3638 3638          if (vp->v_flag & VNOMAP)
3639 3639                  return (ENOSYS);
3640 3640  
3641 3641          ASSERT(off <= MAXOFF32_T);
3642 3642          if (nfs_zone() != VTOMI(vp)->mi_zone)
3643 3643                  return (EIO);
3644 3644          if (protp != NULL)
3645 3645                  *protp = PROT_ALL;
3646 3646  
3647 3647          /*
3648 3648           * Now validate that the caches are up to date.
3649 3649           */
3650 3650          error = nfs_validate_caches(vp, cr);
3651 3651          if (error)
3652 3652                  return (error);
3653 3653  
3654 3654          rp = VTOR(vp);
3655 3655          mi = VTOMI(vp);
3656 3656  retry:
3657 3657          mutex_enter(&rp->r_statelock);
3658 3658  
3659 3659          /*
3660 3660           * Don't create dirty pages faster than they
3661 3661           * can be cleaned so that the system doesn't
3662 3662           * get imbalanced.  If the async queue is
3663 3663           * maxed out, then wait for it to drain before
3664 3664           * creating more dirty pages.  Also, wait for
3665 3665           * any threads doing pagewalks in the vop_getattr
3666 3666           * entry points so that they don't block for
3667 3667           * long periods.
3668 3668           */
3669 3669          if (rw == S_CREATE) {
3670 3670                  while ((mi->mi_max_threads != 0 &&
3671 3671                      rp->r_awcount > 2 * mi->mi_max_threads) ||
3672 3672                      rp->r_gcount > 0)
3673 3673                          cv_wait(&rp->r_cv, &rp->r_statelock);
3674 3674          }
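                   /*
                    * For example (hypothetical values): with mi_max_threads
                    * = 8, the loop above makes an S_CREATE fault wait while
                    * more than 16 async writes (r_awcount) are outstanding
                    * on this rnode, or while any pagewalk (r_gcount) is in
                    * progress.
                    */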
3675 3675  
3676 3676          /*
3677 3677           * If we are getting called as a side effect of an nfs_write()
3678 3678           * operation the local file size might not be extended yet.
3679 3679           * In this case we want to be able to return pages of zeroes.
3680 3680           */
3681 3681          if (off + len > rp->r_size + PAGEOFFSET && seg != segkmap) {
3682 3682                  mutex_exit(&rp->r_statelock);
3683 3683                  return (EFAULT);                /* beyond EOF */
3684 3684          }
3685 3685  
3686 3686          mutex_exit(&rp->r_statelock);
3687 3687  
3688 3688          error = pvn_getpages(nfs_getapage, vp, off, len, protp, pl, plsz,
3689 3689              seg, addr, rw, cr);
3690 3690  
3691 3691          switch (error) {
3692 3692          case NFS_EOF:
3693 3693                  nfs_purge_caches(vp, NFS_NOPURGE_DNLC, cr);
3694 3694                  goto retry;
3695 3695          case ESTALE:
3696 3696                  PURGE_STALE_FH(error, vp, cr);
3697 3697          }
3698 3698  
3699 3699          return (error);
3700 3700  }
3701 3701  
3702 3702  /*
3703 3703   * Called from pvn_getpages to get a particular page.
3704 3704   */
3705 3705  /* ARGSUSED */
3706 3706  static int
3707 3707  nfs_getapage(vnode_t *vp, u_offset_t off, size_t len, uint_t *protp,
3708 3708          page_t *pl[], size_t plsz, struct seg *seg, caddr_t addr,
3709 3709          enum seg_rw rw, cred_t *cr)
3710 3710  {
3711 3711          rnode_t *rp;
3712 3712          uint_t bsize;
3713 3713          struct buf *bp;
3714 3714          page_t *pp;
3715 3715          u_offset_t lbn;
3716 3716          u_offset_t io_off;
3717 3717          u_offset_t blkoff;
3718 3718          u_offset_t rablkoff;
3719 3719          size_t io_len;
3720 3720          uint_t blksize;
3721 3721          int error;
3722 3722          int readahead;
3723 3723          int readahead_issued = 0;
3724 3724          int ra_window; /* readahead window */
3725 3725          page_t *pagefound;
3726 3726  
3727 3727          if (nfs_zone() != VTOMI(vp)->mi_zone)
3728 3728                  return (EIO);
3729 3729          rp = VTOR(vp);
3730 3730          bsize = MAX(vp->v_vfsp->vfs_bsize, PAGESIZE);
3731 3731  
3732 3732  reread:
3733 3733          bp = NULL;
3734 3734          pp = NULL;
3735 3735          pagefound = NULL;
3736 3736  
3737 3737          if (pl != NULL)
3738 3738                  pl[0] = NULL;
3739 3739  
3740 3740          error = 0;
3741 3741          lbn = off / bsize;
3742 3742          blkoff = lbn * bsize;
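                   /*
                    * E.g. (illustrative numbers): with bsize = 32K, an off
                    * of 40960 yields lbn = 1 and blkoff = 32768, i.e. the
                    * 32K-aligned block containing the faulting offset.
                    */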
3743 3743  
3744 3744          /*
3745 3745           * Queueing up the readahead before doing the synchronous read
3746 3746           * results in a significant increase in read throughput because
3747 3747           * of the increased parallelism between the async threads and
3748 3748           * the process context.
3749 3749           */
3750 3750          if ((off & ((vp->v_vfsp->vfs_bsize) - 1)) == 0 &&
3751 3751              rw != S_CREATE &&
3752 3752              !(vp->v_flag & VNOCACHE)) {
3753 3753                  mutex_enter(&rp->r_statelock);
3754 3754  
3755 3755                  /*
3756 3756                   * Calculate the number of readaheads to do.
3757 3757                   * a) No readaheads at offset = 0.
3758 3758                   * b) Do maximum(nfs_nra) readaheads when the readahead
3759 3759                   *    window is closed.
3760 3760                   * c) Do between 1 and (nfs_nra - 1) readaheads, depending
3761 3761                   *    upon how far open or closed the readahead window is.
3762 3762                   * d) No readaheads if rp->r_nextr is not within the scope
3763 3763                   *    of the readahead window (random i/o).
3764 3764                   */
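                           /*
                            * Worked example (illustrative numbers): with
                            * nfs_nra = 4 and bsize = 32K, if rp->r_nextr
                            * sits two blocks past blkoff then ra_window =
                            * (r_nextr - blkoff) / bsize = 2 and readahead =
                            * nfs_nra - ra_window = 2; sequential i/o
                            * (blkoff == rp->r_nextr) gets the full 4, and
                            * offsets outside the window get none.
                            */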
3765 3765  
3766 3766                  if (off == 0)
3767 3767                          readahead = 0;
3768 3768                  else if (blkoff == rp->r_nextr)
3769 3769                          readahead = nfs_nra;
3770 3770                  else if (rp->r_nextr > blkoff &&
3771 3771                      ((ra_window = (rp->r_nextr - blkoff) / bsize)
3772 3772                      <= (nfs_nra - 1)))
3773 3773                          readahead = nfs_nra - ra_window;
3774 3774                  else
3775 3775                          readahead = 0;
3776 3776  
3777 3777                  rablkoff = rp->r_nextr;
3778 3778                  while (readahead > 0 && rablkoff + bsize < rp->r_size) {
3779 3779                          mutex_exit(&rp->r_statelock);
3780 3780                          if (nfs_async_readahead(vp, rablkoff + bsize,
3781 3781                              addr + (rablkoff + bsize - off), seg, cr,
3782 3782                              nfs_readahead) < 0) {
3783 3783                                  mutex_enter(&rp->r_statelock);
3784 3784                                  break;
3785 3785                          }
3786 3786                          readahead--;
3787 3787                          rablkoff += bsize;
3788 3788                          /*
3789 3789                           * Indicate that we did a readahead so that
3790 3790                           * the readahead offset is not updated by
3791 3791                           * the synchronous read below.
3792 3792                           */
3793 3793                          readahead_issued = 1;
3794 3794                          mutex_enter(&rp->r_statelock);
3795 3795                          /*
3796 3796                           * Set the readahead offset to the
3797 3797                           * offset of the last async readahead
3798 3798                           * request.
3799 3799                           */
3800 3800                          rp->r_nextr = rablkoff;
3801 3801                  }
3802 3802                  mutex_exit(&rp->r_statelock);
3803 3803          }
3804 3804  
3805 3805  again:
3806 3806          if ((pagefound = page_exists(vp, off)) == NULL) {
3807 3807                  if (pl == NULL) {
3808 3808                          (void) nfs_async_readahead(vp, blkoff, addr, seg, cr,
3809 3809                              nfs_readahead);
3810 3810                  } else if (rw == S_CREATE) {
3811 3811                          /*
3812 3812                           * Block for this page is not allocated, or the offset
3813 3813                           * is beyond the current allocation size, or we're
3814 3814                           * allocating a swap slot and the page was not found,
3815 3815                           * so allocate it and return a zero page.
3816 3816                           */
3817 3817                          if ((pp = page_create_va(vp, off,
3818 3818                              PAGESIZE, PG_WAIT, seg, addr)) == NULL)
3819 3819                                  cmn_err(CE_PANIC, "nfs_getapage: page_create");
3820 3820                          io_len = PAGESIZE;
3821 3821                          mutex_enter(&rp->r_statelock);
3822 3822                          rp->r_nextr = off + PAGESIZE;
3823 3823                          mutex_exit(&rp->r_statelock);
3824 3824                  } else {
3825 3825                          /*
3826 3826                           * Need to go to the server to get a BLOCK; the
3827 3827                           * exceptions are reading at offset = 0 and doing
3828 3828                           * random i/o, in which case read only a PAGE.
3829 3829                           */
3830 3830                          mutex_enter(&rp->r_statelock);
3831 3831                          if (blkoff < rp->r_size &&
3832 3832                              blkoff + bsize >= rp->r_size) {
3833 3833                                  /*
3834 3834                                   * If only a block or less is left in
3835 3835                                   * the file, read all that is remaining.
3836 3836                                   */
3837 3837                                  if (rp->r_size <= off) {
3838 3838                                          /*
3839 3839                                           * Trying to access beyond EOF,
3840 3840                                           * set up to get at least one page.
3841 3841                                           */
3842 3842                                          blksize = off + PAGESIZE - blkoff;
3843 3843                                  } else
3844 3844                                          blksize = rp->r_size - blkoff;
3845 3845                          } else if ((off == 0) ||
3846 3846                              (off != rp->r_nextr && !readahead_issued)) {
3847 3847                                  blksize = PAGESIZE;
3848 3848                                  blkoff = off; /* block = page here */
3849 3849                          } else
3850 3850                                  blksize = bsize;
3851 3851                          mutex_exit(&rp->r_statelock);
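                                   /*
                                    * Illustrative numbers: with bsize = 32K
                                    * and r_size = 40000, a fault at off =
                                    * 36864 lands in the final partial block
                                    * (blkoff = 32768), so blksize = r_size -
                                    * blkoff = 7232 and the read is shortened
                                    * to what the file actually holds.
                                    */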
3852 3852  
3853 3853                          pp = pvn_read_kluster(vp, off, seg, addr, &io_off,
3854 3854                              &io_len, blkoff, blksize, 0);
3855 3855  
3856 3856                          /*
3857 3857                           * Some other thread has entered the page,
3858 3858                           * so just use it.
3859 3859                           */
3860 3860                          if (pp == NULL)
3861 3861                                  goto again;
3862 3862  
3863 3863                          /*
3864 3864                           * Now round the request size up to page boundaries.
3865 3865                           * This ensures that the entire page will be
3866 3866                           * initialized to zeroes if EOF is encountered.
3867 3867                           */
3868 3868                          io_len = ptob(btopr(io_len));
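                                   /*
                                    * E.g., assuming 4K pages: btopr() rounds
                                    * 7232 bytes up to 2 pages and ptob()
                                    * converts back, so io_len becomes 8192
                                    * and the tail past EOF is zero-filled
                                    * rather than left stale.
                                    */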
3869 3869  
3870 3870                          bp = pageio_setup(pp, io_len, vp, B_READ);
3871 3871                          ASSERT(bp != NULL);
3872 3872  
3873 3873                          /*
3874 3874                           * pageio_setup should have set b_addr to 0.  This
3875 3875                           * is correct since we want to do I/O on a page
3876 3876                           * boundary.  bp_mapin will use this addr to calculate
3877 3877                           * an offset, and then set b_addr to the kernel virtual
3878 3878                           * address it allocated for us.
3879 3879                           */
3880 3880                          ASSERT(bp->b_un.b_addr == 0);
3881 3881  
3882 3882                          bp->b_edev = 0;
3883 3883                          bp->b_dev = 0;
3884 3884                          bp->b_lblkno = lbtodb(io_off);
3885 3885                          bp->b_file = vp;
3886 3886                          bp->b_offset = (offset_t)off;
3887 3887                          bp_mapin(bp);
3888 3888  
3889 3889                          /*
3890 3890                           * If doing a write beyond what we believe is EOF,
3891 3891                           * don't bother trying to read the pages from the
3892 3892                           * server, we'll just zero the pages here.  We
3893 3893                           * don't check that the rw flag is S_WRITE here
3894 3894                           * because some implementations may attempt a
3895 3895                           * read access to the buffer before copying data.
3896 3896                           */
3897 3897                          mutex_enter(&rp->r_statelock);
3898 3898                          if (io_off >= rp->r_size && seg == segkmap) {
3899 3899                                  mutex_exit(&rp->r_statelock);
3900 3900                                  bzero(bp->b_un.b_addr, io_len);
3901 3901                          } else {
3902 3902                                  mutex_exit(&rp->r_statelock);
3903 3903                                  error = nfs_bio(bp, cr);
3904 3904                          }
3905 3905  
3906 3906                          /*
3907 3907                           * Unmap the buffer before freeing it.
3908 3908                           */
3909 3909                          bp_mapout(bp);
3910 3910                          pageio_done(bp);
3911 3911  
3912 3912                          if (error == NFS_EOF) {
3913 3913                                  /*
3914 3914                                   * If doing a write system call just return
3915 3915                                   * zeroed pages, else user tried to get pages
3916 3916                                   * beyond EOF, return error.  We don't check
3917 3917                                   * that the rw flag is S_WRITE here because
3918 3918                                   * some implementations may attempt a read
3919 3919                                   * access to the buffer before copying data.
3920 3920                                   */
3921 3921                                  if (seg == segkmap)
3922 3922                                          error = 0;
3923 3923                                  else
3924 3924                                          error = EFAULT;
3925 3925                          }
3926 3926  
3927 3927                          if (!readahead_issued && !error) {
3928 3928                                  mutex_enter(&rp->r_statelock);
3929 3929                                  rp->r_nextr = io_off + io_len;
3930 3930                                  mutex_exit(&rp->r_statelock);
3931 3931                          }
3932 3932                  }
3933 3933          }
3934 3934  
3935 3935  out:
3936 3936          if (pl == NULL)
3937 3937                  return (error);
3938 3938  
3939 3939          if (error) {
3940 3940                  if (pp != NULL)
3941 3941                          pvn_read_done(pp, B_ERROR);
3942 3942                  return (error);
3943 3943          }
3944 3944  
3945 3945          if (pagefound) {
3946 3946                  se_t se = (rw == S_CREATE ? SE_EXCL : SE_SHARED);
3947 3947  
3948 3948                  /*
3949 3949                   * Page exists in the cache, acquire the appropriate lock.
3950 3950                   * If this fails, start all over again.
3951 3951                   */
3952 3952                  if ((pp = page_lookup(vp, off, se)) == NULL) {
3953 3953  #ifdef DEBUG
3954 3954                          nfs_lostpage++;
3955 3955  #endif
3956 3956                          goto reread;
3957 3957                  }
3958 3958                  pl[0] = pp;
3959 3959                  pl[1] = NULL;
3960 3960                  return (0);
3961 3961          }
3962 3962  
3963 3963          if (pp != NULL)
3964 3964                  pvn_plist_init(pp, pl, plsz, off, io_len, rw);
3965 3965  
3966 3966          return (error);
3967 3967  }
3968 3968  
3969 3969  static void
3970 3970  nfs_readahead(vnode_t *vp, u_offset_t blkoff, caddr_t addr, struct seg *seg,
3971 3971          cred_t *cr)
3972 3972  {
3973 3973          int error;
3974 3974          page_t *pp;
3975 3975          u_offset_t io_off;
3976 3976          size_t io_len;
3977 3977          struct buf *bp;
3978 3978          uint_t bsize, blksize;
3979 3979          rnode_t *rp = VTOR(vp);
3980 3980  
3981 3981          ASSERT(nfs_zone() == VTOMI(vp)->mi_zone);
3982 3982  
3983 3983          bsize = MAX(vp->v_vfsp->vfs_bsize, PAGESIZE);
3984 3984  
3985 3985          mutex_enter(&rp->r_statelock);
3986 3986          if (blkoff < rp->r_size && blkoff + bsize > rp->r_size) {
3987 3987                  /*
3988 3988                   * If less than a block is left in the file,
3989 3989                   * read less than a block.
3990 3990                   */
3991 3991                  blksize = rp->r_size - blkoff;
3992 3992          } else
3993 3993                  blksize = bsize;
3994 3994          mutex_exit(&rp->r_statelock);
3995 3995  
3996 3996          pp = pvn_read_kluster(vp, blkoff, segkmap, addr,
3997 3997              &io_off, &io_len, blkoff, blksize, 1);
3998 3998          /*
3999 3999           * The isra flag passed to the kluster function is 1; we may have
4000 4000           * gotten a return value of NULL for a variety of reasons (# of free
4001 4001           * pages < minfree, someone entered the page on the vnode, etc.).  In
4002 4002           * all cases, we want to punt on the readahead.
4003 4003           */
4004 4004          if (pp == NULL)
4005 4005                  return;
4006 4006  
4007 4007          /*
4008 4008           * Now round the request size up to page boundaries.
4009 4009           * This ensures that the entire page will be
4010 4010           * initialized to zeroes if EOF is encountered.
4011 4011           */
4012 4012          io_len = ptob(btopr(io_len));
4013 4013  
4014 4014          bp = pageio_setup(pp, io_len, vp, B_READ);
4015 4015          ASSERT(bp != NULL);
4016 4016  
4017 4017          /*
4018 4018           * pageio_setup should have set b_addr to 0.  This is correct since
4019 4019           * we want to do I/O on a page boundary. bp_mapin() will use this addr
4020 4020           * to calculate an offset, and then set b_addr to the kernel virtual
4021 4021           * address it allocated for us.
4022 4022           */
4023 4023          ASSERT(bp->b_un.b_addr == 0);
4024 4024  
4025 4025          bp->b_edev = 0;
4026 4026          bp->b_dev = 0;
4027 4027          bp->b_lblkno = lbtodb(io_off);
4028 4028          bp->b_file = vp;
4029 4029          bp->b_offset = (offset_t)blkoff;
4030 4030          bp_mapin(bp);
4031 4031  
4032 4032          /*
4033 4033           * If doing a write beyond what we believe is EOF, don't bother trying
4034 4034           * to read the pages from the server, we'll just zero the pages here.
4035 4035           * We don't check that the rw flag is S_WRITE here because some
4036 4036           * implementations may attempt a read access to the buffer before
4037 4037           * copying data.
4038 4038           */
4039 4039          mutex_enter(&rp->r_statelock);
4040 4040          if (io_off >= rp->r_size && seg == segkmap) {
4041 4041                  mutex_exit(&rp->r_statelock);
4042 4042                  bzero(bp->b_un.b_addr, io_len);
4043 4043                  error = 0;
4044 4044          } else {
4045 4045                  mutex_exit(&rp->r_statelock);
4046 4046                  error = nfs_bio(bp, cr);
4047 4047                  if (error == NFS_EOF)
4048 4048                          error = 0;
4049 4049          }
4050 4050  
4051 4051          /*
4052 4052           * Unmap the buffer before freeing it.
4053 4053           */
4054 4054          bp_mapout(bp);
4055 4055          pageio_done(bp);
4056 4056  
4057 4057          pvn_read_done(pp, error ? B_READ | B_ERROR : B_READ);
4058 4058  
4059 4059          /*
4060 4060           * In case of error, set the readahead offset back to the
4061 4061           * lowest offset, since pvn_read_done() calls VN_DISPOSE
4062 4062           * to destroy the pages.
4063 4063           */
4064 4064          if (error && rp->r_nextr > io_off) {
4065 4065                  mutex_enter(&rp->r_statelock);
4066 4066                  if (rp->r_nextr > io_off)
4067 4067                          rp->r_nextr = io_off;
4068 4068                  mutex_exit(&rp->r_statelock);
4069 4069          }
4070 4070  }
4071 4071  
4072 4072  /*
4073 4073   * Flags are composed of {B_INVAL, B_FREE, B_DONTNEED, B_FORCE}
4074 4074   * If len == 0, do from off to EOF.
4075 4075   *
4076 4076   * The normal cases should be len == 0 && off == 0 (entire vp list),
4077 4077   * len == MAXBSIZE (from segmap_release actions), and len == PAGESIZE
4078 4078   * (from pageout).
4079 4079   */
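           /*
            * For instance, the whole-file invalidating flush used later in
            * this file before granting a lock is simply (a sketch of the
            * existing call, nothing new):
            *
            *      error = nfs_putpage(vp, (offset_t)0, 0, B_INVAL, cr, ct);
            *
            * since off == 0 with len == 0 covers the entire vnode page list.
            */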
4080 4080  /* ARGSUSED */
4081 4081  static int
4082 4082  nfs_putpage(vnode_t *vp, offset_t off, size_t len, int flags, cred_t *cr,
4083 4083          caller_context_t *ct)
4084 4084  {
4085 4085          int error;
4086 4086          rnode_t *rp;
4087 4087  
4088 4088          ASSERT(cr != NULL);
4089 4089  
4090 4090          /*
4091 4091           * XXX - Why should this check be made here?
4092 4092           */
4093 4093          if (vp->v_flag & VNOMAP)
4094 4094                  return (ENOSYS);
4095 4095  
4096 4096          if (len == 0 && !(flags & B_INVAL) && vn_is_readonly(vp))
4097 4097                  return (0);
4098 4098  
4099 4099          if (!(flags & B_ASYNC) && nfs_zone() != VTOMI(vp)->mi_zone)
4100 4100                  return (EIO);
4101 4101          ASSERT(off <= MAXOFF32_T);
4102 4102  
4103 4103          rp = VTOR(vp);
4104 4104          mutex_enter(&rp->r_statelock);
4105 4105          rp->r_count++;
4106 4106          mutex_exit(&rp->r_statelock);
4107 4107          error = nfs_putpages(vp, off, len, flags, cr);
4108 4108          mutex_enter(&rp->r_statelock);
4109 4109          rp->r_count--;
4110 4110          cv_broadcast(&rp->r_cv);
4111 4111          mutex_exit(&rp->r_statelock);
4112 4112  
4113 4113          return (error);
4114 4114  }
4115 4115  
4116 4116  /*
4117 4117   * Write out a single page, possibly klustering adjacent dirty pages.
4118 4118   */
4119 4119  int
4120 4120  nfs_putapage(vnode_t *vp, page_t *pp, u_offset_t *offp, size_t *lenp,
4121 4121          int flags, cred_t *cr)
4122 4122  {
4123 4123          u_offset_t io_off;
4124 4124          u_offset_t lbn_off;
4125 4125          u_offset_t lbn;
4126 4126          size_t io_len;
4127 4127          uint_t bsize;
4128 4128          int error;
4129 4129          rnode_t *rp;
4130 4130  
4131 4131          ASSERT(!vn_is_readonly(vp));
4132 4132          ASSERT(pp != NULL);
4133 4133          ASSERT(cr != NULL);
4134 4134          ASSERT((flags & B_ASYNC) || nfs_zone() == VTOMI(vp)->mi_zone);
4135 4135  
4136 4136          rp = VTOR(vp);
4137 4137          ASSERT(rp->r_count > 0);
4138 4138  
4139 4139          ASSERT(pp->p_offset <= MAXOFF32_T);
4140 4140  
4141 4141          bsize = MAX(vp->v_vfsp->vfs_bsize, PAGESIZE);
4142 4142          lbn = pp->p_offset / bsize;
4143 4143          lbn_off = lbn * bsize;
4144 4144  
4145 4145          /*
4146 4146           * Find a kluster that fits in one block, or in
4147 4147           * one page if pages are bigger than blocks.  If
4148 4148           * there is less file space allocated than a whole
4149 4149           * page, we'll shorten the i/o request below.
4150 4150           */
4151 4151          pp = pvn_write_kluster(vp, pp, &io_off, &io_len, lbn_off,
4152 4152              roundup(bsize, PAGESIZE), flags);
4153 4153  
4154 4154          /*
4155 4155           * pvn_write_kluster shouldn't have returned a page with offset
4156 4156           * behind the original page we were given.  Verify that.
4157 4157           */
4158 4158          ASSERT((pp->p_offset / bsize) >= lbn);
4159 4159  
4160 4160          /*
4161 4161           * Now pp will have the list of kept dirty pages marked for
4162 4162           * write back.  It will also handle invalidation and freeing
4163 4163           * of pages that are not dirty.  Check for page length rounding
4164 4164           * problems.
4165 4165           */
4166 4166          if (io_off + io_len > lbn_off + bsize) {
4167 4167                  ASSERT((io_off + io_len) - (lbn_off + bsize) < PAGESIZE);
4168 4168                  io_len = lbn_off + bsize - io_off;
4169 4169          }
4170 4170          /*
4171 4171           * The RMODINPROGRESS flag makes sure that nfs(3)_bio() sees a
4172 4172           * consistent value of r_size. RMODINPROGRESS is set in writerp().
4173 4173           * When RMODINPROGRESS is set it indicates that a uiomove() is in
4174 4174           * progress and the r_size has not been made consistent with the
4175 4175           * new size of the file. When the uiomove() completes the r_size is
4176 4176           * updated and the RMODINPROGRESS flag is cleared.
4177 4177           *
4178 4178           * The RMODINPROGRESS flag makes sure that nfs(3)_bio() sees a
4179 4179           * consistent value of r_size. Without this handshaking, it is
4180 4180           * possible that nfs(3)_bio() picks up the old value of r_size
4181 4181           * before the uiomove() in writerp() completes. This will result
4182 4182           * in the write through nfs(3)_bio() being dropped.
4183 4183           *
4184 4184           * More precisely, there is a window between the time the uiomove()
4185 4185           * completes and the time the r_size is updated. If a VOP_PUTPAGE()
4186 4186           * operation intervenes in this window, the page will be picked up,
4187 4187           * because it is dirty (it will be unlocked, unless it was
4188 4188           * pagecreate'd). When the page is picked up as dirty, the dirty
4189 4189           * bit is reset (pvn_getdirty()). In nfs(3)write(), r_size is
4190 4190           * checked. This will still be the old size. Therefore the page will
4191 4191           * not be written out. When segmap_release() calls VOP_PUTPAGE(),
4192 4192           * the page will be found to be clean and the write will be dropped.
4193 4193           */
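                   /*
                    * A concrete timeline of that window (threads are
                    * hypothetical): T1 in writerp() completes its uiomove()
                    * but has not yet updated r_size; T2 runs VOP_PUTPAGE(),
                    * pvn_getdirty() clears the dirty bit, and nfs(3)_bio()
                    * drops the write because the stale r_size says nothing
                    * lies past EOF; the later segmap_release() flush then
                    * finds the page clean.  The RMODINPROGRESS check below
                    * closes exactly this window.
                    */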
4194 4194          if (rp->r_flags & RMODINPROGRESS) {
4195 4195                  mutex_enter(&rp->r_statelock);
4196 4196                  if ((rp->r_flags & RMODINPROGRESS) &&
4197 4197                      rp->r_modaddr + MAXBSIZE > io_off &&
4198 4198                      rp->r_modaddr < io_off + io_len) {
4199 4199                          page_t *plist;
4200 4200                          /*
4201 4201                           * A write is in progress for this region of the file.
4202 4202                           * If we did not detect RMODINPROGRESS here then this
4203 4203                           * path through nfs_putapage() would eventually go to
4204 4204                           * nfs(3)_bio() and may not write out all of the data
4205 4205                           * in the pages. We end up losing data. So we decide
4206 4206                           * to set the modified bit on each page in the page
4207 4207                           * list and mark the rnode with RDIRTY. This write
4208 4208                           * will be restarted at some later time.
4209 4209                           */
4210 4210                          plist = pp;
4211 4211                          while (plist != NULL) {
4212 4212                                  pp = plist;
4213 4213                                  page_sub(&plist, pp);
4214 4214                                  hat_setmod(pp);
4215 4215                                  page_io_unlock(pp);
4216 4216                                  page_unlock(pp);
4217 4217                          }
4218 4218                          rp->r_flags |= RDIRTY;
4219 4219                          mutex_exit(&rp->r_statelock);
4220 4220                          if (offp)
4221 4221                                  *offp = io_off;
4222 4222                          if (lenp)
4223 4223                                  *lenp = io_len;
4224 4224                          return (0);
4225 4225                  }
4226 4226                  mutex_exit(&rp->r_statelock);
4227 4227          }
4228 4228  
4229 4229          if (flags & B_ASYNC) {
4230 4230                  error = nfs_async_putapage(vp, pp, io_off, io_len, flags, cr,
4231 4231                      nfs_sync_putapage);
4232 4232          } else
4233 4233                  error = nfs_sync_putapage(vp, pp, io_off, io_len, flags, cr);
4234 4234  
4235 4235          if (offp)
4236 4236                  *offp = io_off;
4237 4237          if (lenp)
4238 4238                  *lenp = io_len;
4239 4239          return (error);
4240 4240  }
4241 4241  
4242 4242  static int
4243 4243  nfs_sync_putapage(vnode_t *vp, page_t *pp, u_offset_t io_off, size_t io_len,
4244 4244          int flags, cred_t *cr)
4245 4245  {
4246 4246          int error;
4247 4247          rnode_t *rp;
4248 4248  
4249 4249          flags |= B_WRITE;
4250 4250  
4251 4251          ASSERT(nfs_zone() == VTOMI(vp)->mi_zone);
4252 4252          error = nfs_rdwrlbn(vp, pp, io_off, io_len, flags, cr);
4253 4253  
4254 4254          rp = VTOR(vp);
4255 4255  
4256 4256          if ((error == ENOSPC || error == EDQUOT || error == EACCES) &&
4257 4257              (flags & (B_INVAL|B_FORCE)) != (B_INVAL|B_FORCE)) {
4258 4258                  if (!(rp->r_flags & ROUTOFSPACE)) {
4259 4259                          mutex_enter(&rp->r_statelock);
4260 4260                          rp->r_flags |= ROUTOFSPACE;
4261 4261                          mutex_exit(&rp->r_statelock);
4262 4262                  }
4263 4263                  flags |= B_ERROR;
4264 4264                  pvn_write_done(pp, flags);
4265 4265                  /*
4266 4266                   * If this was not an async thread, then try again to
4267 4267                   * write out the pages, but this time, also destroy
4268 4268                   * them whether or not the write is successful.  This
4269 4269                   * will prevent memory from filling up with these
4270 4270                   * pages and destroying them is the only alternative
4271 4271                   * if they can't be written out.
4272 4272                   *
4273 4273                   * Don't do this if this is an async thread because
4274 4274                   * when the pages are unlocked in pvn_write_done,
4275 4275                   * some other thread could have come along, locked
4276 4276                   * them, and queued for an async thread.  It would be
4277 4277                   * possible for all of the async threads to be tied
4278 4278                   * up waiting to lock the pages again and they would
4279 4279                   * all already be locked and waiting for an async
4280 4280                   * thread to handle them.  Deadlock.
4281 4281                   */
4282 4282                  if (!(flags & B_ASYNC)) {
4283 4283                          error = nfs_putpage(vp, io_off, io_len,
4284 4284                              B_INVAL | B_FORCE, cr, NULL);
4285 4285                  }
4286 4286          } else {
4287 4287                  if (error)
4288 4288                          flags |= B_ERROR;
4289 4289                  else if (rp->r_flags & ROUTOFSPACE) {
4290 4290                          mutex_enter(&rp->r_statelock);
4291 4291                          rp->r_flags &= ~ROUTOFSPACE;
4292 4292                          mutex_exit(&rp->r_statelock);
4293 4293                  }
4294 4294                  pvn_write_done(pp, flags);
4295 4295          }
4296 4296  
4297 4297          return (error);
4298 4298  }
4299 4299  
4300 4300  /* ARGSUSED */
4301 4301  static int
4302 4302  nfs_map(vnode_t *vp, offset_t off, struct as *as, caddr_t *addrp,
4303 4303          size_t len, uchar_t prot, uchar_t maxprot, uint_t flags, cred_t *cr,
4304 4304          caller_context_t *ct)
4305 4305  {
4306 4306          struct segvn_crargs vn_a;
4307 4307          int error;
4308 4308          rnode_t *rp;
4309 4309          struct vattr va;
4310 4310  
4311 4311          if (nfs_zone() != VTOMI(vp)->mi_zone)
4312 4312                  return (EIO);
4313 4313  
4314 4314          if (vp->v_flag & VNOMAP)
4315 4315                  return (ENOSYS);
4316 4316  
4317 4317          if (off > MAXOFF32_T)
4318 4318                  return (EFBIG);
4319 4319  
4320 4320          if (off < 0 || off + len < 0)
4321 4321                  return (ENXIO);
4322 4322  
4323 4323          if (vp->v_type != VREG)
4324 4324                  return (ENODEV);
4325 4325  
4326 4326          /*
4327 4327           * If there is cached data and if close-to-open consistency
4328 4328           * checking is not turned off and if the file system is not
4329 4329           * mounted readonly, then force an over the wire getattr.
4330 4330           * Otherwise, just invoke nfsgetattr to get a copy of the
4331 4331           * attributes.  The attribute cache will be used unless it
4332 4332           * is timed out and if it is, then an over the wire getattr
4333 4333           * will be issued.
4334 4334           */
4335 4335          va.va_mask = AT_ALL;
4336 4336          if (vn_has_cached_data(vp) &&
4337 4337              !(VTOMI(vp)->mi_flags & MI_NOCTO) && !vn_is_readonly(vp))
4338 4338                  error = nfs_getattr_otw(vp, &va, cr);
4339 4339          else
4340 4340                  error = nfsgetattr(vp, &va, cr);
4341 4341          if (error)
4342 4342                  return (error);
4343 4343  
4344 4344          /*
4345 4345           * Check to see if the vnode is currently marked as not cachable.
4346 4346           * This means portions of the file are locked (through VOP_FRLOCK).
4347 4347           * In this case the map request must be refused.  We use
4348 4348           * rp->r_lkserlock to avoid a race with concurrent lock requests.
4349 4349           */
4350 4350          rp = VTOR(vp);
4351 4351  
4352 4352          /*
4353 4353           * Atomically increment r_inmap after acquiring r_rwlock. The
4354 4354           * idea here is to acquire r_rwlock to block read/write and
4355 4355           * not to protect r_inmap. r_inmap will inform nfs_read/write()
4356 4356           * that we are in nfs_map(). Now that r_rwlock is acquired
4357 4357           * in order, we prevent the deadlock that would have occurred
4358 4358           * had nfs_addmap() acquired it out of order.
4359 4359           *
4360 4360           * Since we are not protecting r_inmap by any lock, we do not
4361 4361           * hold any lock when we decrement it. We atomically decrement
4362 4362           * r_inmap after we release r_lkserlock.
4363 4363           */
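                   /*
                    * The resulting order, per the comment above (a sketch,
                    * not new behavior):
                    *
                    *      nfs_map:        r_rwlock -> r_inmap++ -> drop
                    *                      r_rwlock -> r_lkserlock ...
                    *                      drop r_lkserlock -> r_inmap--
                    *      nfs_read/write: take r_rwlock, with r_inmap
                    *                      telling them a map is in flight
                    */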
4364 4364  
4365 4365          if (nfs_rw_enter_sig(&rp->r_rwlock, RW_WRITER, INTR(vp)))
4366 4366                  return (EINTR);
4367 4367          atomic_inc_uint(&rp->r_inmap);
4368 4368          nfs_rw_exit(&rp->r_rwlock);
4369 4369  
4370 4370          if (nfs_rw_enter_sig(&rp->r_lkserlock, RW_READER, INTR(vp))) {
4371 4371                  atomic_dec_uint(&rp->r_inmap);
4372 4372                  return (EINTR);
4373 4373          }
4374 4374          if (vp->v_flag & VNOCACHE) {
4375 4375                  error = EAGAIN;
4376 4376                  goto done;
4377 4377          }
4378 4378  
4379 4379          /*
4380 4380           * Don't allow concurrent locks and mapping if mandatory locking is
4381 4381           * enabled.
4382 4382           */
4383 4383          if ((flk_has_remote_locks(vp) || lm_has_sleep(vp)) &&
4384 4384              MANDLOCK(vp, va.va_mode)) {
4385 4385                  error = EAGAIN;
4386 4386                  goto done;
4387 4387          }
4388 4388  
4389 4389          as_rangelock(as);
4390 4390          error = choose_addr(as, addrp, len, off, ADDR_VACALIGN, flags);
4391 4391          if (error != 0) {
4392 4392                  as_rangeunlock(as);
4393 4393                  goto done;
4394 4394          }
4395 4395  
4396 4396          vn_a.vp = vp;
4397 4397          vn_a.offset = off;
4398 4398          vn_a.type = (flags & MAP_TYPE);
4399 4399          vn_a.prot = (uchar_t)prot;
4400 4400          vn_a.maxprot = (uchar_t)maxprot;
4401 4401          vn_a.flags = (flags & ~MAP_TYPE);
4402 4402          vn_a.cred = cr;
4403 4403          vn_a.amp = NULL;
4404 4404          vn_a.szc = 0;
4405 4405          vn_a.lgrp_mem_policy_flags = 0;
4406 4406  
4407 4407          error = as_map(as, *addrp, len, segvn_create, &vn_a);
4408 4408          as_rangeunlock(as);
4409 4409  
4410 4410  done:
4411 4411          nfs_rw_exit(&rp->r_lkserlock);
4412 4412          atomic_dec_uint(&rp->r_inmap);
4413 4413          return (error);
4414 4414  }
4415 4415  
4416 4416  /* ARGSUSED */
4417 4417  static int
4418 4418  nfs_addmap(vnode_t *vp, offset_t off, struct as *as, caddr_t addr,
4419 4419          size_t len, uchar_t prot, uchar_t maxprot, uint_t flags, cred_t *cr,
4420 4420          caller_context_t *ct)
4421 4421  {
4422 4422          rnode_t *rp;
4423 4423  
4424 4424          if (vp->v_flag & VNOMAP)
4425 4425                  return (ENOSYS);
4426 4426          if (nfs_zone() != VTOMI(vp)->mi_zone)
4427 4427                  return (EIO);
4428 4428  
4429 4429          rp = VTOR(vp);
4430 4430          atomic_add_long((ulong_t *)&rp->r_mapcnt, btopr(len));
4431 4431  
4432 4432          return (0);
4433 4433  }
4434 4434  
4435 4435  /* ARGSUSED */
4436 4436  static int
4437 4437  nfs_frlock(vnode_t *vp, int cmd, struct flock64 *bfp, int flag, offset_t offset,
4438 4438          struct flk_callback *flk_cbp, cred_t *cr, caller_context_t *ct)
4439 4439  {
4440 4440          netobj lm_fh;
4441 4441          int rc;
4442 4442          u_offset_t start, end;
4443 4443          rnode_t *rp;
4444 4444          int error = 0, intr = INTR(vp);
4445 4445  
4446 4446          /* check for valid cmd parameter */
4447 4447          if (cmd != F_GETLK && cmd != F_SETLK && cmd != F_SETLKW)
4448 4448                  return (EINVAL);
4449 4449          if (nfs_zone() != VTOMI(vp)->mi_zone)
4450 4450                  return (EIO);
4451 4451  
4452 4452          /* Verify l_type. */
4453 4453          switch (bfp->l_type) {
4454 4454          case F_RDLCK:
4455 4455                  if (cmd != F_GETLK && !(flag & FREAD))
4456 4456                          return (EBADF);
4457 4457                  break;
4458 4458          case F_WRLCK:
4459 4459                  if (cmd != F_GETLK && !(flag & FWRITE))
4460 4460                          return (EBADF);
4461 4461                  break;
4462 4462          case F_UNLCK:
4463 4463                  intr = 0;
4464 4464                  break;
4465 4465  
4466 4466          default:
4467 4467                  return (EINVAL);
4468 4468          }
4469 4469  
4470 4470          /* check the validity of the lock range */
4471 4471          if (rc = flk_convert_lock_data(vp, bfp, &start, &end, offset))
4472 4472                  return (rc);
4473 4473          if (rc = flk_check_lock_data(start, end, MAXOFF32_T))
4474 4474                  return (rc);
4475 4475  
4476 4476          /*
4477 4477           * If the filesystem is mounted using local locking, pass the
4478 4478           * request off to the local locking code.
4479 4479           */
4480 4480          if (VTOMI(vp)->mi_flags & MI_LLOCK) {
4481 4481                  if (offset > MAXOFF32_T)
4482 4482                          return (EFBIG);
4483 4483                  if (cmd == F_SETLK || cmd == F_SETLKW) {
4484 4484                          /*
4485 4485                           * For complete safety, we should be holding
4486 4486                           * r_lkserlock.  However, we can't call
4487 4487                           * lm_safelock and then fs_frlock while
4488 4488                           * holding r_lkserlock, so just invoke
4489 4489                           * lm_safelock and expect that this will
4490 4490                           * catch enough of the cases.
4491 4491                           */
4492 4492                          if (!lm_safelock(vp, bfp, cr))
4493 4493                                  return (EAGAIN);
4494 4494                  }
4495 4495                  return (fs_frlock(vp, cmd, bfp, flag, offset, flk_cbp, cr, ct));
4496 4496          }
4497 4497  
4498 4498          rp = VTOR(vp);
4499 4499  
4500 4500          /*
4501 4501           * Check whether the given lock request can proceed, given the
4502 4502           * current file mappings.
4503 4503           */
4504 4504          if (nfs_rw_enter_sig(&rp->r_lkserlock, RW_WRITER, intr))
4505 4505                  return (EINTR);
4506 4506          if (cmd == F_SETLK || cmd == F_SETLKW) {
4507 4507                  if (!lm_safelock(vp, bfp, cr)) {
4508 4508                          rc = EAGAIN;
4509 4509                          goto done;
4510 4510                  }
4511 4511          }
4512 4512  
4513 4513          /*
4514 4514           * Flush the cache after waiting for async I/O to finish.  For new
4515 4515           * locks, this is so that the process gets the latest bits from the
4516 4516           * server.  For unlocks, this is so that other clients see the
4517 4517           * latest bits once the file has been unlocked.  If currently dirty
4518 4518           * pages can't be flushed, then don't allow a lock to be set.  But
4519 4519           * allow unlocks to succeed, to avoid having orphan locks on the
4520 4520           * server.
4521 4521           */
4522 4522          if (cmd != F_GETLK) {
4523 4523                  mutex_enter(&rp->r_statelock);
4524 4524                  while (rp->r_count > 0) {
4525 4525                          if (intr) {
4526 4526                                  klwp_t *lwp = ttolwp(curthread);
4527 4527  
4528 4528                                  if (lwp != NULL)
4529 4529                                          lwp->lwp_nostop++;
4530 4530                                  if (cv_wait_sig(&rp->r_cv, &rp->r_statelock)
4531 4531                                      == 0) {
4532 4532                                          if (lwp != NULL)
4533 4533                                                  lwp->lwp_nostop--;
4534 4534                                          rc = EINTR;
4535 4535                                          break;
4536 4536                                  }
4537 4537                                  if (lwp != NULL)
4538 4538                                          lwp->lwp_nostop--;
4539 4539                          } else
4540 4540                                  cv_wait(&rp->r_cv, &rp->r_statelock);
4541 4541                  }
4542 4542                  mutex_exit(&rp->r_statelock);
4543 4543                  if (rc != 0)
4544 4544                          goto done;
4545 4545                  error = nfs_putpage(vp, (offset_t)0, 0, B_INVAL, cr, ct);
4546 4546                  if (error) {
4547 4547                          if (error == ENOSPC || error == EDQUOT) {
4548 4548                                  mutex_enter(&rp->r_statelock);
4549 4549                                  if (!rp->r_error)
4550 4550                                          rp->r_error = error;
4551 4551                                  mutex_exit(&rp->r_statelock);
4552 4552                          }
4553 4553                          if (bfp->l_type != F_UNLCK) {
4554 4554                                  rc = ENOLCK;
4555 4555                                  goto done;
4556 4556                          }
4557 4557                  }
4558 4558          }
4559 4559  
4560 4560          lm_fh.n_len = sizeof (fhandle_t);
4561 4561          lm_fh.n_bytes = (char *)VTOFH(vp);
4562 4562  
4563 4563          /*
4564 4564           * Call the lock manager to do the real work of contacting
4565 4565           * the server and obtaining the lock.
4566 4566           */
4567 4567          rc = lm_frlock(vp, cmd, bfp, flag, offset, cr, &lm_fh, flk_cbp);
4568 4568  
4569 4569          if (rc == 0)
4570 4570                  nfs_lockcompletion(vp, cmd);
4571 4571  
4572 4572  done:
4573 4573          nfs_rw_exit(&rp->r_lkserlock);
4574 4574          return (rc);
4575 4575  }
4576 4576  
4577 4577  /*
4578 4578   * Free storage space associated with the specified vnode.  The portion
4579 4579   * to be freed is specified by bfp->l_start and bfp->l_len (already
4580 4580   * normalized to a "whence" of 0).
4581 4581   *
4582 4582   * This is an experimental facility whose continued existence is not
4583 4583   * guaranteed.  Currently, we only support the special case
4584 4584   * of l_len == 0, meaning free to end of file.
4585 4585   */
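           /*
            * A user-level ftruncate() arrives here roughly as follows
            * (illustrative sketch; names and values are hypothetical):
            *
            *      struct flock64 bf;
            *
            *      bzero(&bf, sizeof (bf));
            *      bf.l_start = new_size;  (already normalized, whence 0)
            *      bf.l_len = 0;           (free from l_start to EOF)
            *      error = VOP_SPACE(vp, F_FREESP, &bf, FWRITE, 0, cr, NULL);
            */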
4586 4586  /* ARGSUSED */
4587 4587  static int
4588 4588  nfs_space(vnode_t *vp, int cmd, struct flock64 *bfp, int flag,
4589 4589          offset_t offset, cred_t *cr, caller_context_t *ct)
4590 4590  {
4591 4591          int error;
4592 4592  
4593 4593          ASSERT(vp->v_type == VREG);
4594 4594          if (cmd != F_FREESP)
4595 4595                  return (EINVAL);
4596 4596  
4597 4597          if (offset > MAXOFF32_T)
4598 4598                  return (EFBIG);
4599 4599  
4600 4600          if ((bfp->l_start > MAXOFF32_T) || (bfp->l_end > MAXOFF32_T) ||
4601 4601              (bfp->l_len > MAXOFF32_T))
4602 4602                  return (EFBIG);
4603 4603  
4604 4604          if (nfs_zone() != VTOMI(vp)->mi_zone)
4605 4605                  return (EIO);
4606 4606  
4607 4607          error = convoff(vp, bfp, 0, offset);
4608 4608          if (!error) {
4609 4609                  ASSERT(bfp->l_start >= 0);
4610 4610                  if (bfp->l_len == 0) {
4611 4611                          struct vattr va;
4612 4612  
4613 4613                          /*
4614 4614                           * ftruncate should not change the ctime and
4615 4615                           * mtime if we truncate the file to its
4616 4616                           * previous size.
4617 4617                           */
4618 4618                          va.va_mask = AT_SIZE;
4619 4619                          error = nfsgetattr(vp, &va, cr);
4620 4620                          if (error || va.va_size == bfp->l_start)
4621 4621                                  return (error);
4622 4622                          va.va_mask = AT_SIZE;
4623 4623                          va.va_size = bfp->l_start;
4624 4624                          error = nfssetattr(vp, &va, 0, cr);
4625 4625  
4626 4626                          if (error == 0) {
4627 4627                                  if (bfp->l_start == 0) {
4628 4628                                          vnevent_truncate(vp, ct);
4629 4629                                  } else {
4630 4630                                          vnevent_resize(vp, ct);
4631 4631                                  }
4632 4632                          }
4633 4633                  } else
4634 4634                          error = EINVAL;
4635 4635          }
4636 4636  
4637 4637          return (error);
4638 4638  }
4639 4639  
4640 4640  /* ARGSUSED */
4641 4641  static int
4642 4642  nfs_realvp(vnode_t *vp, vnode_t **vpp, caller_context_t *ct)
4643 4643  {
4644 4644  
4645 4645          return (EINVAL);
4646 4646  }
4647 4647  
4648 4648  /*
4649 4649   * Setup and add an address space callback to do the work of the delmap call.
4650 4650   * The callback will (and must be) deleted in the actual callback function.
4651 4651   *
4652 4652   * This is done in order to take care of the problem that we have with holding
4653 4653   * the address space's a_lock for a long period of time (e.g. if the NFS server
4654 4654   * is down).  Callbacks will be executed in the address space code while the
4655 4655   * a_lock is not held.  Holding the address space's a_lock causes things such
4656 4656   * as ps and fork to hang because they are trying to acquire this lock as well.
4657 4657   */
4658 4658  /* ARGSUSED */
4659 4659  static int
4660 4660  nfs_delmap(vnode_t *vp, offset_t off, struct as *as, caddr_t addr,
4661 4661          size_t len, uint_t prot, uint_t maxprot, uint_t flags, cred_t *cr,
4662 4662          caller_context_t *ct)
4663 4663  {
4664 4664          int                     caller_found;
4665 4665          int                     error;
4666 4666          rnode_t                 *rp;
4667 4667          nfs_delmap_args_t       *dmapp;
4668 4668          nfs_delmapcall_t        *delmap_call;
4669 4669  
4670 4670          if (vp->v_flag & VNOMAP)
4671 4671                  return (ENOSYS);
4672 4672          /*
4673 4673           * A process may not change zones if it has NFS pages mmap'ed
4674 4674           * in, so we can't legitimately get here from the wrong zone.
4675 4675           */
4676 4676          ASSERT(nfs_zone() == VTOMI(vp)->mi_zone);
4677 4677  
4678 4678          rp = VTOR(vp);
4679 4679  
4680 4680          /*
4681 4681           * The way that the address space of this process deletes its mapping
4682 4682           * of this file is via the following call chains:
4683 4683           * - as_free()->SEGOP_UNMAP()/segvn_unmap()->VOP_DELMAP()/nfs_delmap()
4684 4684           * - as_unmap()->SEGOP_UNMAP()/segvn_unmap()->VOP_DELMAP()/nfs_delmap()
4685 4685           *
4686 4686           * With the use of address space callbacks we are allowed to drop the
4687 4687           * address space lock, a_lock, while executing the NFS operations that
4688 4688           * need to go over the wire.  Returning EAGAIN to the caller of this
4689 4689           * function is what drives the execution of the callback that we add
4690 4690           * below.  The callback will be executed by the address space code
4691 4691           * after dropping the a_lock.  When the callback is finished, since
4692 4692           * we dropped the a_lock, it must be re-acquired and segvn_unmap()
4693 4693           * is called again on the same segment to finish the rest of the work
4694 4694           * that needs to happen during unmapping.
4695 4695           *
4696 4696           * This action of calling back into the segment driver causes
4697 4697           * nfs_delmap() to get called again, but since the callback was
4698 4698           * already executed at this point, it already did the work and there
4699 4699           * is nothing left for us to do.
4700 4700           *
4701 4701           * To summarize:
4702 4702           * - The first time nfs_delmap is called by the current thread is when
4703 4703           * we add the caller associated with this delmap to the delmap caller
4704 4704           * list, add the callback, and return EAGAIN.
4705 4705           * - The second time in this call chain when nfs_delmap is called we
4706 4706           * will find this caller in the delmap caller list and realize there
4707 4707           * is no more work to do thus removing this caller from the list and
4708 4708           * returning the error that was set in the callback execution.
4709 4709           */
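                   /*
                    * The same flow, compressed (restating the comment
                    * above, no new behavior):
                    *
                    *      1st nfs_delmap():  not in r_indelmap; register
                    *                         the callback, return EAGAIN.
                    *      callback:          does the real delmap work and
                    *                         records its error; unmap is
                    *                         then re-driven.
                    *      2nd nfs_delmap():  found in r_indelmap; return
                    *                         the recorded error (EAGAIN
                    *                         mapped to 0).
                    */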
4710 4710          caller_found = nfs_find_and_delete_delmapcall(rp, &error);
4711 4711          if (caller_found) {
4712 4712                  /*
4713 4713                   * 'error' is from the actual delmap operations.  To avoid
4714 4714                   * hangs, we need to handle the return of EAGAIN differently
4715 4715                   * since this is what drives the callback execution.
4716 4716                   * In this case, we don't want to return EAGAIN and do the
4717 4717                   * callback execution because there are none to execute.
4718 4718                   */
4719 4719                  if (error == EAGAIN)
4720 4720                          return (0);
4721 4721                  else
4722 4722                          return (error);
4723 4723          }
4724 4724  
4725 4725          /* current caller was not in the list */
4726 4726          delmap_call = nfs_init_delmapcall();
4727 4727  
4728 4728          mutex_enter(&rp->r_statelock);
4729 4729          list_insert_tail(&rp->r_indelmap, delmap_call);
4730 4730          mutex_exit(&rp->r_statelock);
4731 4731  
4732 4732          dmapp = kmem_alloc(sizeof (nfs_delmap_args_t), KM_SLEEP);
4733 4733  
4734 4734          dmapp->vp = vp;
4735 4735          dmapp->off = off;
4736 4736          dmapp->addr = addr;
4737 4737          dmapp->len = len;
4738 4738          dmapp->prot = prot;
4739 4739          dmapp->maxprot = maxprot;
4740 4740          dmapp->flags = flags;
4741 4741          dmapp->cr = cr;
4742 4742          dmapp->caller = delmap_call;
4743 4743  
4744 4744          error = as_add_callback(as, nfs_delmap_callback, dmapp,
4745 4745              AS_UNMAP_EVENT, addr, len, KM_SLEEP);
4746 4746  
4747 4747          return (error ? error : EAGAIN);
4748 4748  }
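
A minimal userland sketch (not part of the source) of the two-pass protocol the
comment above describes: the first call registers work and returns EAGAIN, the
address-space layer runs the callback after dropping a_lock, and the retry
reaps the recorded result.  All names here (fake_delmap, run_callbacks,
struct delmap_caller) are illustrative, not kernel interfaces.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct delmap_caller {
	struct delmap_caller *next;
	int error;			/* filled in by the "callback" */
};

static struct delmap_caller *caller_list;	/* r_indelmap stand-in */
static struct delmap_caller *pending_cb;	/* registered callback */

/* First pass registers a callback and returns EAGAIN; second pass reaps it. */
static int
fake_delmap(void)
{
	struct delmap_caller *dc = caller_list;

	if (dc != NULL) {		/* second pass: caller found */
		int error = dc->error;

		caller_list = dc->next;
		free(dc);
		/* an EAGAIN recorded by the callback must not re-drive one */
		return (error == EAGAIN ? 0 : error);
	}

	dc = calloc(1, sizeof (*dc));	/* first pass: register caller */
	dc->next = caller_list;
	caller_list = dc;
	pending_cb = dc;
	return (EAGAIN);
}

/* Stands in for the AS layer running callbacks after dropping a_lock. */
static void
run_callbacks(void)
{
	if (pending_cb != NULL) {
		pending_cb->error = 0;	/* pretend the flush succeeded */
		pending_cb = NULL;
	}
}

int
main(void)
{
	int error = fake_delmap();	/* pass 1: returns EAGAIN */

	if (error == EAGAIN) {
		run_callbacks();	/* "a_lock" dropped here */
		error = fake_delmap();	/* pass 2: the real result */
	}
	(void) printf("delmap result: %d\n", error);
	return (0);
}
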
4749 4749  
4750 4750  /*
4751 4751   * Remove some pages from an mmap'd vnode.  Just update the
4752 4752   * count of pages.  If doing close-to-open, then flush all
4753 4753   * of the pages associated with this file.  Otherwise, start
4754 4754   * an asynchronous page flush to write out any dirty pages.
4755 4755   * This will also associate a credential with the rnode which
4756 4756   * can be used to write the pages.
4757 4757   */
4758 4758  /* ARGSUSED */
4759 4759  static void
4760 4760  nfs_delmap_callback(struct as *as, void *arg, uint_t event)
4761 4761  {
4762 4762          int                     error;
4763 4763          rnode_t                 *rp;
4764 4764          mntinfo_t               *mi;
4765 4765          nfs_delmap_args_t       *dmapp = (nfs_delmap_args_t *)arg;
4766 4766  
4767 4767          rp = VTOR(dmapp->vp);
4768 4768          mi = VTOMI(dmapp->vp);
4769 4769  
4770 4770          atomic_add_long((ulong_t *)&rp->r_mapcnt, -btopr(dmapp->len));
4771 4771          ASSERT(rp->r_mapcnt >= 0);
4772 4772  
4773 4773          /*
4774 4774           * Initiate a page flush if there are pages, the file system
4775 4775           * was not mounted readonly, the segment was mapped shared, and
4776 4776           * the pages themselves were writeable.
4777 4777           */
4778 4778          if (vn_has_cached_data(dmapp->vp) && !vn_is_readonly(dmapp->vp) &&
4779 4779              dmapp->flags == MAP_SHARED && (dmapp->maxprot & PROT_WRITE)) {
4780 4780                  mutex_enter(&rp->r_statelock);
4781 4781                  rp->r_flags |= RDIRTY;
4782 4782                  mutex_exit(&rp->r_statelock);
4783 4783                  /*
4784 4784                   * If this is a cross-zone access, a sync putpage won't work,
4785 4785                   * so the best we can do is try an async putpage; that seems
4786 4786                   * better than something draconian like discarding the dirty
4787 4787                   * pages.  With MI_NOCTO set, an async flush also suffices.
4788 4788                   */
4789 4789                  if ((mi->mi_flags & MI_NOCTO) ||
4790 4790                      nfs_zone() != mi->mi_zone)
4791 4791                          error = nfs_putpage(dmapp->vp, dmapp->off, dmapp->len,
4792 4792                              B_ASYNC, dmapp->cr, NULL);
4793 4793                  else
4794 4794                          error = nfs_putpage(dmapp->vp, dmapp->off, dmapp->len,
4795 4795                              0, dmapp->cr, NULL);
4796 4796                  if (!error) {
4797 4797                          mutex_enter(&rp->r_statelock);
4798 4798                          error = rp->r_error;
4799 4799                          rp->r_error = 0;
4800 4800                          mutex_exit(&rp->r_statelock);
4801 4801                  }
4802 4802          } else
4803 4803                  error = 0;
4804 4804  
4805 4805          if ((rp->r_flags & RDIRECTIO) || (mi->mi_flags & MI_DIRECTIO))
4806 4806                  (void) nfs_putpage(dmapp->vp, dmapp->off, dmapp->len,
4807 4807                      B_INVAL, dmapp->cr, NULL);
4808 4808  
4809 4809          dmapp->caller->error = error;
4810 4810          (void) as_delete_callback(as, arg);
4811 4811          kmem_free(dmapp, sizeof (nfs_delmap_args_t));
4812 4812  }
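
The callback's flush policy condenses to a small predicate.  A sketch under
stated assumptions: struct delmap_state and flush_mode() are invented names
that flatten what the callback actually reads from vp, mi, and the mapping
arguments.

#include <stdbool.h>

/* hypothetical flattened inputs; the callback derives these from vp/mi */
struct delmap_state {
	bool has_pages;		/* vn_has_cached_data() */
	bool readonly;		/* vn_is_readonly() */
	bool shared;		/* flags == MAP_SHARED */
	bool writable;		/* maxprot & PROT_WRITE */
	bool nocto;		/* MI_NOCTO set on the mount */
	bool cross_zone;	/* nfs_zone() != mi_zone */
};

/* 0 = no flush needed, 1 = async putpage, 2 = sync putpage */
int
flush_mode(const struct delmap_state *s)
{
	if (!s->has_pages || s->readonly || !s->shared || !s->writable)
		return (0);
	return ((s->nocto || s->cross_zone) ? 1 : 2);
}
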
4813 4813  
4814 4814  /* ARGSUSED */
4815 4815  static int
4816 4816  nfs_pathconf(vnode_t *vp, int cmd, ulong_t *valp, cred_t *cr,
4817 4817          caller_context_t *ct)
4818 4818  {
4819 4819          int error = 0;
4820 4820  
4821 4821          if (nfs_zone() != VTOMI(vp)->mi_zone)
4822 4822                  return (EIO);
4823 4823          /*
4824 4824           * This looks a little weird because it's written in a general
4825 4825           * manner but we make little use of cases.  If cntl() ever gets
4826 4826           * widely used, the outer switch will make more sense.
4827 4827           */
4828 4828  
4829 4829          switch (cmd) {
4830 4830  
4831 4831          /*
4832 4832           * Large file spec - the answer must be a hardcoded constant
4833 4833           * based on the protocol; NFS Version 2 offsets are 32 bits.
4834 4834           */
4835 4835          case _PC_FILESIZEBITS:
4836 4836                  *valp = 32;
4837 4837                  return (0);
4838 4838  
4839 4839          case _PC_LINK_MAX:
4840 4840          case _PC_NAME_MAX:
4841 4841          case _PC_PATH_MAX:
4842 4842          case _PC_SYMLINK_MAX:
4843 4843          case _PC_CHOWN_RESTRICTED:
4844 4844          case _PC_NO_TRUNC: {
4845 4845                  mntinfo_t *mi;
4846 4846                  struct pathcnf *pc;
4847 4847  
4848 4848                  if ((mi = VTOMI(vp)) == NULL || (pc = mi->mi_pathconf) == NULL)
4849 4849                          return (EINVAL);
4850 4850                  error = _PC_ISSET(cmd, pc->pc_mask);    /* error or bool */
4851 4851                  switch (cmd) {
4852 4852                  case _PC_LINK_MAX:
4853 4853                          *valp = pc->pc_link_max;
4854 4854                          break;
4855 4855                  case _PC_NAME_MAX:
4856 4856                          *valp = pc->pc_name_max;
4857 4857                          break;
4858 4858                  case _PC_PATH_MAX:
4859 4859                  case _PC_SYMLINK_MAX:
4860 4860                          *valp = pc->pc_path_max;
4861 4861                          break;
4862 4862                  case _PC_CHOWN_RESTRICTED:
4863 4863                          /*
4864 4864                           * if we got here, error is really a boolean which
4865 4865                           * indicates whether cmd is set or not.
4866 4866                           */
4867 4867                          *valp = error ? 1 : 0;  /* see above */
4868 4868                          error = 0;
4869 4869                          break;
4870 4870                  case _PC_NO_TRUNC:
4871 4871                          /*
4872 4872                           * if we got here, error is really a boolean which
4873 4873                           * indicates whether cmd is set or not.
4874 4874                           */
4875 4875                          *valp = error ? 1 : 0;  /* see above */
4876 4876                          error = 0;
4877 4877                          break;
4878 4878                  }
4879 4879                  return (error ? EINVAL : 0);
4880 4880                  }
4881 4881  
4882 4882          case _PC_XATTR_EXISTS:
4883 4883                  *valp = 0;
4884 4884                  if (vp->v_vfsp->vfs_flag & VFS_XATTR) {
4885 4885                          vnode_t *avp;
4886 4886                          rnode_t *rp;
4887 4887                          mntinfo_t *mi = VTOMI(vp);
4888 4888  
4889 4889                          if (!(mi->mi_flags & MI_EXTATTR))
4890 4890                                  return (0);
4891 4891  
4892 4892                          rp = VTOR(vp);
4893 4893                          if (nfs_rw_enter_sig(&rp->r_rwlock, RW_READER,
4894 4894                              INTR(vp)))
4895 4895                                  return (EINTR);
4896 4896  
4897 4897                          error = nfslookup_dnlc(vp, XATTR_DIR_NAME, &avp, cr);
4898 4898                          if (error || avp == NULL)
4899 4899                                  error = acl_getxattrdir2(vp, &avp, 0, cr, 0);
4900 4900  
4901 4901                          nfs_rw_exit(&rp->r_rwlock);
4902 4902  
4903 4903                          if (error == 0 && avp != NULL) {
4904 4904                                  error = do_xattr_exists_check(avp, valp, cr);
4905 4905                                  VN_RELE(avp);
4906 4906                          }
4907 4907                  }
4908 4908                  return (error ? EINVAL : 0);
4909 4909  
4910 4910          case _PC_ACL_ENABLED:
4911 4911                  *valp = _ACL_ACLENT_ENABLED;
4912 4912                  return (0);
4913 4913  
4914 4914          default:
4915 4915                  return (EINVAL);
4916 4916          }
4917 4917  }
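
From userland these cases are reached through pathconf(2).  A tiny example,
assuming a hypothetical NFSv2 mount at /mnt/nfs, where _PC_FILESIZEBITS would
report 32 per the case above.

#include <unistd.h>
#include <stdio.h>

int
main(void)
{
	long bits = pathconf("/mnt/nfs", _PC_FILESIZEBITS);

	if (bits == -1)
		perror("pathconf");
	else
		(void) printf("_PC_FILESIZEBITS = %ld\n", bits);
	return (0);
}
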
4918 4918  
4919 4919  /*
4920 4920   * Called by async thread to do synchronous pageio. Do the i/o, wait
4921 4921   * for it to complete, and cleanup the page list when done.
4922 4922   */
4923 4923  static int
4924 4924  nfs_sync_pageio(vnode_t *vp, page_t *pp, u_offset_t io_off, size_t io_len,
4925 4925          int flags, cred_t *cr)
4926 4926  {
4927 4927          int error;
4928 4928  
4929 4929          ASSERT(nfs_zone() == VTOMI(vp)->mi_zone);
4930 4930          error = nfs_rdwrlbn(vp, pp, io_off, io_len, flags, cr);
4931 4931          if (flags & B_READ)
4932 4932                  pvn_read_done(pp, (error ? B_ERROR : 0) | flags);
4933 4933          else
4934 4934                  pvn_write_done(pp, (error ? B_ERROR : 0) | flags);
4935 4935          return (error);
4936 4936  }
4937 4937  
4938 4938  /* ARGSUSED */
4939 4939  static int
4940 4940  nfs_pageio(vnode_t *vp, page_t *pp, u_offset_t io_off, size_t io_len,
4941 4941          int flags, cred_t *cr, caller_context_t *ct)
4942 4942  {
4943 4943          int error;
4944 4944          rnode_t *rp;
4945 4945  
4946 4946          if (pp == NULL)
4947 4947                  return (EINVAL);
4948 4948  
4949 4949          if (io_off > MAXOFF32_T)
4950 4950                  return (EFBIG);
4951 4951          if (nfs_zone() != VTOMI(vp)->mi_zone)
4952 4952                  return (EIO);
4953 4953          rp = VTOR(vp);
4954 4954          mutex_enter(&rp->r_statelock);
4955 4955          rp->r_count++;
4956 4956          mutex_exit(&rp->r_statelock);
4957 4957  
4958 4958          if (flags & B_ASYNC) {
4959 4959                  error = nfs_async_pageio(vp, pp, io_off, io_len, flags, cr,
4960 4960                      nfs_sync_pageio);
4961 4961          } else
4962 4962                  error = nfs_rdwrlbn(vp, pp, io_off, io_len, flags, cr);
4963 4963          mutex_enter(&rp->r_statelock);
4964 4964          rp->r_count--;
4965 4965          cv_broadcast(&rp->r_cv);
4966 4966          mutex_exit(&rp->r_statelock);
4967 4967          return (error);
4968 4968  }
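
The r_count/r_cv dance around the I/O above is a generic hold-count pattern.
A userland pthreads analogue (illustrative only, not the kernel interfaces):
holders bump a counter under a mutex, and the release broadcasts so that a
quiescing thread can wait for in-flight I/O to drain.

#include <pthread.h>

static pthread_mutex_t statelock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t count_cv = PTHREAD_COND_INITIALIZER;
static int io_count;

static void
io_hold(void)
{
	pthread_mutex_lock(&statelock);
	io_count++;
	pthread_mutex_unlock(&statelock);
}

static void
io_rele(void)
{
	pthread_mutex_lock(&statelock);
	io_count--;
	pthread_cond_broadcast(&count_cv);	/* wake any quiesce waiters */
	pthread_mutex_unlock(&statelock);
}

/* A waiter blocks until all in-flight I/O has drained. */
static void
io_quiesce(void)
{
	pthread_mutex_lock(&statelock);
	while (io_count > 0)
		pthread_cond_wait(&count_cv, &statelock);
	pthread_mutex_unlock(&statelock);
}
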
4969 4969  
4970 4970  /* ARGSUSED */
4971 4971  static int
4972 4972  nfs_setsecattr(vnode_t *vp, vsecattr_t *vsecattr, int flag, cred_t *cr,
4973 4973          caller_context_t *ct)
4974 4974  {
4975 4975          int error;
4976 4976          mntinfo_t *mi;
4977 4977  
4978 4978          mi = VTOMI(vp);
4979 4979  
4980 4980          if (nfs_zone() != mi->mi_zone)
4981 4981                  return (EIO);
4982 4982          if (mi->mi_flags & MI_ACL) {
4983 4983                  error = acl_setacl2(vp, vsecattr, flag, cr);
4984 4984                  if (mi->mi_flags & MI_ACL)      /* may be cleared by acl_setacl2() */
4985 4985                          return (error);
4986 4986          }
4987 4987  
4988 4988          return (ENOSYS);
4989 4989  }
4990 4990  
4991 4991  /* ARGSUSED */
4992 4992  static int
4993 4993  nfs_getsecattr(vnode_t *vp, vsecattr_t *vsecattr, int flag, cred_t *cr,
4994 4994          caller_context_t *ct)
4995 4995  {
4996 4996          int error;
4997 4997          mntinfo_t *mi;
4998 4998  
4999 4999          mi = VTOMI(vp);
5000 5000  
5001 5001          if (nfs_zone() != mi->mi_zone)
5002 5002                  return (EIO);
5003 5003          if (mi->mi_flags & MI_ACL) {
5004 5004                  error = acl_getacl2(vp, vsecattr, flag, cr);
5005 5005                  if (mi->mi_flags & MI_ACL)      /* may be cleared by acl_getacl2() */
5006 5006                          return (error);
5007 5007          }
5008 5008  
5009 5009          return (fs_fab_acl(vp, vsecattr, flag, cr, ct));
5010 5010  }
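
Since _PC_ACL_ENABLED above advertises _ACL_ACLENT_ENABLED, old-style aclent_t
ACLs apply here.  A sketch of fetching one from userland with the
Solaris/illumos acl(2) syscall; the path is hypothetical and error handling is
minimal (see acl(2) for the authoritative interface).

#include <sys/acl.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	int cnt = acl("/mnt/nfs/file", GETACLCNT, 0, NULL);
	aclent_t *ents;

	if (cnt < 0) {
		perror("acl(GETACLCNT)");
		return (1);
	}
	ents = malloc(cnt * sizeof (aclent_t));
	if (acl("/mnt/nfs/file", GETACL, cnt, ents) < 0) {
		perror("acl(GETACL)");
		return (1);
	}
	(void) printf("%d ACL entries\n", cnt);
	free(ents);
	return (0);
}
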
5011 5011  
5012 5012  /* ARGSUSED */
5013 5013  static int
5014 5014  nfs_shrlock(vnode_t *vp, int cmd, struct shrlock *shr, int flag, cred_t *cr,
5015 5015          caller_context_t *ct)
5016 5016  {
5017 5017          int error;
5018 5018          struct shrlock nshr;
5019 5019          struct nfs_owner nfs_owner;
5020 5020          netobj lm_fh;
5021 5021  
5022 5022          if (nfs_zone() != VTOMI(vp)->mi_zone)
5023 5023                  return (EIO);
5024 5024  
5025 5025          /*
5026 5026           * check for valid cmd parameter
5027 5027           */
5028 5028          if (cmd != F_SHARE && cmd != F_UNSHARE && cmd != F_HASREMOTELOCKS)
5029 5029                  return (EINVAL);
5030 5030  
5031 5031          /*
5032 5032           * Check access permissions
5033 5033           */
5034 5034          if (cmd == F_SHARE &&
5035 5035              (((shr->s_access & F_RDACC) && !(flag & FREAD)) ||
5036 5036              ((shr->s_access & F_WRACC) && !(flag & FWRITE))))
5037 5037                  return (EBADF);
5038 5038  
5039 5039          /*
5040 5040           * If the filesystem is mounted using local locking, pass the
5041 5041           * request off to the local share code.
5042 5042           */
5043 5043          if (VTOMI(vp)->mi_flags & MI_LLOCK)
5044 5044                  return (fs_shrlock(vp, cmd, shr, flag, cr, ct));
5045 5045  
5046 5046          switch (cmd) {
5047 5047          case F_SHARE:
5048 5048          case F_UNSHARE:
5049 5049                  lm_fh.n_len = sizeof (fhandle_t);
5050 5050                  lm_fh.n_bytes = (char *)VTOFH(vp);
5051 5051  
5052 5052                  /*
5053 5053                   * If the owner we were passed is too large to fit in an
5054 5054                   * nfs_owner, it is likely a recursive call from the lock
5055 5055                   * manager client, so pass it straight through.  If it is
5056 5056                   * not an nfs_owner, simply return an error.
5057 5057                   */
5058 5058                  if (shr->s_own_len > sizeof (nfs_owner.lowner)) {
5059 5059                          if (((struct nfs_owner *)shr->s_owner)->magic !=
5060 5060                              NFS_OWNER_MAGIC)
5061 5061                                  return (EINVAL);
5062 5062  
5063 5063                          if (error = lm_shrlock(vp, cmd, shr, flag, &lm_fh)) {
5064 5064                                  error = set_errno(error);
5065 5065                          }
5066 5066                          return (error);
5067 5067                  }
5068 5068                  /*
5069 5069                   * A remote share reservation's owner is a combination of
5070 5070                   * a magic number, the hostname, and the local owner.
5071 5071                   */
5072 5072                  bzero(&nfs_owner, sizeof (nfs_owner));
5073 5073                  nfs_owner.magic = NFS_OWNER_MAGIC;
5074 5074                  (void) strncpy(nfs_owner.hname, uts_nodename(),
5075 5075                      sizeof (nfs_owner.hname));
5076 5076                  bcopy(shr->s_owner, nfs_owner.lowner, shr->s_own_len);
5077 5077                  nshr.s_access = shr->s_access;
5078 5078                  nshr.s_deny = shr->s_deny;
5079 5079                  nshr.s_sysid = 0;
5080 5080                  nshr.s_pid = ttoproc(curthread)->p_pid;
5081 5081                  nshr.s_own_len = sizeof (nfs_owner);
5082 5082                  nshr.s_owner = (caddr_t)&nfs_owner;
5083 5083  
5084 5084                  if (error = lm_shrlock(vp, cmd, &nshr, flag, &lm_fh)) {
5085 5085                          error = set_errno(error);
5086 5086                  }
5087 5087  
5088 5088                  break;
5089 5089  
5090 5090          case F_HASREMOTELOCKS:
5091 5091                  /*
5092 5092                   * NFS client can't store remote locks itself
5093 5093                   */
5094 5094                  shr->s_access = 0;
5095 5095                  error = 0;
5096 5096                  break;
5097 5097  
5098 5098          default:
5099 5099                  error = EINVAL;
5100 5100                  break;
5101 5101          }
5102 5102  
5103 5103          return (error);
5104 5104  }
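
For reference, the userland origin of a share reservation like the one handled
above is fcntl(2) with F_SHARE and a struct fshare (an illumos/Solaris-specific
interface).  A sketch with a hypothetical path and minimal checking; consult
fcntl(2) for the authoritative field semantics.

#include <sys/types.h>
#include <fcntl.h>
#include <stdio.h>

int
main(void)
{
	struct fshare fsh;
	int fd = open("/mnt/nfs/file", O_RDWR);

	if (fd < 0) {
		perror("open");
		return (1);
	}
	fsh.f_access = F_RDACC;		/* we want read access ... */
	fsh.f_deny = F_WRDNY;		/* ... and deny others write */
	fsh.f_id = 1;			/* caller-chosen reservation id */
	if (fcntl(fd, F_SHARE, &fsh) == -1)
		perror("fcntl(F_SHARE)");
	return (0);
}
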
  