OS-20 share_nfs(1m) charset handling is unreliable
OS-22 Page fault at nfscmd_dropped_entrysize+0x1e()
OS-23 NFSv2/3/4: READDIR responses are inconsistent when charset conversion fails
OS-24 rfs3_readdir(): Issues related to nfscmd_convdirent()
Reviewed by: Jan Kryl <jan.kryl@nexenta.com>
Reviewed by: Gordon Ross <gordon.ross@nexenta.com>
re #13613 rb4516 Tunables needs volatile keyword
    
      
    
    
          --- old/usr/src/uts/common/fs/nfs/nfs_vnops.c
          +++ new/usr/src/uts/common/fs/nfs/nfs_vnops.c
   1    1  /*
   2    2   * CDDL HEADER START
   3    3   *
   4    4   * The contents of this file are subject to the terms of the
   5    5   * Common Development and Distribution License (the "License").
   6    6   * You may not use this file except in compliance with the License.
   7    7   *
   8    8   * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9    9   * or http://www.opensolaris.org/os/licensing.
  10   10   * See the License for the specific language governing permissions
  11   11   * and limitations under the License.
  12   12   *
  
  
  13   13   * When distributing Covered Code, include this CDDL HEADER in each
  14   14   * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15   15   * If applicable, add the following below this CDDL HEADER, with the
  16   16   * fields enclosed by brackets "[]" replaced with your own identifying
  17   17   * information: Portions Copyright [yyyy] [name of copyright owner]
  18   18   *
  19   19   * CDDL HEADER END
  20   20   */
  21   21  /*
  22   22   * Copyright (c) 1990, 2010, Oracle and/or its affiliates. All rights reserved.
       23 + * Copyright 2013 Nexenta Systems, Inc.  All rights reserved.
  23   24   *
  24   25   *      Copyright (c) 1983,1984,1985,1986,1987,1988,1989 AT&T.
  25   26   *      All rights reserved.
  26   27   */
  27   28  
  28   29  /*
  29   30   * Copyright (c) 2013, Joyent, Inc. All rights reserved.
  30   31   * Copyright 2015 Nexenta Systems, Inc.  All rights reserved.
  31   32   */
  32   33  
  33   34  #include <sys/param.h>
  34   35  #include <sys/types.h>
  35   36  #include <sys/systm.h>
  36   37  #include <sys/cred.h>
  37   38  #include <sys/time.h>
  38   39  #include <sys/vnode.h>
  39   40  #include <sys/vfs.h>
  40   41  #include <sys/vfs_opreg.h>
  41   42  #include <sys/file.h>
  42   43  #include <sys/filio.h>
  43   44  #include <sys/uio.h>
  44   45  #include <sys/buf.h>
  45   46  #include <sys/mman.h>
  46   47  #include <sys/pathname.h>
  47   48  #include <sys/dirent.h>
  48   49  #include <sys/debug.h>
  49   50  #include <sys/vmsystm.h>
  50   51  #include <sys/fcntl.h>
  51   52  #include <sys/flock.h>
  52   53  #include <sys/swap.h>
  53   54  #include <sys/errno.h>
  54   55  #include <sys/strsubr.h>
  55   56  #include <sys/sysmacros.h>
  56   57  #include <sys/kmem.h>
  57   58  #include <sys/cmn_err.h>
  58   59  #include <sys/pathconf.h>
  59   60  #include <sys/utsname.h>
  60   61  #include <sys/dnlc.h>
  61   62  #include <sys/acl.h>
  62   63  #include <sys/atomic.h>
  63   64  #include <sys/policy.h>
  64   65  #include <sys/sdt.h>
  65   66  
  66   67  #include <rpc/types.h>
  67   68  #include <rpc/auth.h>
  68   69  #include <rpc/clnt.h>
  69   70  
  70   71  #include <nfs/nfs.h>
  71   72  #include <nfs/nfs_clnt.h>
  72   73  #include <nfs/rnode.h>
  73   74  #include <nfs/nfs_acl.h>
  74   75  #include <nfs/lm.h>
  75   76  
  76   77  #include <vm/hat.h>
  77   78  #include <vm/as.h>
  78   79  #include <vm/page.h>
  79   80  #include <vm/pvn.h>
  80   81  #include <vm/seg.h>
  81   82  #include <vm/seg_map.h>
  82   83  #include <vm/seg_kpm.h>
  83   84  #include <vm/seg_vn.h>
  84   85  
  85   86  #include <fs/fs_subr.h>
  86   87  
  87   88  #include <sys/ddi.h>
  88   89  
  89   90  static int      nfs_rdwrlbn(vnode_t *, page_t *, u_offset_t, size_t, int,
  90   91                          cred_t *);
  91   92  static int      nfswrite(vnode_t *, caddr_t, uint_t, int, cred_t *);
  92   93  static int      nfsread(vnode_t *, caddr_t, uint_t, int, size_t *, cred_t *);
  93   94  static int      nfssetattr(vnode_t *, struct vattr *, int, cred_t *);
  94   95  static int      nfslookup_dnlc(vnode_t *, char *, vnode_t **, cred_t *);
  95   96  static int      nfslookup_otw(vnode_t *, char *, vnode_t **, cred_t *, int);
  96   97  static int      nfsrename(vnode_t *, char *, vnode_t *, char *, cred_t *,
  97   98                          caller_context_t *);
  98   99  static int      nfsreaddir(vnode_t *, rddir_cache *, cred_t *);
  99  100  static int      nfs_bio(struct buf *, cred_t *);
 100  101  static int      nfs_getapage(vnode_t *, u_offset_t, size_t, uint_t *,
 101  102                          page_t *[], size_t, struct seg *, caddr_t,
 102  103                          enum seg_rw, cred_t *);
 103  104  static void     nfs_readahead(vnode_t *, u_offset_t, caddr_t, struct seg *,
 104  105                          cred_t *);
 105  106  static int      nfs_sync_putapage(vnode_t *, page_t *, u_offset_t, size_t,
 106  107                          int, cred_t *);
 107  108  static int      nfs_sync_pageio(vnode_t *, page_t *, u_offset_t, size_t,
 108  109                          int, cred_t *);
 109  110  static void     nfs_delmap_callback(struct as *, void *, uint_t);
 110  111  
 111  112  /*
 112  113   * Error flags used to pass information about certain special errors
 113  114   * which need to be handled specially.
 114  115   */
 115  116  #define NFS_EOF                 -98
 116  117  
 117  118  /*
 118  119   * These are the vnode ops routines which implement the vnode interface to
 119  120   * the networked file system.  These routines just take their parameters,
 120  121   * make them look networkish by putting the right info into interface structs,
 121  122   * and then call the appropriate remote routine(s) to do the work.
 122  123   *
 123  124   * Note on directory name lookup caching:  If we detect a stale fhandle,
 124  125   * we purge the directory cache relative to that vnode.  This way, the
 125  126   * user won't get burned by the cache repeatedly.  See <nfs/rnode.h> for
 126  127   * more details on rnode locking.
 127  128   */
 128  129  
 129  130  static int      nfs_open(vnode_t **, int, cred_t *, caller_context_t *);
 130  131  static int      nfs_close(vnode_t *, int, int, offset_t, cred_t *,
 131  132                          caller_context_t *);
 132  133  static int      nfs_read(vnode_t *, struct uio *, int, cred_t *,
 133  134                          caller_context_t *);
 134  135  static int      nfs_write(vnode_t *, struct uio *, int, cred_t *,
 135  136                          caller_context_t *);
 136  137  static int      nfs_ioctl(vnode_t *, int, intptr_t, int, cred_t *, int *,
 137  138                          caller_context_t *);
 138  139  static int      nfs_getattr(vnode_t *, struct vattr *, int, cred_t *,
 139  140                          caller_context_t *);
 140  141  static int      nfs_setattr(vnode_t *, struct vattr *, int, cred_t *,
 141  142                          caller_context_t *);
 142  143  static int      nfs_access(vnode_t *, int, int, cred_t *, caller_context_t *);
 143  144  static int      nfs_accessx(void *, int, cred_t *);
 144  145  static int      nfs_readlink(vnode_t *, struct uio *, cred_t *,
 145  146                          caller_context_t *);
 146  147  static int      nfs_fsync(vnode_t *, int, cred_t *, caller_context_t *);
 147  148  static void     nfs_inactive(vnode_t *, cred_t *, caller_context_t *);
 148  149  static int      nfs_lookup(vnode_t *, char *, vnode_t **, struct pathname *,
 149  150                          int, vnode_t *, cred_t *, caller_context_t *,
 150  151                          int *, pathname_t *);
 151  152  static int      nfs_create(vnode_t *, char *, struct vattr *, enum vcexcl,
 152  153                          int, vnode_t **, cred_t *, int, caller_context_t *,
 153  154                          vsecattr_t *);
 154  155  static int      nfs_remove(vnode_t *, char *, cred_t *, caller_context_t *,
 155  156                          int);
 156  157  static int      nfs_link(vnode_t *, vnode_t *, char *, cred_t *,
 157  158                          caller_context_t *, int);
 158  159  static int      nfs_rename(vnode_t *, char *, vnode_t *, char *, cred_t *,
 159  160                          caller_context_t *, int);
 160  161  static int      nfs_mkdir(vnode_t *, char *, struct vattr *, vnode_t **,
 161  162                          cred_t *, caller_context_t *, int, vsecattr_t *);
 162  163  static int      nfs_rmdir(vnode_t *, char *, vnode_t *, cred_t *,
 163  164                          caller_context_t *, int);
 164  165  static int      nfs_symlink(vnode_t *, char *, struct vattr *, char *,
 165  166                          cred_t *, caller_context_t *, int);
 166  167  static int      nfs_readdir(vnode_t *, struct uio *, cred_t *, int *,
 167  168                          caller_context_t *, int);
 168  169  static int      nfs_fid(vnode_t *, fid_t *, caller_context_t *);
 169  170  static int      nfs_rwlock(vnode_t *, int, caller_context_t *);
 170  171  static void     nfs_rwunlock(vnode_t *, int, caller_context_t *);
 171  172  static int      nfs_seek(vnode_t *, offset_t, offset_t *, caller_context_t *);
 172  173  static int      nfs_getpage(vnode_t *, offset_t, size_t, uint_t *,
 173  174                          page_t *[], size_t, struct seg *, caddr_t,
 174  175                          enum seg_rw, cred_t *, caller_context_t *);
 175  176  static int      nfs_putpage(vnode_t *, offset_t, size_t, int, cred_t *,
 176  177                          caller_context_t *);
 177  178  static int      nfs_map(vnode_t *, offset_t, struct as *, caddr_t *, size_t,
 178  179                          uchar_t, uchar_t, uint_t, cred_t *, caller_context_t *);
 179  180  static int      nfs_addmap(vnode_t *, offset_t, struct as *, caddr_t, size_t,
 180  181                          uchar_t, uchar_t, uint_t, cred_t *, caller_context_t *);
 181  182  static int      nfs_frlock(vnode_t *, int, struct flock64 *, int, offset_t,
 182  183                          struct flk_callback *, cred_t *, caller_context_t *);
 183  184  static int      nfs_space(vnode_t *, int, struct flock64 *, int, offset_t,
 184  185                          cred_t *, caller_context_t *);
 185  186  static int      nfs_realvp(vnode_t *, vnode_t **, caller_context_t *);
 186  187  static int      nfs_delmap(vnode_t *, offset_t, struct as *, caddr_t, size_t,
 187  188                          uint_t, uint_t, uint_t, cred_t *, caller_context_t *);
 188  189  static int      nfs_pathconf(vnode_t *, int, ulong_t *, cred_t *,
 189  190                          caller_context_t *);
 190  191  static int      nfs_pageio(vnode_t *, page_t *, u_offset_t, size_t, int,
 191  192                          cred_t *, caller_context_t *);
 192  193  static int      nfs_setsecattr(vnode_t *, vsecattr_t *, int, cred_t *,
 193  194                          caller_context_t *);
 194  195  static int      nfs_getsecattr(vnode_t *, vsecattr_t *, int, cred_t *,
 195  196                          caller_context_t *);
 196  197  static int      nfs_shrlock(vnode_t *, int, struct shrlock *, int, cred_t *,
 197  198                          caller_context_t *);
 198  199  
 199  200  struct vnodeops *nfs_vnodeops;
 200  201  
 201  202  const fs_operation_def_t nfs_vnodeops_template[] = {
 202  203          VOPNAME_OPEN,           { .vop_open = nfs_open },
 203  204          VOPNAME_CLOSE,          { .vop_close = nfs_close },
 204  205          VOPNAME_READ,           { .vop_read = nfs_read },
 205  206          VOPNAME_WRITE,          { .vop_write = nfs_write },
 206  207          VOPNAME_IOCTL,          { .vop_ioctl = nfs_ioctl },
 207  208          VOPNAME_GETATTR,        { .vop_getattr = nfs_getattr },
 208  209          VOPNAME_SETATTR,        { .vop_setattr = nfs_setattr },
 209  210          VOPNAME_ACCESS,         { .vop_access = nfs_access },
 210  211          VOPNAME_LOOKUP,         { .vop_lookup = nfs_lookup },
 211  212          VOPNAME_CREATE,         { .vop_create = nfs_create },
 212  213          VOPNAME_REMOVE,         { .vop_remove = nfs_remove },
 213  214          VOPNAME_LINK,           { .vop_link = nfs_link },
 214  215          VOPNAME_RENAME,         { .vop_rename = nfs_rename },
 215  216          VOPNAME_MKDIR,          { .vop_mkdir = nfs_mkdir },
 216  217          VOPNAME_RMDIR,          { .vop_rmdir = nfs_rmdir },
 217  218          VOPNAME_READDIR,        { .vop_readdir = nfs_readdir },
 218  219          VOPNAME_SYMLINK,        { .vop_symlink = nfs_symlink },
 219  220          VOPNAME_READLINK,       { .vop_readlink = nfs_readlink },
 220  221          VOPNAME_FSYNC,          { .vop_fsync = nfs_fsync },
 221  222          VOPNAME_INACTIVE,       { .vop_inactive = nfs_inactive },
 222  223          VOPNAME_FID,            { .vop_fid = nfs_fid },
 223  224          VOPNAME_RWLOCK,         { .vop_rwlock = nfs_rwlock },
 224  225          VOPNAME_RWUNLOCK,       { .vop_rwunlock = nfs_rwunlock },
 225  226          VOPNAME_SEEK,           { .vop_seek = nfs_seek },
 226  227          VOPNAME_FRLOCK,         { .vop_frlock = nfs_frlock },
 227  228          VOPNAME_SPACE,          { .vop_space = nfs_space },
 228  229          VOPNAME_REALVP,         { .vop_realvp = nfs_realvp },
 229  230          VOPNAME_GETPAGE,        { .vop_getpage = nfs_getpage },
 230  231          VOPNAME_PUTPAGE,        { .vop_putpage = nfs_putpage },
 231  232          VOPNAME_MAP,            { .vop_map = nfs_map },
 232  233          VOPNAME_ADDMAP,         { .vop_addmap = nfs_addmap },
 233  234          VOPNAME_DELMAP,         { .vop_delmap = nfs_delmap },
 234  235          VOPNAME_DUMP,           { .vop_dump = nfs_dump },
 235  236          VOPNAME_PATHCONF,       { .vop_pathconf = nfs_pathconf },
 236  237          VOPNAME_PAGEIO,         { .vop_pageio = nfs_pageio },
 237  238          VOPNAME_SETSECATTR,     { .vop_setsecattr = nfs_setsecattr },
 238  239          VOPNAME_GETSECATTR,     { .vop_getsecattr = nfs_getsecattr },
 239  240          VOPNAME_SHRLOCK,        { .vop_shrlock = nfs_shrlock },
 240  241          VOPNAME_VNEVENT,        { .vop_vnevent = fs_vnevent_support },
 241  242          NULL,                   NULL
 242  243  };
 243  244  
 244  245  /*
 245  246   * XXX:  This is referenced in modstubs.s
 246  247   */
 247  248  struct vnodeops *
 248  249  nfs_getvnodeops(void)
 249  250  {
 250  251          return (nfs_vnodeops);
 251  252  }
 252  253  
 253  254  /* ARGSUSED */
 254  255  static int
 255  256  nfs_open(vnode_t **vpp, int flag, cred_t *cr, caller_context_t *ct)
 256  257  {
 257  258          int error;
 258  259          struct vattr va;
 259  260          rnode_t *rp;
 260  261          vnode_t *vp;
 261  262  
 262  263          vp = *vpp;
 263  264          rp = VTOR(vp);
 264  265          if (nfs_zone() != VTOMI(vp)->mi_zone)
 265  266                  return (EIO);
 266  267          mutex_enter(&rp->r_statelock);
 267  268          if (rp->r_cred == NULL) {
 268  269                  crhold(cr);
 269  270                  rp->r_cred = cr;
 270  271          }
 271  272          mutex_exit(&rp->r_statelock);
 272  273  
 273  274          /*
 274  275           * If there is no cached data or if close-to-open
 275  276           * consistency checking is turned off, we can avoid
 276  277           * the over the wire getattr.  Otherwise, if the
 277  278           * file system is mounted readonly, then just verify
 278  279           * the caches are up to date using the normal mechanism.
 279  280           * Else, if the file is not mmap'd, then just mark
 280  281           * the attributes as timed out.  They will be refreshed
 281  282           * and the caches validated prior to being used.
 282  283           * Else, the file system is mounted writeable so
 283  284           * force an over the wire GETATTR in order to ensure
 284  285           * that all cached data is valid.
 285  286           */
 286  287          if (vp->v_count > 1 ||
 287  288              ((vn_has_cached_data(vp) || HAVE_RDDIR_CACHE(rp)) &&
 288  289              !(VTOMI(vp)->mi_flags & MI_NOCTO))) {
 289  290                  if (vn_is_readonly(vp))
 290  291                          error = nfs_validate_caches(vp, cr);
 291  292                  else if (rp->r_mapcnt == 0 && vp->v_count == 1) {
 292  293                          PURGE_ATTRCACHE(vp);
 293  294                          error = 0;
 294  295                  } else {
 295  296                          va.va_mask = AT_ALL;
 296  297                          error = nfs_getattr_otw(vp, &va, cr);
 297  298                  }
 298  299          } else
 299  300                  error = 0;
 300  301  
 301  302          return (error);
 302  303  }
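
The block comment above encodes a four-way decision. As a minimal userland sketch, assuming invented booleans in place of v_count, vn_has_cached_data()/HAVE_RDDIR_CACHE, MI_NOCTO, vn_is_readonly(), and r_mapcnt (none of these names come from the code above):

#include <stdio.h>

/* Possible open-time cache actions, mirroring the branches above. */
enum open_action {
	VALIDATE_CACHES,	/* readonly mount: nfs_validate_caches() */
	PURGE_ATTRS_ONLY,	/* sole ref, not mmap'd: expire attr cache */
	GETATTR_OTW,		/* writable and shared/mapped: forced GETATTR */
	NO_WORK			/* nothing cached, or "nocto" mount */
};

static enum open_action
open_cache_action(int vcount, int has_cached_data, int nocto,
    int readonly, int mapcnt)
{
	if (vcount > 1 || (has_cached_data && !nocto)) {
		if (readonly)
			return (VALIDATE_CACHES);
		if (mapcnt == 0 && vcount == 1)
			return (PURGE_ATTRS_ONLY);
		return (GETATTR_OTW);
	}
	return (NO_WORK);
}

int
main(void)
{
	/* Writable mount, cached pages, file also mapped elsewhere. */
	printf("%d\n", open_cache_action(2, 1, 0, 0, 1)); /* GETATTR_OTW */
	return (0);
}
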
 303  304  
 304  305  /* ARGSUSED */
 305  306  static int
 306  307  nfs_close(vnode_t *vp, int flag, int count, offset_t offset, cred_t *cr,
 307  308          caller_context_t *ct)
 308  309  {
 309  310          rnode_t *rp;
 310  311          int error;
 311  312          struct vattr va;
 312  313  
 313  314          /*
 314  315           * zone_enter(2) prevents processes from changing zones with NFS files
 315  316           * open; if we happen to get here from the wrong zone we can't do
 316  317           * anything over the wire.
 317  318           */
 318  319          if (VTOMI(vp)->mi_zone != nfs_zone()) {
 319  320                  /*
 320  321                   * We could attempt to clean up locks, except we're sure
 321  322                   * that the current process didn't acquire any locks on
  322  323                   * the file: any attempt to lock a file belonging to another zone
 323  324                   * will fail, and one can't lock an NFS file and then change
 324  325                   * zones, as that fails too.
 325  326                   *
 326  327                   * Returning an error here is the sane thing to do.  A
 327  328                   * subsequent call to VN_RELE() which translates to a
 328  329                   * nfs_inactive() will clean up state: if the zone of the
 329  330                   * vnode's origin is still alive and kicking, an async worker
 330  331                   * thread will handle the request (from the correct zone), and
 331  332                   * everything (minus the final nfs_getattr_otw() call) should
 332  333                   * be OK. If the zone is going away nfs_async_inactive() will
 333  334                   * throw away cached pages inline.
 334  335                   */
 335  336                  return (EIO);
 336  337          }
 337  338  
 338  339          /*
 339  340           * If we are using local locking for this filesystem, then
 340  341           * release all of the SYSV style record locks.  Otherwise,
 341  342           * we are doing network locking and we need to release all
 342  343           * of the network locks.  All of the locks held by this
 343  344           * process on this file are released no matter what the
 344  345           * incoming reference count is.
 345  346           */
 346  347          if (VTOMI(vp)->mi_flags & MI_LLOCK) {
 347  348                  cleanlocks(vp, ttoproc(curthread)->p_pid, 0);
 348  349                  cleanshares(vp, ttoproc(curthread)->p_pid);
 349  350          } else
 350  351                  nfs_lockrelease(vp, flag, offset, cr);
 351  352  
 352  353          if (count > 1)
 353  354                  return (0);
 354  355  
 355  356          /*
 356  357           * If the file has been `unlinked', then purge the
  357  358           * DNLC so that this vnode will get recycled quicker
 358  359           * and the .nfs* file on the server will get removed.
 359  360           */
 360  361          rp = VTOR(vp);
 361  362          if (rp->r_unldvp != NULL)
 362  363                  dnlc_purge_vp(vp);
 363  364  
 364  365          /*
 365  366           * If the file was open for write and there are pages,
 366  367           * then if the file system was mounted using the "no-close-
 367  368           *      to-open" semantics, then start an asynchronous flush
 368  369           *      of the all of the pages in the file.
 369  370           * else the file system was not mounted using the "no-close-
 370  371           *      to-open" semantics, then do a synchronous flush and
 371  372           *      commit of all of the dirty and uncommitted pages.
 372  373           *
 373  374           * The asynchronous flush of the pages in the "nocto" path
 374  375           * mostly just associates a cred pointer with the rnode so
 375  376           * writes which happen later will have a better chance of
 376  377           * working.  It also starts the data being written to the
 377  378           * server, but without unnecessarily delaying the application.
 378  379           */
 379  380          if ((flag & FWRITE) && vn_has_cached_data(vp)) {
 380  381                  if ((VTOMI(vp)->mi_flags & MI_NOCTO)) {
 381  382                          error = nfs_putpage(vp, (offset_t)0, 0, B_ASYNC,
 382  383                              cr, ct);
 383  384                          if (error == EAGAIN)
 384  385                                  error = 0;
 385  386                  } else
 386  387                          error = nfs_putpage(vp, (offset_t)0, 0, 0, cr, ct);
 387  388                  if (!error) {
 388  389                          mutex_enter(&rp->r_statelock);
 389  390                          error = rp->r_error;
 390  391                          rp->r_error = 0;
 391  392                          mutex_exit(&rp->r_statelock);
 392  393                  }
 393  394          } else {
 394  395                  mutex_enter(&rp->r_statelock);
 395  396                  error = rp->r_error;
 396  397                  rp->r_error = 0;
 397  398                  mutex_exit(&rp->r_statelock);
 398  399          }
 399  400  
 400  401          /*
 401  402           * If RWRITEATTR is set, then issue an over the wire GETATTR to
 402  403           * refresh the attribute cache with a set of attributes which
 403  404           * weren't returned from a WRITE.  This will enable the close-
 404  405           * to-open processing to work.
 405  406           */
 406  407          if (rp->r_flags & RWRITEATTR)
 407  408                  (void) nfs_getattr_otw(vp, &va, cr);
 408  409  
 409  410          return (error);
 410  411  }
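
On the last reference close, the flush policy above reduces to three cases: no flush when the file was not open for write or has no cached pages, an asynchronous best-effort flush (EAGAIN ignored) on "nocto" mounts, and a synchronous flush-and-commit otherwise. A compact sketch of just that decision, with invented parameters standing in for FWRITE, vn_has_cached_data(), and MI_NOCTO:

#include <stdio.h>

enum flush_kind { FLUSH_NONE, FLUSH_ASYNC, FLUSH_SYNC };

/* Mirror of the close-time putpage decision above. */
static enum flush_kind
close_flush_kind(int open_for_write, int has_pages, int nocto)
{
	if (!open_for_write || !has_pages)
		return (FLUSH_NONE);
	return (nocto ? FLUSH_ASYNC : FLUSH_SYNC);
}

int
main(void)
{
	printf("%d\n", close_flush_kind(1, 1, 1));	/* FLUSH_ASYNC */
	printf("%d\n", close_flush_kind(1, 1, 0));	/* FLUSH_SYNC */
	printf("%d\n", close_flush_kind(0, 1, 0));	/* FLUSH_NONE */
	return (0);
}
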
 411  412  
 412  413  /* ARGSUSED */
 413  414  static int
 414  415  nfs_read(vnode_t *vp, struct uio *uiop, int ioflag, cred_t *cr,
 415  416          caller_context_t *ct)
 416  417  {
 417  418          rnode_t *rp;
 418  419          u_offset_t off;
 419  420          offset_t diff;
 420  421          int on;
 421  422          size_t n;
 422  423          caddr_t base;
 423  424          uint_t flags;
 424  425          int error;
 425  426          mntinfo_t *mi;
 426  427  
 427  428          rp = VTOR(vp);
 428  429          mi = VTOMI(vp);
 429  430  
 430  431          if (nfs_zone() != mi->mi_zone)
 431  432                  return (EIO);
 432  433  
 433  434          ASSERT(nfs_rw_lock_held(&rp->r_rwlock, RW_READER));
 434  435  
 435  436          if (vp->v_type != VREG)
 436  437                  return (EISDIR);
 437  438  
 438  439          if (uiop->uio_resid == 0)
 439  440                  return (0);
 440  441  
 441  442          if (uiop->uio_loffset > MAXOFF32_T)
 442  443                  return (EFBIG);
 443  444  
 444  445          if (uiop->uio_loffset < 0 ||
 445  446              uiop->uio_loffset + uiop->uio_resid > MAXOFF32_T)
 446  447                  return (EINVAL);
 447  448  
 448  449          /*
 449  450           * Bypass VM if caching has been disabled (e.g., locking) or if
 450  451           * using client-side direct I/O and the file is not mmap'd and
 451  452           * there are no cached pages.
 452  453           */
 453  454          if ((vp->v_flag & VNOCACHE) ||
 454  455              (((rp->r_flags & RDIRECTIO) || (mi->mi_flags & MI_DIRECTIO)) &&
 455  456              rp->r_mapcnt == 0 && rp->r_inmap == 0 &&
 456  457              !vn_has_cached_data(vp))) {
 457  458                  size_t bufsize;
 458  459                  size_t resid = 0;
 459  460  
 460  461                  /*
  461  462                   * Let's try to read in as large a chunk as we can
 462  463                   * (Filesystem (NFS client) bsize if possible/needed).
 463  464                   * For V3, this is 32K and for V2, this is 8K.
 464  465                   */
 465  466                  bufsize = MIN(uiop->uio_resid, VTOMI(vp)->mi_curread);
 466  467                  base = kmem_alloc(bufsize, KM_SLEEP);
 467  468                  do {
 468  469                          n = MIN(uiop->uio_resid, bufsize);
 469  470                          error = nfsread(vp, base, uiop->uio_offset, n,
 470  471                              &resid, cr);
 471  472                          if (!error) {
 472  473                                  n -= resid;
 473  474                                  error = uiomove(base, n, UIO_READ, uiop);
 474  475                          }
 475  476                  } while (!error && uiop->uio_resid > 0 && n > 0);
 476  477                  kmem_free(base, bufsize);
 477  478                  return (error);
 478  479          }
 479  480  
 480  481          error = 0;
 481  482  
 482  483          do {
 483  484                  off = uiop->uio_loffset & MAXBMASK; /* mapping offset */
 484  485                  on = uiop->uio_loffset & MAXBOFFSET; /* Relative offset */
 485  486                  n = MIN(MAXBSIZE - on, uiop->uio_resid);
 486  487  
 487  488                  error = nfs_validate_caches(vp, cr);
 488  489                  if (error)
 489  490                          break;
 490  491  
 491  492                  mutex_enter(&rp->r_statelock);
 492  493                  while (rp->r_flags & RINCACHEPURGE) {
 493  494                          if (!cv_wait_sig(&rp->r_cv, &rp->r_statelock)) {
 494  495                                  mutex_exit(&rp->r_statelock);
 495  496                                  return (EINTR);
 496  497                          }
 497  498                  }
 498  499                  diff = rp->r_size - uiop->uio_loffset;
 499  500                  mutex_exit(&rp->r_statelock);
 500  501                  if (diff <= 0)
 501  502                          break;
 502  503                  if (diff < n)
 503  504                          n = (size_t)diff;
 504  505  
 505  506                  if (vpm_enable) {
 506  507                          /*
 507  508                           * Copy data.
 508  509                           */
 509  510                          error = vpm_data_copy(vp, off + on, n, uiop,
 510  511                              1, NULL, 0, S_READ);
 511  512                  } else {
 512  513                          base = segmap_getmapflt(segkmap, vp, off + on, n,
 513  514                              1, S_READ);
 514  515                          error = uiomove(base + on, n, UIO_READ, uiop);
 515  516                  }
 516  517  
 517  518                  if (!error) {
 518  519                          /*
  519  520                           * If we read a whole block or read to EOF,
  520  521                           * we won't need this buffer again soon.
 521  522                           */
 522  523                          mutex_enter(&rp->r_statelock);
 523  524                          if (n + on == MAXBSIZE ||
 524  525                              uiop->uio_loffset == rp->r_size)
 525  526                                  flags = SM_DONTNEED;
 526  527                          else
 527  528                                  flags = 0;
 528  529                          mutex_exit(&rp->r_statelock);
 529  530                          if (vpm_enable) {
 530  531                                  error = vpm_sync_pages(vp, off, n, flags);
 531  532                          } else {
 532  533                                  error = segmap_release(segkmap, base, flags);
 533  534                          }
 534  535                  } else {
 535  536                          if (vpm_enable) {
 536  537                                  (void) vpm_sync_pages(vp, off, n, 0);
 537  538                          } else {
 538  539                                  (void) segmap_release(segkmap, base, 0);
 539  540                          }
 540  541                  }
 541  542          } while (!error && uiop->uio_resid > 0);
 542  543  
 543  544          return (error);
 544  545  }
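
The VM-bypass path near the top of nfs_read() is a classic bounce-buffer loop: allocate one transfer-sized buffer, read MIN(remaining, bufsize) bytes per trip, and stop on error, end of file, or a short transfer. A rough userland analogue built on pread(2); the buffer size and file argument here are illustrative, not values taken from the code above:

#include <sys/types.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

/*
 * Copy up to 'count' bytes from 'fd' at offset 'off' into 'dst' through
 * a bounce buffer of 'bufsize' bytes, the way the direct-I/O path above
 * loops over nfsread().  Returns bytes copied, or -1 on error.
 */
static ssize_t
chunked_read(int fd, void *dst, size_t count, off_t off, size_t bufsize)
{
	char *base;
	size_t done = 0;
	ssize_t n;

	if ((base = malloc(bufsize)) == NULL)
		return (-1);
	while (done < count) {
		size_t want = count - done;

		if (want > bufsize)
			want = bufsize;
		n = pread(fd, base, want, off + (off_t)done);
		if (n < 0) {
			free(base);
			return (-1);
		}
		if (n == 0)			/* EOF, like diff <= 0 above */
			break;
		memcpy((char *)dst + done, base, (size_t)n);
		done += (size_t)n;
		if ((size_t)n < want)		/* short read from the server */
			break;
	}
	free(base);
	return ((ssize_t)done);
}

int
main(int argc, char **argv)
{
	char buf[64];
	int fd;

	if (argc < 2 || (fd = open(argv[1], O_RDONLY)) < 0)
		return (1);
	printf("copied %zd bytes\n",
	    chunked_read(fd, buf, sizeof (buf), 0, 16));
	return (0);
}
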
 545  546  
 546  547  /* ARGSUSED */
 547  548  static int
 548  549  nfs_write(vnode_t *vp, struct uio *uiop, int ioflag, cred_t *cr,
 549  550          caller_context_t *ct)
 550  551  {
 551  552          rnode_t *rp;
 552  553          u_offset_t off;
 553  554          caddr_t base;
 554  555          uint_t flags;
 555  556          int remainder;
 556  557          size_t n;
 557  558          int on;
 558  559          int error;
 559  560          int resid;
 560  561          offset_t offset;
 561  562          rlim_t limit;
 562  563          mntinfo_t *mi;
 563  564  
 564  565          rp = VTOR(vp);
 565  566  
 566  567          mi = VTOMI(vp);
 567  568          if (nfs_zone() != mi->mi_zone)
 568  569                  return (EIO);
 569  570          if (vp->v_type != VREG)
 570  571                  return (EISDIR);
 571  572  
 572  573          if (uiop->uio_resid == 0)
 573  574                  return (0);
 574  575  
 575  576          if (ioflag & FAPPEND) {
 576  577                  struct vattr va;
 577  578  
 578  579                  /*
 579  580                   * Must serialize if appending.
 580  581                   */
 581  582                  if (nfs_rw_lock_held(&rp->r_rwlock, RW_READER)) {
 582  583                          nfs_rw_exit(&rp->r_rwlock);
 583  584                          if (nfs_rw_enter_sig(&rp->r_rwlock, RW_WRITER,
 584  585                              INTR(vp)))
 585  586                                  return (EINTR);
 586  587                  }
 587  588  
 588  589                  va.va_mask = AT_SIZE;
 589  590                  error = nfsgetattr(vp, &va, cr);
 590  591                  if (error)
 591  592                          return (error);
 592  593                  uiop->uio_loffset = va.va_size;
 593  594          }
 594  595  
 595  596          if (uiop->uio_loffset > MAXOFF32_T)
 596  597                  return (EFBIG);
 597  598  
 598  599          offset = uiop->uio_loffset + uiop->uio_resid;
 599  600  
 600  601          if (uiop->uio_loffset < 0 || offset > MAXOFF32_T)
 601  602                  return (EINVAL);
 602  603  
 603  604          if (uiop->uio_llimit > (rlim64_t)MAXOFF32_T) {
 604  605                  limit = MAXOFF32_T;
 605  606          } else {
 606  607                  limit = (rlim_t)uiop->uio_llimit;
 607  608          }
 608  609  
 609  610          /*
 610  611           * Check to make sure that the process will not exceed
 611  612           * its limit on file size.  It is okay to write up to
 612  613           * the limit, but not beyond.  Thus, the write which
 613  614           * reaches the limit will be short and the next write
 614  615           * will return an error.
 615  616           */
 616  617          remainder = 0;
 617  618          if (offset > limit) {
 618  619                  remainder = offset - limit;
 619  620                  uiop->uio_resid = limit - uiop->uio_offset;
 620  621                  if (uiop->uio_resid <= 0) {
 621  622                          proc_t *p = ttoproc(curthread);
 622  623  
 623  624                          uiop->uio_resid += remainder;
 624  625                          mutex_enter(&p->p_lock);
 625  626                          (void) rctl_action(rctlproc_legacy[RLIMIT_FSIZE],
 626  627                              p->p_rctls, p, RCA_UNSAFE_SIGINFO);
 627  628                          mutex_exit(&p->p_lock);
 628  629                          return (EFBIG);
 629  630                  }
 630  631          }
 631  632  
 632  633          if (nfs_rw_enter_sig(&rp->r_lkserlock, RW_READER, INTR(vp)))
 633  634                  return (EINTR);
 634  635  
 635  636          /*
 636  637           * Bypass VM if caching has been disabled (e.g., locking) or if
 637  638           * using client-side direct I/O and the file is not mmap'd and
 638  639           * there are no cached pages.
 639  640           */
 640  641          if ((vp->v_flag & VNOCACHE) ||
 641  642              (((rp->r_flags & RDIRECTIO) || (mi->mi_flags & MI_DIRECTIO)) &&
 642  643              rp->r_mapcnt == 0 && rp->r_inmap == 0 &&
 643  644              !vn_has_cached_data(vp))) {
 644  645                  size_t bufsize;
 645  646                  int count;
 646  647                  uint_t org_offset;
 647  648  
 648  649  nfs_fwrite:
 649  650                  if (rp->r_flags & RSTALE) {
 650  651                          resid = uiop->uio_resid;
 651  652                          offset = uiop->uio_loffset;
 652  653                          error = rp->r_error;
 653  654                          /*
  654  655                           * A close may have cleared r_error; if so,
  655  656                           * propagate the ESTALE error return properly.
 656  657                           */
 657  658                          if (error == 0)
 658  659                                  error = ESTALE;
 659  660                          goto bottom;
 660  661                  }
 661  662                  bufsize = MIN(uiop->uio_resid, mi->mi_curwrite);
 662  663                  base = kmem_alloc(bufsize, KM_SLEEP);
 663  664                  do {
 664  665                          resid = uiop->uio_resid;
 665  666                          offset = uiop->uio_loffset;
 666  667                          count = MIN(uiop->uio_resid, bufsize);
 667  668                          org_offset = uiop->uio_offset;
 668  669                          error = uiomove(base, count, UIO_WRITE, uiop);
 669  670                          if (!error) {
 670  671                                  error = nfswrite(vp, base, org_offset,
 671  672                                      count, cr);
 672  673                          }
 673  674                  } while (!error && uiop->uio_resid > 0);
 674  675                  kmem_free(base, bufsize);
 675  676                  goto bottom;
 676  677          }
 677  678  
 678  679          do {
 679  680                  off = uiop->uio_loffset & MAXBMASK; /* mapping offset */
 680  681                  on = uiop->uio_loffset & MAXBOFFSET; /* Relative offset */
 681  682                  n = MIN(MAXBSIZE - on, uiop->uio_resid);
 682  683  
 683  684                  resid = uiop->uio_resid;
 684  685                  offset = uiop->uio_loffset;
 685  686  
 686  687                  if (rp->r_flags & RSTALE) {
 687  688                          error = rp->r_error;
 688  689                          /*
  689  690                           * A close may have cleared r_error; if so,
  690  691                           * propagate the ESTALE error return properly.
 691  692                           */
 692  693                          if (error == 0)
 693  694                                  error = ESTALE;
 694  695                          break;
 695  696                  }
 696  697  
 697  698                  /*
 698  699                   * Don't create dirty pages faster than they
 699  700                   * can be cleaned so that the system doesn't
 700  701                   * get imbalanced.  If the async queue is
 701  702                   * maxed out, then wait for it to drain before
 702  703                   * creating more dirty pages.  Also, wait for
 703  704                   * any threads doing pagewalks in the vop_getattr
 704  705                   * entry points so that they don't block for
 705  706                   * long periods.
 706  707                   */
 707  708                  mutex_enter(&rp->r_statelock);
 708  709                  while ((mi->mi_max_threads != 0 &&
 709  710                      rp->r_awcount > 2 * mi->mi_max_threads) ||
 710  711                      rp->r_gcount > 0) {
 711  712                          if (INTR(vp)) {
 712  713                                  klwp_t *lwp = ttolwp(curthread);
 713  714  
 714  715                                  if (lwp != NULL)
 715  716                                          lwp->lwp_nostop++;
 716  717                                  if (!cv_wait_sig(&rp->r_cv, &rp->r_statelock)) {
 717  718                                          mutex_exit(&rp->r_statelock);
 718  719                                          if (lwp != NULL)
 719  720                                                  lwp->lwp_nostop--;
 720  721                                          error = EINTR;
 721  722                                          goto bottom;
 722  723                                  }
 723  724                                  if (lwp != NULL)
 724  725                                          lwp->lwp_nostop--;
 725  726                          } else
 726  727                                  cv_wait(&rp->r_cv, &rp->r_statelock);
 727  728                  }
 728  729                  mutex_exit(&rp->r_statelock);
 729  730  
 730  731                  /*
 731  732                   * Touch the page and fault it in if it is not in core
 732  733                   * before segmap_getmapflt or vpm_data_copy can lock it.
  733  734                   * This avoids a deadlock when the buffer is mapped
  734  735                   * through mmap to the same file that we want to write.
 735  736                   */
 736  737                  uio_prefaultpages((long)n, uiop);
 737  738  
 738  739                  if (vpm_enable) {
 739  740                          /*
 740  741                           * It will use kpm mappings, so no need to
 741  742                           * pass an address.
 742  743                           */
 743  744                          error = writerp(rp, NULL, n, uiop, 0);
 744  745                  } else  {
 745  746                          if (segmap_kpm) {
 746  747                                  int pon = uiop->uio_loffset & PAGEOFFSET;
 747  748                                  size_t pn = MIN(PAGESIZE - pon,
 748  749                                      uiop->uio_resid);
 749  750                                  int pagecreate;
 750  751  
 751  752                                  mutex_enter(&rp->r_statelock);
 752  753                                  pagecreate = (pon == 0) && (pn == PAGESIZE ||
 753  754                                      uiop->uio_loffset + pn >= rp->r_size);
 754  755                                  mutex_exit(&rp->r_statelock);
 755  756  
 756  757                                  base = segmap_getmapflt(segkmap, vp, off + on,
 757  758                                      pn, !pagecreate, S_WRITE);
 758  759  
 759  760                                  error = writerp(rp, base + pon, n, uiop,
 760  761                                      pagecreate);
 761  762  
 762  763                          } else {
 763  764                                  base = segmap_getmapflt(segkmap, vp, off + on,
 764  765                                      n, 0, S_READ);
 765  766                                  error = writerp(rp, base + on, n, uiop, 0);
 766  767                          }
 767  768                  }
 768  769  
 769  770                  if (!error) {
 770  771                          if (mi->mi_flags & MI_NOAC)
 771  772                                  flags = SM_WRITE;
 772  773                          else if (n + on == MAXBSIZE || IS_SWAPVP(vp)) {
 773  774                                  /*
 774  775                                   * Have written a whole block.
 775  776                                   * Start an asynchronous write
 776  777                                   * and mark the buffer to
 777  778                                   * indicate that it won't be
 778  779                                   * needed again soon.
 779  780                                   */
 780  781                                  flags = SM_WRITE | SM_ASYNC | SM_DONTNEED;
 781  782                          } else
 782  783                                  flags = 0;
 783  784                          if ((ioflag & (FSYNC|FDSYNC)) ||
 784  785                              (rp->r_flags & ROUTOFSPACE)) {
 785  786                                  flags &= ~SM_ASYNC;
 786  787                                  flags |= SM_WRITE;
 787  788                          }
 788  789                          if (vpm_enable) {
 789  790                                  error = vpm_sync_pages(vp, off, n, flags);
 790  791                          } else {
 791  792                                  error = segmap_release(segkmap, base, flags);
 792  793                          }
 793  794                  } else {
 794  795                          if (vpm_enable) {
 795  796                                  (void) vpm_sync_pages(vp, off, n, 0);
 796  797                          } else {
 797  798                                  (void) segmap_release(segkmap, base, 0);
 798  799                          }
 799  800                          /*
 800  801                           * In the event that we got an access error while
 801  802                           * faulting in a page for a write-only file just
  802  803                   * faulting in a page for a write-only file, just
 803  804                           */
 804  805                          if (error == EACCES)
 805  806                                  goto nfs_fwrite;
 806  807                  }
 807  808          } while (!error && uiop->uio_resid > 0);
 808  809  
 809  810  bottom:
 810  811          if (error) {
 811  812                  uiop->uio_resid = resid + remainder;
 812  813                  uiop->uio_loffset = offset;
 813  814          } else
 814  815                  uiop->uio_resid += remainder;
 815  816  
 816  817          nfs_rw_exit(&rp->r_lkserlock);
 817  818  
 818  819          return (error);
 819  820  }
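
The RLIMIT_FSIZE clamp near the top of nfs_write() trims the transfer so it ends exactly at the limit, then adds the trimmed byte count back to uio_resid on the way out, so the caller observes a short write rather than silent truncation. The arithmetic in isolation, as a small self-contained sketch with invented names:

#include <stdio.h>

/*
 * Given a write of 'resid' bytes starting at offset 'start' under a
 * file-size limit 'limit', compute how many bytes may proceed and how
 * many are held back, as nfs_write() does with uio_resid/remainder.
 */
static void
clamp_write(long start, long resid, long limit,
    long *allowed, long *remainder)
{
	long end = start + resid;

	if (end > limit) {
		*remainder = end - limit;	/* trimmed off the tail */
		*allowed = limit - start;	/* <= 0 means EFBIG */
	} else {
		*remainder = 0;
		*allowed = resid;
	}
}

int
main(void)
{
	long allowed, rem;

	/* A 100-byte write at offset 950 against a 1000-byte limit. */
	clamp_write(950, 100, 1000, &allowed, &rem);
	printf("allowed=%ld remainder=%ld\n", allowed, rem); /* 50, 50 */
	return (0);
}
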
 820  821  
 821  822  /*
 822  823   * Flags are composed of {B_ASYNC, B_INVAL, B_FREE, B_DONTNEED}
 823  824   */
 824  825  static int
 825  826  nfs_rdwrlbn(vnode_t *vp, page_t *pp, u_offset_t off, size_t len,
 826  827          int flags, cred_t *cr)
 827  828  {
 828  829          struct buf *bp;
 829  830          int error;
 830  831  
 831  832          ASSERT(nfs_zone() == VTOMI(vp)->mi_zone);
 832  833          bp = pageio_setup(pp, len, vp, flags);
 833  834          ASSERT(bp != NULL);
 834  835  
 835  836          /*
 836  837           * pageio_setup should have set b_addr to 0.  This
 837  838           * is correct since we want to do I/O on a page
 838  839           * boundary.  bp_mapin will use this addr to calculate
 839  840           * an offset, and then set b_addr to the kernel virtual
 840  841           * address it allocated for us.
 841  842           */
 842  843          ASSERT(bp->b_un.b_addr == 0);
 843  844  
 844  845          bp->b_edev = 0;
 845  846          bp->b_dev = 0;
 846  847          bp->b_lblkno = lbtodb(off);
 847  848          bp->b_file = vp;
 848  849          bp->b_offset = (offset_t)off;
 849  850          bp_mapin(bp);
 850  851  
 851  852          error = nfs_bio(bp, cr);
 852  853  
 853  854          bp_mapout(bp);
 854  855          pageio_done(bp);
 855  856  
 856  857          return (error);
 857  858  }
 858  859  
 859  860  /*
  860  861   * Write to a file.  Writes to the remote server in the largest size
  861  862   * chunks that the server can handle.  The write is synchronous.
 862  863   */
 863  864  static int
 864  865  nfswrite(vnode_t *vp, caddr_t base, uint_t offset, int count, cred_t *cr)
 865  866  {
 866  867          rnode_t *rp;
 867  868          mntinfo_t *mi;
 868  869          struct nfswriteargs wa;
 869  870          struct nfsattrstat ns;
 870  871          int error;
 871  872          int tsize;
 872  873          int douprintf;
 873  874  
 874  875          douprintf = 1;
 875  876  
 876  877          rp = VTOR(vp);
 877  878          mi = VTOMI(vp);
 878  879  
 879  880          ASSERT(nfs_zone() == mi->mi_zone);
 880  881  
 881  882          wa.wa_args = &wa.wa_args_buf;
 882  883          wa.wa_fhandle = *VTOFH(vp);
 883  884  
 884  885          do {
 885  886                  tsize = MIN(mi->mi_curwrite, count);
 886  887                  wa.wa_data = base;
 887  888                  wa.wa_begoff = offset;
 888  889                  wa.wa_totcount = tsize;
 889  890                  wa.wa_count = tsize;
 890  891                  wa.wa_offset = offset;
 891  892  
 892  893                  if (mi->mi_io_kstats) {
 893  894                          mutex_enter(&mi->mi_lock);
 894  895                          kstat_runq_enter(KSTAT_IO_PTR(mi->mi_io_kstats));
 895  896                          mutex_exit(&mi->mi_lock);
 896  897                  }
 897  898                  wa.wa_mblk = NULL;
 898  899                  do {
 899  900                          error = rfs2call(mi, RFS_WRITE,
 900  901                              xdr_writeargs, (caddr_t)&wa,
 901  902                              xdr_attrstat, (caddr_t)&ns, cr,
 902  903                              &douprintf, &ns.ns_status, 0, NULL);
 903  904                  } while (error == ENFS_TRYAGAIN);
 904  905                  if (mi->mi_io_kstats) {
 905  906                          mutex_enter(&mi->mi_lock);
 906  907                          kstat_runq_exit(KSTAT_IO_PTR(mi->mi_io_kstats));
 907  908                          mutex_exit(&mi->mi_lock);
 908  909                  }
 909  910  
 910  911                  if (!error) {
 911  912                          error = geterrno(ns.ns_status);
 912  913                          /*
 913  914                           * Can't check for stale fhandle and purge caches
 914  915                           * here because pages are held by nfs_getpage.
 915  916                           * Just mark the attribute cache as timed out
 916  917                           * and set RWRITEATTR to indicate that the file
 917  918                           * was modified with a WRITE operation.
 918  919                           */
 919  920                          if (!error) {
 920  921                                  count -= tsize;
 921  922                                  base += tsize;
 922  923                                  offset += tsize;
 923  924                                  if (mi->mi_io_kstats) {
 924  925                                          mutex_enter(&mi->mi_lock);
 925  926                                          KSTAT_IO_PTR(mi->mi_io_kstats)->
 926  927                                              writes++;
 927  928                                          KSTAT_IO_PTR(mi->mi_io_kstats)->
 928  929                                              nwritten += tsize;
 929  930                                          mutex_exit(&mi->mi_lock);
 930  931                                  }
 931  932                                  lwp_stat_update(LWP_STAT_OUBLK, 1);
 932  933                                  mutex_enter(&rp->r_statelock);
 933  934                                  PURGE_ATTRCACHE_LOCKED(rp);
 934  935                                  rp->r_flags |= RWRITEATTR;
 935  936                                  mutex_exit(&rp->r_statelock);
 936  937                          }
 937  938                  }
 938  939          } while (!error && count);
 939  940  
 940  941          return (error);
 941  942  }
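
Both nfswrite() and nfsread() wrap their RPC in the same idiom: repeat the call while the transport reports ENFS_TRYAGAIN, then, once the RPC itself succeeded, translate the NFS status word into an errno with geterrno(). A skeleton of that shape with a mocked RPC call; MOCK_TRYAGAIN is a stand-in, not the real ENFS_TRYAGAIN value:

#include <stdio.h>

#define	MOCK_TRYAGAIN	(-5)	/* stand-in for ENFS_TRYAGAIN */

/* Mock RPC: fails twice with the retryable code, then succeeds. */
static int
mock_rfs_call(int *status)
{
	static int attempts;

	if (attempts++ < 2)
		return (MOCK_TRYAGAIN);
	*status = 0;			/* NFS_OK */
	return (0);
}

int
main(void)
{
	int error, status = 0;

	do {
		error = mock_rfs_call(&status);
	} while (error == MOCK_TRYAGAIN);

	if (!error)
		error = status;		/* geterrno(ns_status) analogue */
	printf("error=%d\n", error);
	return (0);
}
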
 942  943  
 943  944  /*
  944  945   * Read from a file.  Reads data in the largest chunks our interface can handle.
 945  946   */
 946  947  static int
 947  948  nfsread(vnode_t *vp, caddr_t base, uint_t offset,
 948  949      int count, size_t *residp, cred_t *cr)
 949  950  {
 950  951          mntinfo_t *mi;
 951  952          struct nfsreadargs ra;
 952  953          struct nfsrdresult rr;
 953  954          int tsize;
 954  955          int error;
 955  956          int douprintf;
 956  957          failinfo_t fi;
 957  958          rnode_t *rp;
 958  959          struct vattr va;
 959  960          hrtime_t t;
 960  961  
 961  962          rp = VTOR(vp);
 962  963          mi = VTOMI(vp);
 963  964  
 964  965          ASSERT(nfs_zone() == mi->mi_zone);
 965  966  
 966  967          douprintf = 1;
 967  968  
 968  969          ra.ra_fhandle = *VTOFH(vp);
 969  970  
 970  971          fi.vp = vp;
 971  972          fi.fhp = (caddr_t)&ra.ra_fhandle;
 972  973          fi.copyproc = nfscopyfh;
 973  974          fi.lookupproc = nfslookup;
 974  975          fi.xattrdirproc = acl_getxattrdir2;
 975  976  
 976  977          do {
 977  978                  if (mi->mi_io_kstats) {
 978  979                          mutex_enter(&mi->mi_lock);
 979  980                          kstat_runq_enter(KSTAT_IO_PTR(mi->mi_io_kstats));
 980  981                          mutex_exit(&mi->mi_lock);
 981  982                  }
 982  983  
 983  984                  do {
 984  985                          tsize = MIN(mi->mi_curread, count);
 985  986                          rr.rr_data = base;
 986  987                          ra.ra_offset = offset;
 987  988                          ra.ra_totcount = tsize;
 988  989                          ra.ra_count = tsize;
 989  990                          ra.ra_data = base;
 990  991                          t = gethrtime();
 991  992                          error = rfs2call(mi, RFS_READ,
 992  993                              xdr_readargs, (caddr_t)&ra,
 993  994                              xdr_rdresult, (caddr_t)&rr, cr,
 994  995                              &douprintf, &rr.rr_status, 0, &fi);
 995  996                  } while (error == ENFS_TRYAGAIN);
 996  997  
 997  998                  if (mi->mi_io_kstats) {
 998  999                          mutex_enter(&mi->mi_lock);
 999 1000                          kstat_runq_exit(KSTAT_IO_PTR(mi->mi_io_kstats));
1000 1001                          mutex_exit(&mi->mi_lock);
1001 1002                  }
1002 1003  
1003 1004                  if (!error) {
1004 1005                          error = geterrno(rr.rr_status);
1005 1006                          if (!error) {
1006 1007                                  count -= rr.rr_count;
1007 1008                                  base += rr.rr_count;
1008 1009                                  offset += rr.rr_count;
1009 1010                                  if (mi->mi_io_kstats) {
1010 1011                                          mutex_enter(&mi->mi_lock);
1011 1012                                          KSTAT_IO_PTR(mi->mi_io_kstats)->reads++;
1012 1013                                          KSTAT_IO_PTR(mi->mi_io_kstats)->nread +=
1013 1014                                              rr.rr_count;
1014 1015                                          mutex_exit(&mi->mi_lock);
1015 1016                                  }
1016 1017                                  lwp_stat_update(LWP_STAT_INBLK, 1);
1017 1018                          }
1018 1019                  }
1019 1020          } while (!error && count && rr.rr_count == tsize);
1020 1021  
1021 1022          *residp = count;
1022 1023  
1023 1024          if (!error) {
1024 1025                  /*
1025 1026                   * Since no error occurred, we have the current
1026 1027                   * attributes and we need to do a cache check and then
1027 1028                   * potentially update the cached attributes.  We can't
1028 1029                   * use the normal attribute check and cache mechanisms
1029 1030                   * because they might cause a cache flush which would
1030 1031                   * deadlock.  Instead, we just check the cache to see
 1031 1032                   * if the attributes have changed.  If they have, then we
1032 1033                   * just mark the attributes as out of date.  The next
1033 1034                   * time that the attributes are checked, they will be
1034 1035                   * out of date, new attributes will be fetched, and
1035 1036                   * the page cache will be flushed.  If the attributes
1036 1037                   * weren't changed, then we just update the cached
1037 1038                   * attributes with these attributes.
1038 1039                   */
1039 1040                  /*
1040 1041                   * If NFS_ACL is supported on the server, then the
1041 1042                   * attributes returned by server may have minimal
1042 1043                   * permissions sometimes denying access to users having
1043 1044                   * proper access.  To get the proper attributes, mark
1044 1045                   * the attributes as expired so that they will be
 1045 1046                   * retrieved again via the NFS_ACL GETATTR2 procedure.
1046 1047                   */
1047 1048                  error = nattr_to_vattr(vp, &rr.rr_attr, &va);
1048 1049                  mutex_enter(&rp->r_statelock);
1049 1050                  if (error || !CACHE_VALID(rp, va.va_mtime, va.va_size) ||
1050 1051                      (mi->mi_flags & MI_ACL)) {
1051 1052                          mutex_exit(&rp->r_statelock);
1052 1053                          PURGE_ATTRCACHE(vp);
1053 1054                  } else {
1054 1055                          if (rp->r_mtime <= t) {
1055 1056                                  nfs_attrcache_va(vp, &va);
1056 1057                          }
1057 1058                          mutex_exit(&rp->r_statelock);
1058 1059                  }
1059 1060          }
1060 1061  
1061 1062          return (error);
1062 1063  }
1063 1064  
1064 1065  /* ARGSUSED */
1065 1066  static int
1066 1067  nfs_ioctl(vnode_t *vp, int cmd, intptr_t arg, int flag, cred_t *cr, int *rvalp,
1067 1068          caller_context_t *ct)
1068 1069  {
1069 1070  
1070 1071          if (nfs_zone() != VTOMI(vp)->mi_zone)
1071 1072                  return (EIO);
1072 1073          switch (cmd) {
1073 1074                  case _FIODIRECTIO:
1074 1075                          return (nfs_directio(vp, (int)arg, cr));
1075 1076                  default:
1076 1077                          return (ENOTTY);
1077 1078          }
1078 1079  }
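
The only ioctl the client handles is _FIODIRECTIO, which toggles client-side direct I/O on a per-file basis. From userland this is normally reached through directio(3C) on Solaris/illumos; a short example (the path is illustrative):

#include <sys/types.h>
#include <sys/fcntl.h>
#include <fcntl.h>
#include <stdio.h>

int
main(void)
{
	int fd;

	/* Path is illustrative; any file on an NFS mount will do. */
	if ((fd = open("/net/server/export/file", O_RDONLY)) < 0) {
		perror("open");
		return (1);
	}
	/* Bypass the client page cache: reaches nfs_directio() above. */
	if (directio(fd, DIRECTIO_ON) < 0)
		perror("directio");
	/* ... reads now take the VM-bypass path in nfs_read() ... */
	(void) directio(fd, DIRECTIO_OFF);
	return (0);
}
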
1079 1080  
1080 1081  /* ARGSUSED */
1081 1082  static int
1082 1083  nfs_getattr(vnode_t *vp, struct vattr *vap, int flags, cred_t *cr,
1083 1084          caller_context_t *ct)
1084 1085  {
1085 1086          int error;
1086 1087          rnode_t *rp;
1087 1088  
1088 1089          if (nfs_zone() != VTOMI(vp)->mi_zone)
1089 1090                  return (EIO);
1090 1091          /*
1091 1092           * If it has been specified that the return value will
1092 1093           * just be used as a hint, and we are only being asked
1093 1094           * for size, fsid or rdevid, then return the client's
1094 1095           * notion of these values without checking to make sure
1095 1096           * that the attribute cache is up to date.
1096 1097           * The whole point is to avoid an over the wire GETATTR
1097 1098           * call.
1098 1099           */
1099 1100          rp = VTOR(vp);
1100 1101          if (flags & ATTR_HINT) {
1101 1102                  if (vap->va_mask ==
1102 1103                      (vap->va_mask & (AT_SIZE | AT_FSID | AT_RDEV))) {
1103 1104                          mutex_enter(&rp->r_statelock);
 1104 1105                          if (vap->va_mask & AT_SIZE)
 1105 1106                                  vap->va_size = rp->r_size;
 1106 1107                          if (vap->va_mask & AT_FSID)
 1107 1108                                  vap->va_fsid = rp->r_attr.va_fsid;
 1108 1109                          if (vap->va_mask & AT_RDEV)
1109 1110                                  vap->va_rdev = rp->r_attr.va_rdev;
1110 1111                          mutex_exit(&rp->r_statelock);
1111 1112                          return (0);
1112 1113                  }
1113 1114          }
1114 1115  
1115 1116          /*
1116 1117           * Only need to flush pages if asking for the mtime
1117 1118           * and if there are any dirty pages or any outstanding
1118 1119           * asynchronous (write) requests for this file.
1119 1120           */
1120 1121          if (vap->va_mask & AT_MTIME) {
1121 1122                  if (vn_has_cached_data(vp) &&
1122 1123                      ((rp->r_flags & RDIRTY) || rp->r_awcount > 0)) {
1123 1124                          mutex_enter(&rp->r_statelock);
1124 1125                          rp->r_gcount++;
1125 1126                          mutex_exit(&rp->r_statelock);
1126 1127                          error = nfs_putpage(vp, (offset_t)0, 0, 0, cr, ct);
1127 1128                          mutex_enter(&rp->r_statelock);
1128 1129                          if (error && (error == ENOSPC || error == EDQUOT)) {
1129 1130                                  if (!rp->r_error)
1130 1131                                          rp->r_error = error;
1131 1132                          }
1132 1133                          if (--rp->r_gcount == 0)
1133 1134                                  cv_broadcast(&rp->r_cv);
1134 1135                          mutex_exit(&rp->r_statelock);
1135 1136                  }
1136 1137          }
1137 1138  
1138 1139          return (nfsgetattr(vp, vap, cr));
1139 1140  }
1140 1141  
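
The ATTR_HINT fast path above serves size, fsid and rdev straight from the cached rnode, with no GETATTR on the wire. A minimal in-kernel sketch of a caller, assuming the usual kernel headers; cached_size() is a hypothetical helper:

/*
 * Fetch the client's cached notion of a file's size without forcing an
 * over-the-wire GETATTR.  This works only because the mask is a subset
 * of AT_SIZE | AT_FSID | AT_RDEV and ATTR_HINT is set.
 */
static int
cached_size(vnode_t *vp, cred_t *cr, u_offset_t *sizep)
{
        vattr_t va;
        int error;

        va.va_mask = AT_SIZE;
        error = VOP_GETATTR(vp, &va, ATTR_HINT, cr, NULL);
        if (error == 0)
                *sizep = va.va_size;
        return (error);
}
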
1141 1142  /*ARGSUSED4*/
1142 1143  static int
1143 1144  nfs_setattr(vnode_t *vp, struct vattr *vap, int flags, cred_t *cr,
1144 1145                  caller_context_t *ct)
1145 1146  {
1146 1147          int error;
1147 1148          uint_t mask;
1148 1149          struct vattr va;
1149 1150  
1150 1151          mask = vap->va_mask;
1151 1152  
1152 1153          if (mask & AT_NOSET)
1153 1154                  return (EINVAL);
1154 1155  
1155 1156          if ((mask & AT_SIZE) &&
1156 1157              vap->va_type == VREG &&
1157 1158              vap->va_size > MAXOFF32_T)
1158 1159                  return (EFBIG);
1159 1160  
1160 1161          if (nfs_zone() != VTOMI(vp)->mi_zone)
1161 1162                  return (EIO);
1162 1163  
1163 1164          va.va_mask = AT_UID | AT_MODE;
1164 1165  
1165 1166          error = nfsgetattr(vp, &va, cr);
1166 1167          if (error)
1167 1168                  return (error);
1168 1169  
1169 1170          error = secpolicy_vnode_setattr(cr, vp, vap, &va, flags, nfs_accessx,
1170 1171              vp);
1171 1172  
1172 1173          if (error)
1173 1174                  return (error);
1174 1175  
1175 1176          error = nfssetattr(vp, vap, flags, cr);
1176 1177  
1177 1178          if (error == 0 && (mask & AT_SIZE) && vap->va_size == 0)
1178 1179                  vnevent_truncate(vp, ct);
1179 1180  
1180 1181          return (error);
1181 1182  }
1182 1183  
1183 1184  static int
1184 1185  nfssetattr(vnode_t *vp, struct vattr *vap, int flags, cred_t *cr)
1185 1186  {
1186 1187          int error;
1187 1188          uint_t mask;
1188 1189          struct nfssaargs args;
1189 1190          struct nfsattrstat ns;
1190 1191          int douprintf;
1191 1192          rnode_t *rp;
1192 1193          struct vattr va;
1193 1194          mode_t omode;
1194 1195          mntinfo_t *mi;
1195 1196          vsecattr_t *vsp;
1196 1197          hrtime_t t;
1197 1198  
1198 1199          mask = vap->va_mask;
1199 1200  
1200 1201          ASSERT(nfs_zone() == VTOMI(vp)->mi_zone);
1201 1202  
1202 1203          rp = VTOR(vp);
1203 1204  
1204 1205          /*
1205 1206           * Only need to flush pages if there are any pages and
1206 1207           * if the file is marked as dirty in some fashion.  The
1207 1208           * file must be flushed so that we can accurately
1208 1209           * determine the size of the file and the cached data
1209 1210           * after the SETATTR returns.  A file is considered to
1210 1211           * be dirty if it is either marked with RDIRTY, has
1211 1212           * outstanding i/o's active, or is mmap'd.  In this
1212 1213           * last case, we can't tell whether there are dirty
1213 1214           * pages, so we flush just to be sure.
1214 1215           */
1215 1216          if (vn_has_cached_data(vp) &&
1216 1217              ((rp->r_flags & RDIRTY) ||
1217 1218              rp->r_count > 0 ||
1218 1219              rp->r_mapcnt > 0)) {
1219 1220                  ASSERT(vp->v_type != VCHR);
1220 1221                  error = nfs_putpage(vp, (offset_t)0, 0, 0, cr, NULL);
1221 1222                  if (error && (error == ENOSPC || error == EDQUOT)) {
1222 1223                          mutex_enter(&rp->r_statelock);
1223 1224                          if (!rp->r_error)
1224 1225                                  rp->r_error = error;
1225 1226                          mutex_exit(&rp->r_statelock);
1226 1227                  }
1227 1228          }
1228 1229  
1229 1230          /*
1230 1231           * If the system call was utime(2) or utimes(2) and the
1231 1232           * application did not specify the times, then set the
1232 1233           * mtime nanosecond field to 1 billion.  This will get
1233 1234           * translated from 1 billion nanoseconds to 1 million
1234 1235           * microseconds in the over the wire request.  The
1235 1236           * server will use 1 million in the microsecond field
1236 1237           * to tell whether both the mtime and atime should be
1237 1238           * set to the server's current time.
1238 1239           *
1239 1240           * This is an overload of the protocol and should be
1240 1241           * documented in the NFS Version 2 protocol specification.
1241 1242           */
1242 1243          if ((mask & AT_MTIME) && !(flags & ATTR_UTIME)) {
1243 1244                  vap->va_mtime.tv_nsec = 1000000000;
1244 1245                  if (NFS_TIME_T_OK(vap->va_mtime.tv_sec) &&
1245 1246                      NFS_TIME_T_OK(vap->va_atime.tv_sec)) {
1246 1247                          error = vattr_to_sattr(vap, &args.saa_sa);
1247 1248                  } else {
1248 1249                          /*
1249 1250                           * Use server times. vap time values will not be used.
1250 1251                           * To ensure no time overflow, make sure vap has
1251 1252                           * valid values, but retain the original values.
1252 1253                           */
1253 1254                          timestruc_t     mtime = vap->va_mtime;
1254 1255                          timestruc_t     atime = vap->va_atime;
1255 1256                          time_t          now;
1256 1257  
1257 1258                          now = gethrestime_sec();
1258 1259                          if (NFS_TIME_T_OK(now)) {
1259 1260                                  /* Just in case server does not know of this */
1260 1261                                  vap->va_mtime.tv_sec = now;
1261 1262                                  vap->va_atime.tv_sec = now;
1262 1263                          } else {
1263 1264                                  vap->va_mtime.tv_sec = 0;
1264 1265                                  vap->va_atime.tv_sec = 0;
1265 1266                          }
1266 1267                          error = vattr_to_sattr(vap, &args.saa_sa);
1267 1268                          /* set vap times back on */
1268 1269                          vap->va_mtime = mtime;
1269 1270                          vap->va_atime = atime;
1270 1271                  }
1271 1272          } else {
1272 1273                  /* Either do not set times or use the client specified times */
1273 1274                  error = vattr_to_sattr(vap, &args.saa_sa);
1274 1275          }
1275 1276          if (error) {
1276 1277                  /* req time field(s) overflow - return immediately */
1277 1278                  return (error);
1278 1279          }
1279 1280          args.saa_fh = *VTOFH(vp);
1280 1281  
1281 1282          va.va_mask = AT_MODE;
1282 1283          error = nfsgetattr(vp, &va, cr);
1283 1284          if (error)
1284 1285                  return (error);
1285 1286          omode = va.va_mode;
1286 1287  
1287 1288          mi = VTOMI(vp);
1288 1289  
1289 1290          douprintf = 1;
1290 1291  
1291 1292          t = gethrtime();
1292 1293  
1293 1294          error = rfs2call(mi, RFS_SETATTR,
1294 1295              xdr_saargs, (caddr_t)&args,
1295 1296              xdr_attrstat, (caddr_t)&ns, cr,
1296 1297              &douprintf, &ns.ns_status, 0, NULL);
1297 1298  
1298 1299          /*
1299 1300           * Purge the access cache and ACL cache if changing either the
1300 1301           * owner of the file, the group owner, or the mode.  These may
1301 1302           * change the access permissions of the file, so purge old
1302 1303           * information and start over again.
1303 1304           */
1304 1305          if ((mask & (AT_UID | AT_GID | AT_MODE)) && (mi->mi_flags & MI_ACL)) {
1305 1306                  (void) nfs_access_purge_rp(rp);
1306 1307                  if (rp->r_secattr != NULL) {
1307 1308                          mutex_enter(&rp->r_statelock);
1308 1309                          vsp = rp->r_secattr;
1309 1310                          rp->r_secattr = NULL;
1310 1311                          mutex_exit(&rp->r_statelock);
1311 1312                          if (vsp != NULL)
1312 1313                                  nfs_acl_free(vsp);
1313 1314                  }
1314 1315          }
1315 1316  
1316 1317          if (!error) {
1317 1318                  error = geterrno(ns.ns_status);
1318 1319                  if (!error) {
1319 1320                          /*
1320 1321                           * If changing the size of the file, invalidate
1321 1322                           * any local cached data which is no longer part
1322 1323                           * of the file.  We also possibly invalidate the
1323 1324                           * last page in the file.  We could use
1324 1325                           * pvn_vpzero(), but this would mark the page as
1325 1326                           * modified and require it to be written back to
1326 1327                           * the server for no particularly good reason.
1327 1328                           * This way, if we access it, then we bring it
1328 1329                           * back in.  A read should be cheaper than a
1329 1330                           * write.
1330 1331                           */
1331 1332                          if (mask & AT_SIZE) {
1332 1333                                  nfs_invalidate_pages(vp,
1333 1334                                      (vap->va_size & PAGEMASK), cr);
1334 1335                          }
1335 1336                          (void) nfs_cache_fattr(vp, &ns.ns_attr, &va, t, cr);
1336 1337                          /*
1337 1338                           * If NFS_ACL is supported on the server, then the
1338 1339                           * attributes returned by the server may have minimal
1339 1340                           * permissions, sometimes denying access to users having
1340 1341                           * proper access.  To get the proper attributes, mark
1341 1342                           * the attributes as expired so that they will be
1342 1343                           * refetched via the NFS_ACL GETATTR2 procedure.
1343 1344                           */
1344 1345                          if (mi->mi_flags & MI_ACL) {
1345 1346                                  PURGE_ATTRCACHE(vp);
1346 1347                          }
1347 1348                          /*
1348 1349                           * This next check attempts to deal with NFS
1349 1350                           * servers which can not handle increasing
1350 1351                           * the size of the file via setattr.  Most
1351 1352                           * of these servers do not return an error,
1352 1353                           * but do not change the size of the file.
1353 1354                           * Hence, this check and then attempt to set
1354 1355                           * the file size by writing 1 byte at the
1355 1356                           * offset of the end of the file that we need.
1356 1357                           */
1357 1358                          if ((mask & AT_SIZE) &&
1358 1359                              ns.ns_attr.na_size < (uint32_t)vap->va_size) {
1359 1360                                  char zb = '\0';
1360 1361  
1361 1362                                  error = nfswrite(vp, &zb,
1362 1363                                      vap->va_size - sizeof (zb),
1363 1364                                      sizeof (zb), cr);
1364 1365                          }
1365 1366                          /*
1366 1367                           * Some servers will change the mode to clear the setuid
1367 1368                           * and setgid bits when changing the uid or gid.  The
1368 1369                           * client needs to compensate appropriately.
1369 1370                           */
1370 1371                          if (mask & (AT_UID | AT_GID)) {
1371 1372                                  int terror;
1372 1373  
1373 1374                                  va.va_mask = AT_MODE;
1374 1375                                  terror = nfsgetattr(vp, &va, cr);
1375 1376                                  if (!terror &&
1376 1377                                      (((mask & AT_MODE) &&
1377 1378                                      va.va_mode != vap->va_mode) ||
1378 1379                                      (!(mask & AT_MODE) &&
1379 1380                                      va.va_mode != omode))) {
1380 1381                                          va.va_mask = AT_MODE;
1381 1382                                          if (mask & AT_MODE)
1382 1383                                                  va.va_mode = vap->va_mode;
1383 1384                                          else
1384 1385                                                  va.va_mode = omode;
1385 1386                                          (void) nfssetattr(vp, &va, 0, cr);
1386 1387                                  }
1387 1388                          }
1388 1389                  } else {
1389 1390                          PURGE_ATTRCACHE(vp);
1390 1391                          PURGE_STALE_FH(error, vp, cr);
1391 1392                  }
1392 1393          } else {
1393 1394                  PURGE_ATTRCACHE(vp);
1394 1395          }
1395 1396  
1396 1397          return (error);
1397 1398  }
1398 1399  
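
The 1-billion-nanosecond marker described in nfssetattr() survives the nanosecond-to-microsecond conversion as exactly 1000000, a value no legitimate timestamp can carry (usec is always less than 1000000). A sketch of the wire-side conversion; sattr_time() is a hypothetical stand-in for the relevant part of vattr_to_sattr():

/*
 * Convert a timestruc_t to the v2 wire representation.  A tv_nsec of
 * 1000000000 becomes tv_usec == 1000000, which the server reads as
 * "set both mtime and atime to the server's current time".
 */
static void
sattr_time(int32_t *wire_sec, int32_t *wire_usec, const timestruc_t *ts)
{
        *wire_sec = (int32_t)ts->tv_sec;
        *wire_usec = (int32_t)(ts->tv_nsec / 1000);     /* 1e9 -> 1e6 */
}
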
1399 1400  static int
1400 1401  nfs_accessx(void *vp, int mode, cred_t *cr)
1401 1402  {
1402 1403          ASSERT(nfs_zone() == VTOMI((vnode_t *)vp)->mi_zone);
1403 1404          return (nfs_access(vp, mode, 0, cr, NULL));
1404 1405  }
1405 1406  
1406 1407  /* ARGSUSED */
1407 1408  static int
1408 1409  nfs_access(vnode_t *vp, int mode, int flags, cred_t *cr, caller_context_t *ct)
1409 1410  {
1410 1411          struct vattr va;
1411 1412          int error;
1412 1413          mntinfo_t *mi;
1413 1414          int shift = 0;
1414 1415  
1415 1416          mi = VTOMI(vp);
1416 1417  
1417 1418          if (nfs_zone() != mi->mi_zone)
1418 1419                  return (EIO);
1419 1420          if (mi->mi_flags & MI_ACL) {
1420 1421                  error = acl_access2(vp, mode, flags, cr);
1421 1422                  if (mi->mi_flags & MI_ACL)
1422 1423                          return (error);
1423 1424          }
1424 1425  
1425 1426          va.va_mask = AT_MODE | AT_UID | AT_GID;
1426 1427          error = nfsgetattr(vp, &va, cr);
1427 1428          if (error)
1428 1429                  return (error);
1429 1430  
1430 1431          /*
1431 1432           * Disallow write attempts on read-only
1432 1433           * file systems, unless the file is a
1433 1434           * device node.
1434 1435           */
1435 1436          if ((mode & VWRITE) && vn_is_readonly(vp) && !IS_DEVVP(vp))
1436 1437                  return (EROFS);
1437 1438  
1438 1439          /*
1439 1440           * Disallow attempts to access mandatory lock files.
1440 1441           */
1441 1442          if ((mode & (VWRITE | VREAD | VEXEC)) &&
1442 1443              MANDLOCK(vp, va.va_mode))
1443 1444                  return (EACCES);
1444 1445  
1445 1446          /*
1446 1447           * Access check is based on only
1447 1448           * one of owner, group, public.
1448 1449           * If not owner, then check group.
1449 1450           * If not a member of the group,
1450 1451           * then check public access.
1451 1452           */
  
  
1452 1453          if (crgetuid(cr) != va.va_uid) {
1453 1454                  shift += 3;
1454 1455                  if (!groupmember(va.va_gid, cr))
1455 1456                          shift += 3;
1456 1457          }
1457 1458  
1458 1459          return (secpolicy_vnode_access2(cr, vp, va.va_uid,
1459 1460              va.va_mode << shift, mode));
1460 1461  }
1461 1462  
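
The shift logic at the end of nfs_access() folds the owner/group/other distinction into a single comparison against the requested VREAD/VWRITE/VEXEC bits. A small worked sketch; class_bits() is a hypothetical helper:

/*
 * Move the relevant permission class into the owner bit positions.
 * shift is 0 for the owner, 3 for a group member, 6 for everyone else.
 * For a file with mode 0754: the owner sees 0700, a group member sees
 * 0500, and others see 0400 -- exactly what the shifted mode passed to
 * secpolicy_vnode_access2() expresses.
 */
static mode_t
class_bits(mode_t file_mode, int shift)
{
        return ((file_mode << shift) & 0700);
}
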
1462      -static int nfs_do_symlink_cache = 1;
     1463 +volatile int nfs_do_symlink_cache = 1;
1463 1464  
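
The volatile qualifier added above keeps the compiler from constant-folding a tunable that is only ever written from outside the compiled code, for example when patched at runtime with mdb -kw. A minimal illustration with hypothetical names:

static int plain_tunable = 1;           /* branch may be folded at build time */
volatile int live_tunable = 1;          /* re-read on every test */

void
tunable_demo(void)
{
        if (plain_tunable) {
                /* the compiler may hard-wire this path */
        }
        if (live_tunable) {
                /* always loads the current, possibly patched, value */
        }
}
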
1464 1465  /* ARGSUSED */
1465 1466  static int
1466 1467  nfs_readlink(vnode_t *vp, struct uio *uiop, cred_t *cr, caller_context_t *ct)
1467 1468  {
1468 1469          int error;
1469 1470          struct nfsrdlnres rl;
1470 1471          rnode_t *rp;
1471 1472          int douprintf;
1472 1473          failinfo_t fi;
1473 1474  
1474 1475          /*
1475 1476           * We want to be consistent with UFS semantics so we will return
1476 1477           * EINVAL instead of ENXIO. This violates the XNFS spec and
1477 1478           * the RFC 1094, which are wrong anyway. BUGID 1138002.
1478 1479           */
1479 1480          if (vp->v_type != VLNK)
1480 1481                  return (EINVAL);
1481 1482  
1482 1483          if (nfs_zone() != VTOMI(vp)->mi_zone)
1483 1484                  return (EIO);
1484 1485  
1485 1486          rp = VTOR(vp);
1486 1487          if (nfs_do_symlink_cache && rp->r_symlink.contents != NULL) {
1487 1488                  error = nfs_validate_caches(vp, cr);
1488 1489                  if (error)
1489 1490                          return (error);
1490 1491                  mutex_enter(&rp->r_statelock);
1491 1492                  if (rp->r_symlink.contents != NULL) {
1492 1493                          error = uiomove(rp->r_symlink.contents,
1493 1494                              rp->r_symlink.len, UIO_READ, uiop);
1494 1495                          mutex_exit(&rp->r_statelock);
1495 1496                          return (error);
1496 1497                  }
1497 1498                  mutex_exit(&rp->r_statelock);
1498 1499          }
1499 1500  
1500 1501  
1501 1502          rl.rl_data = kmem_alloc(NFS_MAXPATHLEN, KM_SLEEP);
1502 1503  
1503 1504          fi.vp = vp;
1504 1505          fi.fhp = NULL;          /* no need to update, filehandle not copied */
1505 1506          fi.copyproc = nfscopyfh;
1506 1507          fi.lookupproc = nfslookup;
1507 1508          fi.xattrdirproc = acl_getxattrdir2;
1508 1509  
1509 1510          douprintf = 1;
1510 1511  
1511 1512          error = rfs2call(VTOMI(vp), RFS_READLINK,
1512 1513              xdr_readlink, (caddr_t)VTOFH(vp),
1513 1514              xdr_rdlnres, (caddr_t)&rl, cr,
1514 1515              &douprintf, &rl.rl_status, 0, &fi);
1515 1516  
1516 1517          if (error) {
1517 1518  
1518 1519                  kmem_free((void *)rl.rl_data, NFS_MAXPATHLEN);
1519 1520                  return (error);
1520 1521          }
1521 1522  
1522 1523          error = geterrno(rl.rl_status);
1523 1524          if (!error) {
1524 1525                  error = uiomove(rl.rl_data, (int)rl.rl_count, UIO_READ, uiop);
1525 1526                  if (nfs_do_symlink_cache && rp->r_symlink.contents == NULL) {
1526 1527                          mutex_enter(&rp->r_statelock);
1527 1528                          if (rp->r_symlink.contents == NULL) {
1528 1529                                  rp->r_symlink.contents = rl.rl_data;
1529 1530                                  rp->r_symlink.len = (int)rl.rl_count;
1530 1531                                  rp->r_symlink.size = NFS_MAXPATHLEN;
1531 1532                                  mutex_exit(&rp->r_statelock);
1532 1533                          } else {
1533 1534                                  mutex_exit(&rp->r_statelock);
1534 1535  
1535 1536                                  kmem_free((void *)rl.rl_data,
1536 1537                                      NFS_MAXPATHLEN);
1537 1538                          }
1538 1539                  } else {
1539 1540  
1540 1541                          kmem_free((void *)rl.rl_data, NFS_MAXPATHLEN);
1541 1542                  }
1542 1543          } else {
1543 1544                  PURGE_STALE_FH(error, vp, cr);
1544 1545  
1545 1546                  kmem_free((void *)rl.rl_data, NFS_MAXPATHLEN);
1546 1547          }
1547 1548  
1548 1549          /*
1549 1550           * Conform to UFS semantics (see comment above)
1550 1551           */
1551 1552          return (error == ENXIO ? EINVAL : error);
1552 1553  }
1553 1554  
1554 1555  /*
1555 1556   * Flush local dirty pages to stable storage on the server.
1556 1557   *
1557 1558   * If FNODSYNC is specified, then there is nothing to do because
1558 1559   * metadata changes are not cached on the client before being
1559 1560   * sent to the server.
1560 1561   */
1561 1562  /* ARGSUSED */
1562 1563  static int
1563 1564  nfs_fsync(vnode_t *vp, int syncflag, cred_t *cr, caller_context_t *ct)
1564 1565  {
1565 1566          int error;
1566 1567  
1567 1568          if ((syncflag & FNODSYNC) || IS_SWAPVP(vp))
1568 1569                  return (0);
1569 1570  
1570 1571          if (nfs_zone() != VTOMI(vp)->mi_zone)
1571 1572                  return (EIO);
1572 1573  
1573 1574          error = nfs_putpage(vp, (offset_t)0, 0, 0, cr, ct);
1574 1575          if (!error)
1575 1576                  error = VTOR(vp)->r_error;
1576 1577          return (error);
1577 1578  }
1578 1579  
1579 1580  
1580 1581  /*
1581 1582   * Weirdness: if the file was removed or the target of a rename
1582 1583   * operation while it was open, it got renamed instead.  Here we
1583 1584   * remove the renamed file.
1584 1585   */
1585 1586  /* ARGSUSED */
1586 1587  static void
1587 1588  nfs_inactive(vnode_t *vp, cred_t *cr, caller_context_t *ct)
1588 1589  {
1589 1590          rnode_t *rp;
1590 1591  
1591 1592          ASSERT(vp != DNLC_NO_VNODE);
1592 1593  
1593 1594          /*
1594 1595           * If this is coming from the wrong zone, we let someone in the right
1595 1596           * zone take care of it asynchronously.  We can get here due to
1596 1597           * VN_RELE() being called from pageout() or fsflush().  This call may
1597 1598           * potentially turn into an expensive no-op if, for instance, v_count
1598 1599           * gets incremented in the meantime, but it's still correct.
1599 1600           */
1600 1601          if (nfs_zone() != VTOMI(vp)->mi_zone) {
1601 1602                  nfs_async_inactive(vp, cr, nfs_inactive);
1602 1603                  return;
1603 1604          }
1604 1605  
1605 1606          rp = VTOR(vp);
1606 1607  redo:
1607 1608          if (rp->r_unldvp != NULL) {
1608 1609                  /*
1609 1610                   * Save the vnode pointer for the directory where the
1610 1611                   * unlinked-open file got renamed, then set it to NULL
1611 1612                   * to prevent another thread from getting here before
1612 1613                   * we're done with the remove.  While we have the
1613 1614                   * statelock, make local copies of the pertinent rnode
1614 1615                   * fields.  If we weren't to do this in an atomic way, the
1615 1616                   * unl* fields could become inconsistent with respect
1616 1617                   * to each other due to a race condition between this
1617 1618                   * code and nfs_remove().  See bug report 1034328.
1618 1619                   */
1619 1620                  mutex_enter(&rp->r_statelock);
1620 1621                  if (rp->r_unldvp != NULL) {
1621 1622                          vnode_t *unldvp;
1622 1623                          char *unlname;
1623 1624                          cred_t *unlcred;
1624 1625                          struct nfsdiropargs da;
1625 1626                          enum nfsstat status;
1626 1627                          int douprintf;
1627 1628                          int error;
1628 1629  
1629 1630                          unldvp = rp->r_unldvp;
1630 1631                          rp->r_unldvp = NULL;
1631 1632                          unlname = rp->r_unlname;
1632 1633                          rp->r_unlname = NULL;
1633 1634                          unlcred = rp->r_unlcred;
1634 1635                          rp->r_unlcred = NULL;
1635 1636                          mutex_exit(&rp->r_statelock);
1636 1637  
1637 1638                          /*
1638 1639                           * If there are any dirty pages left, then flush
1639 1640                           * them.  This is unfortunate because they just
1640 1641                           * may get thrown away during the remove operation,
1641 1642                           * but we have to do this for correctness.
1642 1643                           */
1643 1644                          if (vn_has_cached_data(vp) &&
1644 1645                              ((rp->r_flags & RDIRTY) || rp->r_count > 0)) {
1645 1646                                  ASSERT(vp->v_type != VCHR);
1646 1647                                  error = nfs_putpage(vp, (offset_t)0, 0, 0,
1647 1648                                      cr, ct);
1648 1649                                  if (error) {
1649 1650                                          mutex_enter(&rp->r_statelock);
1650 1651                                          if (!rp->r_error)
1651 1652                                                  rp->r_error = error;
1652 1653                                          mutex_exit(&rp->r_statelock);
1653 1654                                  }
1654 1655                          }
1655 1656  
1656 1657                          /*
1657 1658                           * Do the remove operation on the renamed file
1658 1659                           */
1659 1660                          setdiropargs(&da, unlname, unldvp);
1660 1661  
1661 1662                          douprintf = 1;
1662 1663  
1663 1664                          (void) rfs2call(VTOMI(unldvp), RFS_REMOVE,
1664 1665                              xdr_diropargs, (caddr_t)&da,
1665 1666                              xdr_enum, (caddr_t)&status, unlcred,
1666 1667                              &douprintf, &status, 0, NULL);
1667 1668  
1668 1669                          if (HAVE_RDDIR_CACHE(VTOR(unldvp)))
1669 1670                                  nfs_purge_rddir_cache(unldvp);
1670 1671                          PURGE_ATTRCACHE(unldvp);
1671 1672  
1672 1673                          /*
1673 1674                           * Release stuff held for the remove
1674 1675                           */
1675 1676                          VN_RELE(unldvp);
1676 1677                          kmem_free(unlname, MAXNAMELEN);
1677 1678                          crfree(unlcred);
1678 1679                          goto redo;
1679 1680                  }
1680 1681                  mutex_exit(&rp->r_statelock);
1681 1682          }
1682 1683  
1683 1684          rp_addfree(rp, cr);
1684 1685  }
1685 1686  
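
A userland illustration of the rename-on-remove cleanup handled above; the mount point is hypothetical, and the temporary name generated by newname() typically has the form .nfsXXXX:

#include <fcntl.h>
#include <unistd.h>

int
main(void)
{
        int fd = open("/mnt/file", O_RDWR | O_CREAT, 0644);

        if (fd < 0)
                return (1);
        /* the client renames the open file rather than removing it */
        (void) unlink("/mnt/file");
        /* I/O through fd keeps working against the renamed file */
        (void) close(fd);       /* last release: nfs_inactive() removes it */
        return (0);
}
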
1686 1687  /*
1687 1688   * Remote file system operations having to do with directory manipulation.
1688 1689   */
1689 1690  
1690 1691  /* ARGSUSED */
1691 1692  static int
1692 1693  nfs_lookup(vnode_t *dvp, char *nm, vnode_t **vpp, struct pathname *pnp,
1693 1694          int flags, vnode_t *rdir, cred_t *cr, caller_context_t *ct,
1694 1695          int *direntflags, pathname_t *realpnp)
1695 1696  {
1696 1697          int error;
1697 1698          vnode_t *vp;
1698 1699          vnode_t *avp = NULL;
1699 1700          rnode_t *drp;
1700 1701  
1701 1702          if (nfs_zone() != VTOMI(dvp)->mi_zone)
1702 1703                  return (EPERM);
1703 1704  
1704 1705          drp = VTOR(dvp);
1705 1706  
1706 1707          /*
1707 1708           * Are we looking up extended attributes?  If so, "dvp" is
1708 1709           * the file or directory for which we want attributes, and
1709 1710           * we need a lookup of the hidden attribute directory
1710 1711           * before we lookup the rest of the path.
1711 1712           */
1712 1713          if (flags & LOOKUP_XATTR) {
1713 1714                  bool_t cflag = ((flags & CREATE_XATTR_DIR) != 0);
1714 1715                  mntinfo_t *mi;
1715 1716  
1716 1717                  mi = VTOMI(dvp);
1717 1718                  if (!(mi->mi_flags & MI_EXTATTR))
1718 1719                          return (EINVAL);
1719 1720  
1720 1721                  if (nfs_rw_enter_sig(&drp->r_rwlock, RW_READER, INTR(dvp)))
1721 1722                          return (EINTR);
1722 1723  
1723 1724                  (void) nfslookup_dnlc(dvp, XATTR_DIR_NAME, &avp, cr);
1724 1725                  if (avp == NULL)
1725 1726                          error = acl_getxattrdir2(dvp, &avp, cflag, cr, 0);
1726 1727                  else
1727 1728                          error = 0;
1728 1729  
1729 1730                  nfs_rw_exit(&drp->r_rwlock);
1730 1731  
1731 1732                  if (error) {
1732 1733                          if (mi->mi_flags & MI_EXTATTR)
1733 1734                                  return (error);
1734 1735                          return (EINVAL);
1735 1736                  }
1736 1737                  dvp = avp;
1737 1738                  drp = VTOR(dvp);
1738 1739          }
1739 1740  
1740 1741          if (nfs_rw_enter_sig(&drp->r_rwlock, RW_READER, INTR(dvp))) {
1741 1742                  error = EINTR;
1742 1743                  goto out;
1743 1744          }
1744 1745  
1745 1746          error = nfslookup(dvp, nm, vpp, pnp, flags, rdir, cr, 0);
1746 1747  
1747 1748          nfs_rw_exit(&drp->r_rwlock);
1748 1749  
1749 1750          /*
1750 1751           * If vnode is a device, create special vnode.
1751 1752           */
1752 1753          if (!error && IS_DEVVP(*vpp)) {
1753 1754                  vp = *vpp;
1754 1755                  *vpp = specvp(vp, vp->v_rdev, vp->v_type, cr);
  
  
1755 1756                  VN_RELE(vp);
1756 1757          }
1757 1758  
1758 1759  out:
1759 1760          if (avp != NULL)
1760 1761                  VN_RELE(avp);
1761 1762  
1762 1763          return (error);
1763 1764  }
1764 1765  
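
The LOOKUP_XATTR branch above is what attropen(3C) ultimately exercises: a lookup of the hidden attribute directory, followed by a lookup of the named attribute within it. A sketch with hypothetical path and attribute names:

#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>

int
open_named_attr(void)
{
        /* fails with EINVAL if the server lacks extended attribute support */
        return (attropen("/mnt/file", "myattr", O_RDONLY));
}
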
1765      -static int nfs_lookup_neg_cache = 1;
     1766 +volatile int nfs_lookup_neg_cache = 1;
1766 1767  
1767 1768  #ifdef DEBUG
1768 1769  static int nfs_lookup_dnlc_hits = 0;
1769 1770  static int nfs_lookup_dnlc_misses = 0;
1770 1771  static int nfs_lookup_dnlc_neg_hits = 0;
1771 1772  static int nfs_lookup_dnlc_disappears = 0;
1772 1773  static int nfs_lookup_dnlc_lookups = 0;
1773 1774  #endif
1774 1775  
1775 1776  /* ARGSUSED */
1776 1777  int
1777 1778  nfslookup(vnode_t *dvp, char *nm, vnode_t **vpp, struct pathname *pnp,
1778 1779          int flags, vnode_t *rdir, cred_t *cr, int rfscall_flags)
1779 1780  {
1780 1781          int error;
1781 1782  
1782 1783          ASSERT(nfs_zone() == VTOMI(dvp)->mi_zone);
1783 1784  
1784 1785          /*
1785 1786           * If lookup is for "", just return dvp.  Don't need
1786 1787           * to send it over the wire, look it up in the dnlc,
1787 1788           * or perform any access checks.
1788 1789           */
1789 1790          if (*nm == '\0') {
1790 1791                  VN_HOLD(dvp);
1791 1792                  *vpp = dvp;
1792 1793                  return (0);
1793 1794          }
1794 1795  
1795 1796          /*
1796 1797           * Can't do lookups in non-directories.
1797 1798           */
1798 1799          if (dvp->v_type != VDIR)
1799 1800                  return (ENOTDIR);
1800 1801  
1801 1802          /*
1802 1803           * If we're called with RFSCALL_SOFT, it's important that
1803 1804           * the only rfscall is one we make directly; if we permit
1804 1805           * an access call because we're looking up "." or validating
1805 1806           * a dnlc hit, we'll deadlock because that rfscall will not
1806 1807           * have the RFSCALL_SOFT set.
1807 1808           */
1808 1809          if (rfscall_flags & RFSCALL_SOFT)
1809 1810                  goto callit;
1810 1811  
1811 1812          /*
1812 1813           * If lookup is for ".", just return dvp.  Don't need
1813 1814           * to send it over the wire or look it up in the dnlc,
1814 1815           * just need to check access.
1815 1816           */
1816 1817          if (strcmp(nm, ".") == 0) {
1817 1818                  error = nfs_access(dvp, VEXEC, 0, cr, NULL);
1818 1819                  if (error)
1819 1820                          return (error);
1820 1821                  VN_HOLD(dvp);
1821 1822                  *vpp = dvp;
1822 1823                  return (0);
1823 1824          }
1824 1825  
1825 1826          /*
1826 1827           * Lookup this name in the DNLC.  If there was a valid entry,
1827 1828           * then return the results of the lookup.
1828 1829           */
1829 1830          error = nfslookup_dnlc(dvp, nm, vpp, cr);
1830 1831          if (error || *vpp != NULL)
1831 1832                  return (error);
1832 1833  
1833 1834  callit:
1834 1835          error = nfslookup_otw(dvp, nm, vpp, cr, rfscall_flags);
1835 1836  
1836 1837          return (error);
1837 1838  }
1838 1839  
1839 1840  static int
1840 1841  nfslookup_dnlc(vnode_t *dvp, char *nm, vnode_t **vpp, cred_t *cr)
1841 1842  {
1842 1843          int error;
1843 1844          vnode_t *vp;
1844 1845  
1845 1846          ASSERT(*nm != '\0');
1846 1847          ASSERT(nfs_zone() == VTOMI(dvp)->mi_zone);
1847 1848  
1848 1849          /*
1849 1850           * Lookup this name in the DNLC.  If successful, then validate
1850 1851           * the caches and then recheck the DNLC.  The DNLC is rechecked
1851 1852           * just in case this entry got invalidated during the call
1852 1853           * to nfs_validate_caches.
1853 1854           *
1854 1855           * An assumption is being made that it is safe to say that a
1855 1856           * file exists which may not exist on the server.  Any operations to
1856 1857           * the server will fail with ESTALE.
1857 1858           */
1858 1859  #ifdef DEBUG
1859 1860          nfs_lookup_dnlc_lookups++;
1860 1861  #endif
1861 1862          vp = dnlc_lookup(dvp, nm);
1862 1863          if (vp != NULL) {
1863 1864                  VN_RELE(vp);
1864 1865                  if (vp == DNLC_NO_VNODE && !vn_is_readonly(dvp)) {
1865 1866                          PURGE_ATTRCACHE(dvp);
1866 1867                  }
1867 1868                  error = nfs_validate_caches(dvp, cr);
1868 1869                  if (error)
1869 1870                          return (error);
1870 1871                  vp = dnlc_lookup(dvp, nm);
1871 1872                  if (vp != NULL) {
1872 1873                          error = nfs_access(dvp, VEXEC, 0, cr, NULL);
1873 1874                          if (error) {
1874 1875                                  VN_RELE(vp);
1875 1876                                  return (error);
1876 1877                          }
1877 1878                          if (vp == DNLC_NO_VNODE) {
1878 1879                                  VN_RELE(vp);
1879 1880  #ifdef DEBUG
1880 1881                                  nfs_lookup_dnlc_neg_hits++;
1881 1882  #endif
1882 1883                                  return (ENOENT);
1883 1884                          }
1884 1885                          *vpp = vp;
1885 1886  #ifdef DEBUG
1886 1887                          nfs_lookup_dnlc_hits++;
1887 1888  #endif
1888 1889                          return (0);
1889 1890                  }
1890 1891  #ifdef DEBUG
1891 1892                  nfs_lookup_dnlc_disappears++;
1892 1893  #endif
1893 1894          }
1894 1895  #ifdef DEBUG
1895 1896          else
1896 1897                  nfs_lookup_dnlc_misses++;
1897 1898  #endif
1898 1899  
1899 1900          *vpp = NULL;
1900 1901  
1901 1902          return (0);
1902 1903  }
1903 1904  
1904 1905  static int
1905 1906  nfslookup_otw(vnode_t *dvp, char *nm, vnode_t **vpp, cred_t *cr,
1906 1907          int rfscall_flags)
1907 1908  {
1908 1909          int error;
1909 1910          struct nfsdiropargs da;
1910 1911          struct nfsdiropres dr;
1911 1912          int douprintf;
1912 1913          failinfo_t fi;
1913 1914          hrtime_t t;
1914 1915  
1915 1916          ASSERT(*nm != '\0');
1916 1917          ASSERT(dvp->v_type == VDIR);
1917 1918          ASSERT(nfs_zone() == VTOMI(dvp)->mi_zone);
1918 1919  
1919 1920          setdiropargs(&da, nm, dvp);
1920 1921  
1921 1922          fi.vp = dvp;
1922 1923          fi.fhp = NULL;          /* no need to update, filehandle not copied */
1923 1924          fi.copyproc = nfscopyfh;
1924 1925          fi.lookupproc = nfslookup;
1925 1926          fi.xattrdirproc = acl_getxattrdir2;
1926 1927  
1927 1928          douprintf = 1;
1928 1929  
1929 1930          t = gethrtime();
1930 1931  
1931 1932          error = rfs2call(VTOMI(dvp), RFS_LOOKUP,
1932 1933              xdr_diropargs, (caddr_t)&da,
1933 1934              xdr_diropres, (caddr_t)&dr, cr,
1934 1935              &douprintf, &dr.dr_status, rfscall_flags, &fi);
1935 1936  
1936 1937          if (!error) {
1937 1938                  error = geterrno(dr.dr_status);
1938 1939                  if (!error) {
1939 1940                          *vpp = makenfsnode(&dr.dr_fhandle, &dr.dr_attr,
1940 1941                              dvp->v_vfsp, t, cr, VTOR(dvp)->r_path, nm);
1941 1942                          /*
1942 1943                           * If NFS_ACL is supported on the server, then the
1943 1944                           * attributes returned by the server may have minimal
1944 1945                           * permissions, sometimes denying access to users having
1945 1946                           * proper access.  To get the proper attributes, mark
1946 1947                           * the attributes as expired so that they will be
1947 1948                           * refetched via the NFS_ACL GETATTR2 procedure.
1948 1949                           */
1949 1950                          if (VTOMI(*vpp)->mi_flags & MI_ACL) {
1950 1951                                  PURGE_ATTRCACHE(*vpp);
1951 1952                          }
1952 1953                          if (!(rfscall_flags & RFSCALL_SOFT))
1953 1954                                  dnlc_update(dvp, nm, *vpp);
1954 1955                  } else {
1955 1956                          PURGE_STALE_FH(error, dvp, cr);
1956 1957                          if (error == ENOENT && nfs_lookup_neg_cache)
1957 1958                                  dnlc_enter(dvp, nm, DNLC_NO_VNODE);
1958 1959                  }
1959 1960          }
1960 1961  
1961 1962          return (error);
1962 1963  }
1963 1964  
1964 1965  /* ARGSUSED */
1965 1966  static int
1966 1967  nfs_create(vnode_t *dvp, char *nm, struct vattr *va, enum vcexcl exclusive,
1967 1968          int mode, vnode_t **vpp, cred_t *cr, int lfaware, caller_context_t *ct,
1968 1969          vsecattr_t *vsecp)
1969 1970  {
1970 1971          int error;
1971 1972          struct nfscreatargs args;
1972 1973          struct nfsdiropres dr;
1973 1974          int douprintf;
1974 1975          vnode_t *vp;
1975 1976          rnode_t *rp;
1976 1977          struct vattr vattr;
1977 1978          rnode_t *drp;
1978 1979          vnode_t *tempvp;
1979 1980          hrtime_t t;
1980 1981  
1981 1982          drp = VTOR(dvp);
1982 1983  
1983 1984          if (nfs_zone() != VTOMI(dvp)->mi_zone)
1984 1985                  return (EPERM);
1985 1986          if (nfs_rw_enter_sig(&drp->r_rwlock, RW_WRITER, INTR(dvp)))
1986 1987                  return (EINTR);
1987 1988  
1988 1989          /*
1989 1990           * We make a copy of the attributes because the caller does not
1990 1991           * expect us to change what va points to.
1991 1992           */
1992 1993          vattr = *va;
1993 1994  
1994 1995          /*
1995 1996           * If the pathname is "", just use dvp.  Don't need
1996 1997           * to send it over the wire, look it up in the dnlc,
1997 1998           * or perform any access checks.
1998 1999           */
1999 2000          if (*nm == '\0') {
2000 2001                  error = 0;
2001 2002                  VN_HOLD(dvp);
2002 2003                  vp = dvp;
2003 2004          /*
2004 2005           * If the pathname is ".", just use dvp.  Don't need
2005 2006           * to send it over the wire or look it up in the dnlc,
2006 2007           * just need to check access.
2007 2008           */
2008 2009          } else if (strcmp(nm, ".") == 0) {
2009 2010                  error = nfs_access(dvp, VEXEC, 0, cr, ct);
2010 2011                  if (error) {
2011 2012                          nfs_rw_exit(&drp->r_rwlock);
2012 2013                          return (error);
2013 2014                  }
2014 2015                  VN_HOLD(dvp);
2015 2016                  vp = dvp;
2016 2017          /*
2017 2018           * We need to go over the wire, just to be sure whether the
2018 2019           * file exists or not.  Using the DNLC can be dangerous in
2019 2020           * this case when making a decision regarding existence.
2020 2021           */
2021 2022          } else {
2022 2023                  error = nfslookup_otw(dvp, nm, &vp, cr, 0);
2023 2024          }
2024 2025          if (!error) {
2025 2026                  if (exclusive == EXCL)
2026 2027                          error = EEXIST;
2027 2028                  else if (vp->v_type == VDIR && (mode & VWRITE))
2028 2029                          error = EISDIR;
2029 2030                  else {
2030 2031                          /*
2031 2032                           * If vnode is a device, create special vnode.
2032 2033                           */
2033 2034                          if (IS_DEVVP(vp)) {
2034 2035                                  tempvp = vp;
2035 2036                                  vp = specvp(vp, vp->v_rdev, vp->v_type, cr);
2036 2037                                  VN_RELE(tempvp);
2037 2038                          }
2038 2039                          if (!(error = VOP_ACCESS(vp, mode, 0, cr, ct))) {
2039 2040                                  if ((vattr.va_mask & AT_SIZE) &&
2040 2041                                      vp->v_type == VREG) {
2041 2042                                          vattr.va_mask = AT_SIZE;
2042 2043                                          error = nfssetattr(vp, &vattr, 0, cr);
2043 2044  
2044 2045                                          if (!error) {
2045 2046                                                  /*
2046 2047                                                   * Existing file was truncated;
2047 2048                                                   * emit a create event.
2048 2049                                                   */
2049 2050                                                  vnevent_create(vp, ct);
2050 2051                                          }
2051 2052                                  }
2052 2053                          }
2053 2054                  }
2054 2055                  nfs_rw_exit(&drp->r_rwlock);
2055 2056                  if (error) {
2056 2057                          VN_RELE(vp);
2057 2058                  } else {
2058 2059                          *vpp = vp;
2059 2060                  }
2060 2061                  return (error);
2061 2062          }
2062 2063  
2063 2064          ASSERT(vattr.va_mask & AT_TYPE);
2064 2065          if (vattr.va_type == VREG) {
2065 2066                  ASSERT(vattr.va_mask & AT_MODE);
2066 2067                  if (MANDMODE(vattr.va_mode)) {
2067 2068                          nfs_rw_exit(&drp->r_rwlock);
2068 2069                          return (EACCES);
2069 2070                  }
2070 2071          }
2071 2072  
2072 2073          dnlc_remove(dvp, nm);
2073 2074  
2074 2075          setdiropargs(&args.ca_da, nm, dvp);
2075 2076  
2076 2077          /*
2077 2078           * Decide what the group-id of the created file should be.
2078 2079           * Set it in attribute list as advisory...then do a setattr
2079 2080           * if the server didn't get it right the first time.
2080 2081           */
2081 2082          error = setdirgid(dvp, &vattr.va_gid, cr);
2082 2083          if (error) {
2083 2084                  nfs_rw_exit(&drp->r_rwlock);
2084 2085                  return (error);
2085 2086          }
2086 2087          vattr.va_mask |= AT_GID;
2087 2088  
2088 2089          /*
2089 2090           * This is a completely gross hack to make mknod
2090 2091           * work over the wire until we can whack the protocol
2091 2092           */
2092 2093  #define IFCHR           0020000         /* character special */
2093 2094  #define IFBLK           0060000         /* block special */
2094 2095  #define IFSOCK          0140000         /* socket */
2095 2096  
2096 2097          /*
2097 2098           * dev_t is uint_t in 5.x and short in 4.x.  4.x supports
2098 2099           * 8 bit majors while 5.x supports 14 bit majors, and 5.x supports 18
2099 2100           * bits in the minor number where 4.x supports 8 bits.  If the 5.x
2100 2101           * minor/major numbers are <= 8 bits long, compress the device
2101 2102           * number before sending it. Otherwise, the 4.x server will not
2102 2103           * create the device with the correct device number and nothing can be
2103 2104           * done about this.
2104 2105           */
2105 2106          if (vattr.va_type == VCHR || vattr.va_type == VBLK) {
2106 2107                  dev_t d = vattr.va_rdev;
2107 2108                  dev32_t dev32;
2108 2109  
2109 2110                  if (vattr.va_type == VCHR)
2110 2111                          vattr.va_mode |= IFCHR;
2111 2112                  else
2112 2113                          vattr.va_mode |= IFBLK;
2113 2114  
2114 2115                  (void) cmpldev(&dev32, d);
2115 2116                  if (dev32 & ~((SO4_MAXMAJ << L_BITSMINOR32) | SO4_MAXMIN))
2116 2117                          vattr.va_size = (u_offset_t)dev32;
2117 2118                  else
2118 2119                          vattr.va_size = (u_offset_t)nfsv2_cmpdev(d);
2119 2120  
2120 2121                  vattr.va_mask |= AT_MODE|AT_SIZE;
2121 2122          } else if (vattr.va_type == VFIFO) {
2122 2123                  vattr.va_mode |= IFCHR;         /* xtra kludge for namedpipe */
2123 2124                  vattr.va_size = (u_offset_t)NFS_FIFO_DEV;       /* blech */
2124 2125                  vattr.va_mask |= AT_MODE|AT_SIZE;
2125 2126          } else if (vattr.va_type == VSOCK) {
2126 2127                  vattr.va_mode |= IFSOCK;
2127 2128                  /*
2128 2129                   * To avoid triggering bugs in the servers, set AT_SIZE
2129 2130                   * (all other RFS_CREATE calls set this).
2130 2131                   */
2131 2132                  vattr.va_size = 0;
2132 2133                  vattr.va_mask |= AT_MODE|AT_SIZE;
2133 2134          }
2134 2135  
2135 2136          args.ca_sa = &args.ca_sa_buf;
2136 2137          error = vattr_to_sattr(&vattr, args.ca_sa);
2137 2138          if (error) {
2138 2139                  /* req time field(s) overflow - return immediately */
2139 2140                  nfs_rw_exit(&drp->r_rwlock);
2140 2141                  return (error);
2141 2142          }
2142 2143  
2143 2144          douprintf = 1;
2144 2145  
2145 2146          t = gethrtime();
2146 2147  
2147 2148          error = rfs2call(VTOMI(dvp), RFS_CREATE,
2148 2149              xdr_creatargs, (caddr_t)&args,
2149 2150              xdr_diropres, (caddr_t)&dr, cr,
2150 2151              &douprintf, &dr.dr_status, 0, NULL);
2151 2152  
2152 2153          PURGE_ATTRCACHE(dvp);   /* mod time changed */
2153 2154  
2154 2155          if (!error) {
2155 2156                  error = geterrno(dr.dr_status);
2156 2157                  if (!error) {
2157 2158                          if (HAVE_RDDIR_CACHE(drp))
2158 2159                                  nfs_purge_rddir_cache(dvp);
2159 2160                          vp = makenfsnode(&dr.dr_fhandle, &dr.dr_attr,
2160 2161                              dvp->v_vfsp, t, cr, NULL, NULL);
2161 2162                          /*
2162 2163                           * If NFS_ACL is supported on the server, then the
2163 2164                           * attributes returned by the server may have minimal
2164 2165                           * permissions, sometimes denying access to users having
2165 2166                           * proper access.  To get the proper attributes, mark
2166 2167                           * the attributes as expired so that they will be
2167 2168                           * refetched via the NFS_ACL GETATTR2 procedure.
2168 2169                           */
2169 2170                          if (VTOMI(vp)->mi_flags & MI_ACL) {
2170 2171                                  PURGE_ATTRCACHE(vp);
2171 2172                          }
2172 2173                          dnlc_update(dvp, nm, vp);
2173 2174                          rp = VTOR(vp);
2174 2175                          if (vattr.va_size == 0) {
2175 2176                                  mutex_enter(&rp->r_statelock);
2176 2177                                  rp->r_size = 0;
2177 2178                                  mutex_exit(&rp->r_statelock);
2178 2179                                  if (vn_has_cached_data(vp)) {
2179 2180                                          ASSERT(vp->v_type != VCHR);
2180 2181                                          nfs_invalidate_pages(vp,
2181 2182                                              (u_offset_t)0, cr);
2182 2183                                  }
2183 2184                          }
2184 2185  
2185 2186                          /*
2186 2187                           * Make sure the gid was set correctly.
2187 2188                           * If not, try to set it (but don't lose
2188 2189                           * any sleep over it).
2189 2190                           */
2190 2191                          if (vattr.va_gid != rp->r_attr.va_gid) {
2191 2192                                  vattr.va_mask = AT_GID;
2192 2193                                  (void) nfssetattr(vp, &vattr, 0, cr);
2193 2194                          }
2194 2195  
2195 2196                          /*
2196 2197                           * If vnode is a device create special vnode
2197 2198                           */
2198 2199                          if (IS_DEVVP(vp)) {
2199 2200                                  *vpp = specvp(vp, vp->v_rdev, vp->v_type, cr);
2200 2201                                  VN_RELE(vp);
2201 2202                          } else
2202 2203                                  *vpp = vp;
2203 2204                  } else {
2204 2205                          PURGE_STALE_FH(error, dvp, cr);
2205 2206                  }
2206 2207          }
2207 2208  
2208 2209          nfs_rw_exit(&drp->r_rwlock);
2209 2210  
2210 2211          return (error);
2211 2212  }
2212 2213  
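
A worked sketch of the device-number compression decision in nfs_create() above; pack_o_dev() is a hypothetical stand-in for nfsv2_cmpdev() and the old SunOS 4.x 8-bit-major over 8-bit-minor wire layout (the real code uses cmpldev(), SO4_MAXMAJ and SO4_MAXMIN):

/*
 * pack_o_dev(3, 5) == 0x0305 round-trips exactly on an old server.
 * A minor number such as 300 does not fit in 8 bits, so the expanded
 * 32-bit dev32_t is sent instead, and an old server will not be able
 * to create the device with the correct number.
 */
static unsigned int
pack_o_dev(unsigned int maj, unsigned int min)
{
        return ((maj << 8) | min);
}
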
2213 2214  /*
2214 2215   * Weirdness: if the vnode to be removed is open
2215 2216   * we rename it instead of removing it and nfs_inactive
2216 2217   * will remove the new name.
2217 2218   */
2218 2219  /* ARGSUSED */
2219 2220  static int
2220 2221  nfs_remove(vnode_t *dvp, char *nm, cred_t *cr, caller_context_t *ct, int flags)
2221 2222  {
2222 2223          int error;
2223 2224          struct nfsdiropargs da;
2224 2225          enum nfsstat status;
2225 2226          vnode_t *vp;
2226 2227          char *tmpname;
2227 2228          int douprintf;
2228 2229          rnode_t *rp;
2229 2230          rnode_t *drp;
2230 2231  
2231 2232          if (nfs_zone() != VTOMI(dvp)->mi_zone)
2232 2233                  return (EPERM);
2233 2234          drp = VTOR(dvp);
2234 2235          if (nfs_rw_enter_sig(&drp->r_rwlock, RW_WRITER, INTR(dvp)))
2235 2236                  return (EINTR);
2236 2237  
2237 2238          error = nfslookup(dvp, nm, &vp, NULL, 0, NULL, cr, 0);
2238 2239          if (error) {
2239 2240                  nfs_rw_exit(&drp->r_rwlock);
2240 2241                  return (error);
2241 2242          }
2242 2243  
2243 2244          if (vp->v_type == VDIR && secpolicy_fs_linkdir(cr, dvp->v_vfsp)) {
2244 2245                  VN_RELE(vp);
2245 2246                  nfs_rw_exit(&drp->r_rwlock);
2246 2247                  return (EPERM);
2247 2248          }
2248 2249  
2249 2250          /*
2250 2251           * First just remove the entry from the name cache, as it
2251 2252           * is most likely the only entry for this vp.
2252 2253           */
2253 2254          dnlc_remove(dvp, nm);
2254 2255  
2255 2256          /*
2256 2257           * If the file has a v_count > 1 then there may be more than one
2257 2258           * entry in the name cache due to multiple links or an open file,
2258 2259           * but we don't have the real reference count so flush all
2259 2260           * possible entries.
2260 2261           */
2261 2262          if (vp->v_count > 1)
2262 2263                  dnlc_purge_vp(vp);
2263 2264  
2264 2265          /*
2265 2266           * Now we have the real reference count on the vnode
2266 2267           */
2267 2268          rp = VTOR(vp);
2268 2269          mutex_enter(&rp->r_statelock);
2269 2270          if (vp->v_count > 1 &&
2270 2271              (rp->r_unldvp == NULL || strcmp(nm, rp->r_unlname) == 0)) {
2271 2272                  mutex_exit(&rp->r_statelock);
2272 2273                  tmpname = newname();
2273 2274                  error = nfsrename(dvp, nm, dvp, tmpname, cr, ct);
2274 2275                  if (error)
2275 2276                          kmem_free(tmpname, MAXNAMELEN);
2276 2277                  else {
2277 2278                          mutex_enter(&rp->r_statelock);
2278 2279                          if (rp->r_unldvp == NULL) {
2279 2280                                  VN_HOLD(dvp);
2280 2281                                  rp->r_unldvp = dvp;
2281 2282                                  if (rp->r_unlcred != NULL)
2282 2283                                          crfree(rp->r_unlcred);
2283 2284                                  crhold(cr);
2284 2285                                  rp->r_unlcred = cr;
2285 2286                                  rp->r_unlname = tmpname;
2286 2287                          } else {
2287 2288                                  kmem_free(rp->r_unlname, MAXNAMELEN);
2288 2289                                  rp->r_unlname = tmpname;
2289 2290                          }
2290 2291                          mutex_exit(&rp->r_statelock);
2291 2292                  }
2292 2293          } else {
2293 2294                  mutex_exit(&rp->r_statelock);
2294 2295                  /*
2295 2296                   * We need to flush any dirty pages which happen to
2296 2297                   * be hanging around before removing the file.  This
2297 2298                   * shouldn't happen very often and mostly on file
2298 2299                   * systems mounted "nocto".
2299 2300                   */
2300 2301                  if (vn_has_cached_data(vp) &&
2301 2302                      ((rp->r_flags & RDIRTY) || rp->r_count > 0)) {
2302 2303                          error = nfs_putpage(vp, (offset_t)0, 0, 0, cr, ct);
2303 2304                          if (error && (error == ENOSPC || error == EDQUOT)) {
2304 2305                                  mutex_enter(&rp->r_statelock);
2305 2306                                  if (!rp->r_error)
2306 2307                                          rp->r_error = error;
2307 2308                                  mutex_exit(&rp->r_statelock);
2308 2309                          }
2309 2310                  }
2310 2311  
2311 2312                  setdiropargs(&da, nm, dvp);
2312 2313  
2313 2314                  douprintf = 1;
2314 2315  
2315 2316                  error = rfs2call(VTOMI(dvp), RFS_REMOVE,
2316 2317                      xdr_diropargs, (caddr_t)&da,
2317 2318                      xdr_enum, (caddr_t)&status, cr,
2318 2319                      &douprintf, &status, 0, NULL);
2319 2320  
2320 2321                  /*
2321 2322                   * The xattr dir may be gone after last attr is removed,
2322 2323                   * so flush it from dnlc.
2323 2324                   */
2324 2325                  if (dvp->v_flag & V_XATTRDIR)
2325 2326                          dnlc_purge_vp(dvp);
2326 2327  
2327 2328                  PURGE_ATTRCACHE(dvp);   /* mod time changed */
2328 2329                  PURGE_ATTRCACHE(vp);    /* link count changed */
2329 2330  
2330 2331                  if (!error) {
2331 2332                          error = geterrno(status);
2332 2333                          if (!error) {
2333 2334                                  if (HAVE_RDDIR_CACHE(drp))
2334 2335                                          nfs_purge_rddir_cache(dvp);
2335 2336                          } else {
2336 2337                                  PURGE_STALE_FH(error, dvp, cr);
2337 2338                          }
2338 2339                  }
2339 2340          }
2340 2341  
2341 2342          if (error == 0) {
2342 2343                  vnevent_remove(vp, dvp, nm, ct);
2343 2344          }
2344 2345          VN_RELE(vp);
2345 2346  
2346 2347          nfs_rw_exit(&drp->r_rwlock);
2347 2348  
2348 2349          return (error);
2349 2350  }
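
The unlink-open-file path above is the classic NFS "silly rename": a file that is still held open cannot simply be removed, so the client renames it to a temporary name and defers the real REMOVE until the last close. A minimal userland sketch of just that decision, where fake_rnode, sillyname() and try_remove() are hypothetical stand-ins for the rnode, newname() and the RPC (the exact name format newname() produces is not shown in this file):

    #include <stdio.h>
    #include <string.h>

    struct fake_rnode {
            int v_count;            /* models vp->v_count */
            int has_unldvp;         /* models rp->r_unldvp != NULL */
            char unlname[256];      /* models rp->r_unlname */
    };

    /* Stand-in for newname(): a ".nfs"-style temporary name. */
    static void
    sillyname(char *buf, size_t len, unsigned int seq)
    {
            (void) snprintf(buf, len, ".nfs%08x", seq);
    }

    /* Returns 1 when the remove is deferred via rename, 0 when sent. */
    static int
    try_remove(struct fake_rnode *rp, const char *nm)
    {
            if (rp->v_count > 1 &&
                (!rp->has_unldvp || strcmp(nm, rp->unlname) == 0)) {
                    sillyname(rp->unlname, sizeof (rp->unlname), 1);
                    rp->has_unldvp = 1;
                    (void) printf("held open: rename %s -> %s\n",
                        nm, rp->unlname);
                    return (1);
            }
            (void) printf("no extra holds: remove %s now\n", nm);
            return (0);
    }

    int
    main(void)
    {
            struct fake_rnode rp = { 2, 0, "" };

            (void) try_remove(&rp, "data.txt");     /* deferred */
            rp.v_count = 1;
            (void) try_remove(&rp, "other.txt");    /* removed */
            return (0);
    }
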
2350 2351  
2351 2352  /* ARGSUSED */
2352 2353  static int
2353 2354  nfs_link(vnode_t *tdvp, vnode_t *svp, char *tnm, cred_t *cr,
2354 2355          caller_context_t *ct, int flags)
2355 2356  {
2356 2357          int error;
2357 2358          struct nfslinkargs args;
2358 2359          enum nfsstat status;
2359 2360          vnode_t *realvp;
2360 2361          int douprintf;
2361 2362          rnode_t *tdrp;
2362 2363  
2363 2364          if (nfs_zone() != VTOMI(tdvp)->mi_zone)
2364 2365                  return (EPERM);
2365 2366          if (VOP_REALVP(svp, &realvp, ct) == 0)
2366 2367                  svp = realvp;
2367 2368  
2368 2369          args.la_from = VTOFH(svp);
2369 2370          setdiropargs(&args.la_to, tnm, tdvp);
2370 2371  
2371 2372          tdrp = VTOR(tdvp);
2372 2373          if (nfs_rw_enter_sig(&tdrp->r_rwlock, RW_WRITER, INTR(tdvp)))
2373 2374                  return (EINTR);
2374 2375  
2375 2376          dnlc_remove(tdvp, tnm);
2376 2377  
2377 2378          douprintf = 1;
2378 2379  
2379 2380          error = rfs2call(VTOMI(svp), RFS_LINK,
2380 2381              xdr_linkargs, (caddr_t)&args,
2381 2382              xdr_enum, (caddr_t)&status, cr,
2382 2383              &douprintf, &status, 0, NULL);
2383 2384  
2384 2385          PURGE_ATTRCACHE(tdvp);  /* mod time changed */
2385 2386          PURGE_ATTRCACHE(svp);   /* link count changed */
2386 2387  
2387 2388          if (!error) {
2388 2389                  error = geterrno(status);
2389 2390                  if (!error) {
2390 2391                          if (HAVE_RDDIR_CACHE(tdrp))
2391 2392                                  nfs_purge_rddir_cache(tdvp);
2392 2393                  }
2393 2394          }
2394 2395  
2395 2396          nfs_rw_exit(&tdrp->r_rwlock);
2396 2397  
2397 2398          if (!error) {
2398 2399                  /*
2399 2400                   * Notify the source file of this link operation.
2400 2401                   */
2401 2402                  vnevent_link(svp, ct);
2402 2403          }
2403 2404          return (error);
2404 2405  }
2405 2406  
2406 2407  /* ARGSUSED */
2407 2408  static int
2408 2409  nfs_rename(vnode_t *odvp, char *onm, vnode_t *ndvp, char *nnm, cred_t *cr,
2409 2410          caller_context_t *ct, int flags)
2410 2411  {
2411 2412          vnode_t *realvp;
2412 2413  
2413 2414          if (nfs_zone() != VTOMI(odvp)->mi_zone)
2414 2415                  return (EPERM);
2415 2416          if (VOP_REALVP(ndvp, &realvp, ct) == 0)
2416 2417                  ndvp = realvp;
2417 2418  
2418 2419          return (nfsrename(odvp, onm, ndvp, nnm, cr, ct));
2419 2420  }
2420 2421  
2421 2422  /*
2422 2423   * nfsrename does the real work of renaming in NFS Version 2.
2423 2424   */
2424 2425  static int
2425 2426  nfsrename(vnode_t *odvp, char *onm, vnode_t *ndvp, char *nnm, cred_t *cr,
2426 2427      caller_context_t *ct)
2427 2428  {
2428 2429          int error;
2429 2430          enum nfsstat status;
2430 2431          struct nfsrnmargs args;
2431 2432          int douprintf;
2432 2433          vnode_t *nvp = NULL;
2433 2434          vnode_t *ovp = NULL;
2434 2435          char *tmpname;
2435 2436          rnode_t *rp;
2436 2437          rnode_t *odrp;
2437 2438          rnode_t *ndrp;
2438 2439  
2439 2440          ASSERT(nfs_zone() == VTOMI(odvp)->mi_zone);
2440 2441          if (strcmp(onm, ".") == 0 || strcmp(onm, "..") == 0 ||
2441 2442              strcmp(nnm, ".") == 0 || strcmp(nnm, "..") == 0)
2442 2443                  return (EINVAL);
2443 2444  
2444 2445          odrp = VTOR(odvp);
2445 2446          ndrp = VTOR(ndvp);
2446 2447          if ((intptr_t)odrp < (intptr_t)ndrp) {
2447 2448                  if (nfs_rw_enter_sig(&odrp->r_rwlock, RW_WRITER, INTR(odvp)))
2448 2449                          return (EINTR);
2449 2450                  if (nfs_rw_enter_sig(&ndrp->r_rwlock, RW_WRITER, INTR(ndvp))) {
2450 2451                          nfs_rw_exit(&odrp->r_rwlock);
2451 2452                          return (EINTR);
2452 2453                  }
2453 2454          } else {
2454 2455                  if (nfs_rw_enter_sig(&ndrp->r_rwlock, RW_WRITER, INTR(ndvp)))
2455 2456                          return (EINTR);
2456 2457                  if (nfs_rw_enter_sig(&odrp->r_rwlock, RW_WRITER, INTR(odvp))) {
2457 2458                          nfs_rw_exit(&ndrp->r_rwlock);
2458 2459                          return (EINTR);
2459 2460                  }
2460 2461          }
2461 2462  
2462 2463          /*
2463 2464           * Lookup the target file.  If it exists, it needs to be
2464 2465           * checked to see whether it is a mount point and whether
2465 2466           * it is active (open).
2466 2467           */
2467 2468          error = nfslookup(ndvp, nnm, &nvp, NULL, 0, NULL, cr, 0);
2468 2469          if (!error) {
2469 2470                  /*
2470 2471                   * If this file has been mounted on, then just
2471 2472                   * return busy because renaming to it would remove
2472 2473                   * the mounted file system from the name space.
2473 2474                   */
2474 2475                  if (vn_mountedvfs(nvp) != NULL) {
2475 2476                          VN_RELE(nvp);
2476 2477                          nfs_rw_exit(&odrp->r_rwlock);
2477 2478                          nfs_rw_exit(&ndrp->r_rwlock);
2478 2479                          return (EBUSY);
2479 2480                  }
2480 2481  
2481 2482                  /*
2482 2483                   * Purge the name cache of all references to this vnode
2483 2484                   * so that we can check the reference count to infer
2484 2485                   * whether it is active or not.
2485 2486                   */
2486 2487                  /*
2487 2488                   * First just remove the entry from the name cache, as it
2488 2489                   * is most likely the only entry for this vp.
2489 2490                   */
2490 2491                  dnlc_remove(ndvp, nnm);
2491 2492                  /*
2492 2493                   * If the file has a v_count > 1 then there may be more
2493 2494           * than one entry in the name cache due to multiple links
2494 2495                   * or an open file, but we don't have the real reference
2495 2496                   * count so flush all possible entries.
2496 2497                   */
2497 2498                  if (nvp->v_count > 1)
2498 2499                          dnlc_purge_vp(nvp);
2499 2500  
2500 2501                  /*
2501 2502                   * If the vnode is active and is not a directory,
2502 2503                   * arrange to rename it to a
2503 2504                   * temporary file so that it will continue to be
2504 2505                   * accessible.  This implements the "unlink-open-file"
2505 2506                   * semantics for the target of a rename operation.
2506 2507                   * Before doing this though, make sure that the
2507 2508                   * source and target files are not already the same.
2508 2509                   */
2509 2510                  if (nvp->v_count > 1 && nvp->v_type != VDIR) {
2510 2511                          /*
2511 2512                           * Lookup the source name.
2512 2513                           */
2513 2514                          error = nfslookup(odvp, onm, &ovp, NULL, 0, NULL,
2514 2515                              cr, 0);
2515 2516  
2516 2517                          /*
2517 2518                           * The source name *should* already exist.
2518 2519                           */
2519 2520                          if (error) {
2520 2521                                  VN_RELE(nvp);
2521 2522                                  nfs_rw_exit(&odrp->r_rwlock);
2522 2523                                  nfs_rw_exit(&ndrp->r_rwlock);
2523 2524                                  return (error);
2524 2525                          }
2525 2526  
2526 2527                          /*
2527 2528                           * Compare the two vnodes.  If they are the same,
2528 2529                           * just release all held vnodes and return success.
2529 2530                           */
2530 2531                          if (ovp == nvp) {
2531 2532                                  VN_RELE(ovp);
2532 2533                                  VN_RELE(nvp);
2533 2534                                  nfs_rw_exit(&odrp->r_rwlock);
2534 2535                                  nfs_rw_exit(&ndrp->r_rwlock);
2535 2536                                  return (0);
2536 2537                          }
2537 2538  
2538 2539                          /*
2539 2540                           * Can't mix and match directories and non-
2540 2541                           * directories in rename operations.  We already
2541 2542                           * know that the target is not a directory.  If
2542 2543                           * the source is a directory, return an error.
2543 2544                           */
2544 2545                          if (ovp->v_type == VDIR) {
2545 2546                                  VN_RELE(ovp);
2546 2547                                  VN_RELE(nvp);
2547 2548                                  nfs_rw_exit(&odrp->r_rwlock);
2548 2549                                  nfs_rw_exit(&ndrp->r_rwlock);
2549 2550                                  return (ENOTDIR);
2550 2551                          }
2551 2552  
2552 2553                          /*
2553 2554                           * The target file exists, is not the same as
2554 2555                           * the source file, and is active.  Link it
2555 2556                           * to a temporary filename to avoid having
2556 2557                           * the server removing the file completely.
2557 2558                           */
2558 2559                          tmpname = newname();
2559 2560                          error = nfs_link(ndvp, nvp, tmpname, cr, NULL, 0);
2560 2561                          if (error == EOPNOTSUPP) {
2561 2562                                  error = nfs_rename(ndvp, nnm, ndvp, tmpname,
2562 2563                                      cr, NULL, 0);
2563 2564                          }
2564 2565                          if (error) {
2565 2566                                  kmem_free(tmpname, MAXNAMELEN);
2566 2567                                  VN_RELE(ovp);
2567 2568                                  VN_RELE(nvp);
2568 2569                                  nfs_rw_exit(&odrp->r_rwlock);
2569 2570                                  nfs_rw_exit(&ndrp->r_rwlock);
2570 2571                                  return (error);
2571 2572                          }
2572 2573                          rp = VTOR(nvp);
2573 2574                          mutex_enter(&rp->r_statelock);
2574 2575                          if (rp->r_unldvp == NULL) {
2575 2576                                  VN_HOLD(ndvp);
2576 2577                                  rp->r_unldvp = ndvp;
2577 2578                                  if (rp->r_unlcred != NULL)
2578 2579                                          crfree(rp->r_unlcred);
2579 2580                                  crhold(cr);
2580 2581                                  rp->r_unlcred = cr;
2581 2582                                  rp->r_unlname = tmpname;
2582 2583                          } else {
2583 2584                                  kmem_free(rp->r_unlname, MAXNAMELEN);
2584 2585                                  rp->r_unlname = tmpname;
2585 2586                          }
2586 2587                          mutex_exit(&rp->r_statelock);
2587 2588                  }
2588 2589          }
2589 2590  
2590 2591          if (ovp == NULL) {
2591 2592                  /*
2592 2593                   * When renaming directories to be a subdirectory of a
2593 2594                   * different parent, the dnlc entry for ".." will no
2594 2595                   * longer be valid, so it must be removed.
2595 2596                   *
2596 2597                   * We do a lookup here to determine whether we are renaming
2597 2598                   * a directory and we need to check if we are renaming
2598 2599                   * an unlinked file.  This might have already been done
2599 2600                   * in previous code, so we check ovp == NULL to avoid
2600 2601                   * doing it twice.
2601 2602                   */
2602 2603  
2603 2604                  error = nfslookup(odvp, onm, &ovp, NULL, 0, NULL, cr, 0);
2604 2605  
2605 2606                  /*
2606 2607                   * The source name *should* already exist.
2607 2608                   */
2608 2609                  if (error) {
2609 2610                          nfs_rw_exit(&odrp->r_rwlock);
2610 2611                          nfs_rw_exit(&ndrp->r_rwlock);
2611 2612                          if (nvp) {
2612 2613                                  VN_RELE(nvp);
2613 2614                          }
2614 2615                          return (error);
2615 2616                  }
2616 2617                  ASSERT(ovp != NULL);
2617 2618          }
2618 2619  
2619 2620          dnlc_remove(odvp, onm);
2620 2621          dnlc_remove(ndvp, nnm);
2621 2622  
2622 2623          setdiropargs(&args.rna_from, onm, odvp);
2623 2624          setdiropargs(&args.rna_to, nnm, ndvp);
2624 2625  
2625 2626          douprintf = 1;
2626 2627  
2627 2628          error = rfs2call(VTOMI(odvp), RFS_RENAME,
2628 2629              xdr_rnmargs, (caddr_t)&args,
2629 2630              xdr_enum, (caddr_t)&status, cr,
2630 2631              &douprintf, &status, 0, NULL);
2631 2632  
2632 2633          PURGE_ATTRCACHE(odvp);  /* mod time changed */
2633 2634          PURGE_ATTRCACHE(ndvp);  /* mod time changed */
2634 2635  
2635 2636          if (!error) {
2636 2637                  error = geterrno(status);
2637 2638                  if (!error) {
2638 2639                          if (HAVE_RDDIR_CACHE(odrp))
2639 2640                                  nfs_purge_rddir_cache(odvp);
2640 2641                          if (HAVE_RDDIR_CACHE(ndrp))
2641 2642                                  nfs_purge_rddir_cache(ndvp);
2642 2643                          /*
2643 2644                           * when renaming directories to be a subdirectory of a
2644 2645                           * different parent, the dnlc entry for ".." will no
2645 2646                           * longer be valid, so it must be removed
2646 2647                           */
2647 2648                          rp = VTOR(ovp);
2648 2649                          if (ndvp != odvp) {
2649 2650                                  if (ovp->v_type == VDIR) {
2650 2651                                          dnlc_remove(ovp, "..");
2651 2652                                          if (HAVE_RDDIR_CACHE(rp))
2652 2653                                                  nfs_purge_rddir_cache(ovp);
2653 2654                                  }
2654 2655                          }
2655 2656  
2656 2657                          /*
2657 2658                           * If we are renaming the unlinked file, update the
2658 2659                           * r_unldvp and r_unlname as needed.
2659 2660                           */
2660 2661                          mutex_enter(&rp->r_statelock);
2661 2662                          if (rp->r_unldvp != NULL) {
2662 2663                                  if (strcmp(rp->r_unlname, onm) == 0) {
2663 2664                                          (void) strncpy(rp->r_unlname,
2664 2665                                              nnm, MAXNAMELEN);
2665 2666                                          rp->r_unlname[MAXNAMELEN - 1] = '\0';
2666 2667  
2667 2668                                          if (ndvp != rp->r_unldvp) {
2668 2669                                                  VN_RELE(rp->r_unldvp);
2669 2670                                                  rp->r_unldvp = ndvp;
2670 2671                                                  VN_HOLD(ndvp);
2671 2672                                          }
2672 2673                                  }
2673 2674                          }
2674 2675                          mutex_exit(&rp->r_statelock);
2675 2676                  } else {
2676 2677                          /*
2677 2678                           * System V defines rename to return EEXIST, not
2678 2679                           * ENOTEMPTY if the target directory is not empty.
2679 2680                           * Over the wire, the error is NFSERR_ENOTEMPTY
2680 2681                           * which geterrno maps to ENOTEMPTY.
2681 2682                           */
2682 2683                          if (error == ENOTEMPTY)
2683 2684                                  error = EEXIST;
2684 2685                  }
2685 2686          }
2686 2687  
2687 2688          if (error == 0) {
2688 2689                  if (nvp)
2689 2690                          vnevent_rename_dest(nvp, ndvp, nnm, ct);
2690 2691  
2691 2692                  if (odvp != ndvp)
2692 2693                          vnevent_rename_dest_dir(ndvp, ct);
2693 2694  
2694 2695                  ASSERT(ovp != NULL);
2695 2696                  vnevent_rename_src(ovp, odvp, onm, ct);
2696 2697          }
2697 2698  
2698 2699          if (nvp) {
2699 2700                  VN_RELE(nvp);
2700 2701          }
2701 2702          VN_RELE(ovp);
2702 2703  
2703 2704          nfs_rw_exit(&odrp->r_rwlock);
2704 2705          nfs_rw_exit(&ndrp->r_rwlock);
2705 2706  
2706 2707          return (error);
2707 2708  }
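
The address-ordered lock acquisition at the top of nfsrename() is the standard way to take two locks of the same class without deadlocking: every thread agrees on one global order, so two concurrent renames with swapped source and target directories can never each hold one lock while waiting for the other. A standalone pthreads sketch of the idiom, with invented names lock_pair()/unlock_pair(); the equal-pointer case is special-cased here because plain pthread rwlocks, unlike the r_rwlock path in the kernel, cannot be write-entered twice by the same thread:

    #include <pthread.h>
    #include <stdint.h>

    static int
    lock_pair(pthread_rwlock_t *a, pthread_rwlock_t *b)
    {
            pthread_rwlock_t *lo = a, *hi = b;

            if (a == b)     /* e.g. rename within a single directory */
                    return (pthread_rwlock_wrlock(a));
            if ((uintptr_t)a > (uintptr_t)b) {
                    lo = b;
                    hi = a;
            }
            if (pthread_rwlock_wrlock(lo) != 0)
                    return (-1);
            if (pthread_rwlock_wrlock(hi) != 0) {
                    (void) pthread_rwlock_unlock(lo);
                    return (-1);
            }
            return (0);
    }

    static void
    unlock_pair(pthread_rwlock_t *a, pthread_rwlock_t *b)
    {
            (void) pthread_rwlock_unlock(a);
            if (a != b)
                    (void) pthread_rwlock_unlock(b);
    }

    int
    main(void)
    {
            pthread_rwlock_t x = PTHREAD_RWLOCK_INITIALIZER;
            pthread_rwlock_t y = PTHREAD_RWLOCK_INITIALIZER;

            if (lock_pair(&x, &y) == 0)
                    unlock_pair(&x, &y);
            return (0);
    }
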
2708 2709  
2709 2710  /* ARGSUSED */
2710 2711  static int
2711 2712  nfs_mkdir(vnode_t *dvp, char *nm, struct vattr *va, vnode_t **vpp, cred_t *cr,
2712 2713          caller_context_t *ct, int flags, vsecattr_t *vsecp)
2713 2714  {
2714 2715          int error;
2715 2716          struct nfscreatargs args;
2716 2717          struct nfsdiropres dr;
2717 2718          int douprintf;
2718 2719          rnode_t *drp;
2719 2720          hrtime_t t;
2720 2721  
2721 2722          if (nfs_zone() != VTOMI(dvp)->mi_zone)
2722 2723                  return (EPERM);
2723 2724  
2724 2725          setdiropargs(&args.ca_da, nm, dvp);
2725 2726  
2726 2727          /*
2727 2728           * Decide what the group-id and set-gid bit of the created directory
2728 2729           * should be.  May have to do a setattr to get the gid right.
2729 2730           */
2730 2731          error = setdirgid(dvp, &va->va_gid, cr);
2731 2732          if (error)
2732 2733                  return (error);
2733 2734          error = setdirmode(dvp, &va->va_mode, cr);
2734 2735          if (error)
2735 2736                  return (error);
2736 2737          va->va_mask |= AT_MODE|AT_GID;
2737 2738  
2738 2739          args.ca_sa = &args.ca_sa_buf;
2739 2740          error = vattr_to_sattr(va, args.ca_sa);
2740 2741          if (error) {
2741 2742                  /* req time field(s) overflow - return immediately */
2742 2743                  return (error);
2743 2744          }
2744 2745  
2745 2746          drp = VTOR(dvp);
2746 2747          if (nfs_rw_enter_sig(&drp->r_rwlock, RW_WRITER, INTR(dvp)))
2747 2748                  return (EINTR);
2748 2749  
2749 2750          dnlc_remove(dvp, nm);
2750 2751  
2751 2752          douprintf = 1;
2752 2753  
2753 2754          t = gethrtime();
2754 2755  
2755 2756          error = rfs2call(VTOMI(dvp), RFS_MKDIR,
2756 2757              xdr_creatargs, (caddr_t)&args,
2757 2758              xdr_diropres, (caddr_t)&dr, cr,
2758 2759              &douprintf, &dr.dr_status, 0, NULL);
2759 2760  
2760 2761          PURGE_ATTRCACHE(dvp);   /* mod time changed */
2761 2762  
2762 2763          if (!error) {
2763 2764                  error = geterrno(dr.dr_status);
2764 2765                  if (!error) {
2765 2766                          if (HAVE_RDDIR_CACHE(drp))
2766 2767                                  nfs_purge_rddir_cache(dvp);
2767 2768                          /*
2768 2769                           * The attributes returned by RFS_MKDIR can not
2769 2770                           * be depended upon, so mark the attribute cache
2770 2771                           * as purged.  A subsequent GETATTR will get the
2771 2772                           * correct attributes from the server.
2772 2773                           */
2773 2774                          *vpp = makenfsnode(&dr.dr_fhandle, &dr.dr_attr,
2774 2775                              dvp->v_vfsp, t, cr, NULL, NULL);
2775 2776                          PURGE_ATTRCACHE(*vpp);
2776 2777                          dnlc_update(dvp, nm, *vpp);
2777 2778  
2778 2779                          /*
2779 2780                           * Make sure the gid was set correctly.
2780 2781                           * If not, try to set it (but don't lose
2781 2782                           * any sleep over it).
2782 2783                           */
2783 2784                          if (va->va_gid != VTOR(*vpp)->r_attr.va_gid) {
2784 2785                                  va->va_mask = AT_GID;
2785 2786                                  (void) nfssetattr(*vpp, va, 0, cr);
2786 2787                          }
2787 2788                  } else {
2788 2789                          PURGE_STALE_FH(error, dvp, cr);
2789 2790                  }
2790 2791          }
2791 2792  
2792 2793          nfs_rw_exit(&drp->r_rwlock);
2793 2794  
2794 2795          return (error);
2795 2796  }
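
The gid check after the MKDIR reply is a general create-then-verify pattern: the server may not honor the group that setdirgid() computed, so the client compares the returned attribute and issues a single best-effort SETATTR. The same shape in plain userland terms, a sketch with an illustrative path and deliberately ignored fix-up errors:

    #include <sys/stat.h>
    #include <sys/types.h>
    #include <stdio.h>
    #include <unistd.h>

    int
    main(void)
    {
            const char *path = "/tmp/demo.dir";     /* illustrative */
            gid_t want = getegid();
            struct stat st;

            if (mkdir(path, 0755) != 0) {
                    perror("mkdir");
                    return (1);
            }
            /* Mirror nfs_mkdir(): verify, fix up once, don't fail. */
            if (stat(path, &st) == 0 && st.st_gid != want)
                    (void) chown(path, (uid_t)-1, want);

            (void) rmdir(path);
            return (0);
    }
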
2796 2797  
2797 2798  /* ARGSUSED */
2798 2799  static int
2799 2800  nfs_rmdir(vnode_t *dvp, char *nm, vnode_t *cdir, cred_t *cr,
2800 2801          caller_context_t *ct, int flags)
2801 2802  {
2802 2803          int error;
2803 2804          enum nfsstat status;
2804 2805          struct nfsdiropargs da;
2805 2806          vnode_t *vp;
2806 2807          int douprintf;
2807 2808          rnode_t *drp;
2808 2809  
2809 2810          if (nfs_zone() != VTOMI(dvp)->mi_zone)
2810 2811                  return (EPERM);
2811 2812          drp = VTOR(dvp);
2812 2813          if (nfs_rw_enter_sig(&drp->r_rwlock, RW_WRITER, INTR(dvp)))
2813 2814                  return (EINTR);
2814 2815  
2815 2816          /*
2816 2817           * Attempt to prevent a rmdir(".") from succeeding.
2817 2818           */
2818 2819          error = nfslookup(dvp, nm, &vp, NULL, 0, NULL, cr, 0);
2819 2820          if (error) {
2820 2821                  nfs_rw_exit(&drp->r_rwlock);
2821 2822                  return (error);
2822 2823          }
2823 2824  
2824 2825          if (vp == cdir) {
2825 2826                  VN_RELE(vp);
2826 2827                  nfs_rw_exit(&drp->r_rwlock);
2827 2828                  return (EINVAL);
2828 2829          }
2829 2830  
2830 2831          setdiropargs(&da, nm, dvp);
2831 2832  
2832 2833          /*
2833 2834           * First just remove the entry from the name cache, as it
2834 2835           * is most likely an entry for this vp.
2835 2836           */
2836 2837          dnlc_remove(dvp, nm);
2837 2838  
2838 2839          /*
2839 2840           * If the vnode reference count is greater than one, then
2840 2841           * there may be additional references in the DNLC which will
2841 2842           * need to be purged.  First, try removing the entry for
2842 2843           * the parent directory and see if that removes the additional
2843 2844           * reference(s).  If that doesn't do it, then use dnlc_purge_vp
2844 2845           * to completely remove any references to the directory which
2845 2846           * might still exist in the DNLC.
2846 2847           */
2847 2848          if (vp->v_count > 1) {
2848 2849                  dnlc_remove(vp, "..");
2849 2850                  if (vp->v_count > 1)
2850 2851                          dnlc_purge_vp(vp);
2851 2852          }
2852 2853  
2853 2854          douprintf = 1;
2854 2855  
2855 2856          error = rfs2call(VTOMI(dvp), RFS_RMDIR,
2856 2857              xdr_diropargs, (caddr_t)&da,
2857 2858              xdr_enum, (caddr_t)&status, cr,
2858 2859              &douprintf, &status, 0, NULL);
2859 2860  
2860 2861          PURGE_ATTRCACHE(dvp);   /* mod time changed */
2861 2862  
2862 2863          if (error) {
2863 2864                  VN_RELE(vp);
2864 2865                  nfs_rw_exit(&drp->r_rwlock);
2865 2866                  return (error);
2866 2867          }
2867 2868  
2868 2869          error = geterrno(status);
2869 2870          if (!error) {
2870 2871                  if (HAVE_RDDIR_CACHE(drp))
2871 2872                          nfs_purge_rddir_cache(dvp);
2872 2873                  if (HAVE_RDDIR_CACHE(VTOR(vp)))
2873 2874                          nfs_purge_rddir_cache(vp);
2874 2875          } else {
2875 2876                  PURGE_STALE_FH(error, dvp, cr);
2876 2877                  /*
2877 2878                   * System V defines rmdir to return EEXIST, not
2878 2879                   * ENOTEMPTY if the directory is not empty.  Over
2879 2880                   * the wire, the error is NFSERR_ENOTEMPTY which
2880 2881                   * geterrno maps to ENOTEMPTY.
2881 2882                   */
2882 2883                  if (error == ENOTEMPTY)
2883 2884                          error = EEXIST;
2884 2885          }
2885 2886  
2886 2887          if (error == 0) {
2887 2888                  vnevent_rmdir(vp, dvp, nm, ct);
2888 2889          }
2889 2890          VN_RELE(vp);
2890 2891  
2891 2892          nfs_rw_exit(&drp->r_rwlock);
2892 2893  
2893 2894          return (error);
2894 2895  }
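
The two-step eviction above escalates from cheap to expensive: dnlc_remove() drops one known entry, and only if extra holds remain does the full dnlc_purge_vp() scan run. A toy model of the accounting, assuming (as the DNLC does) that every name-cache entry holds a vnode reference; the names and counts here are invented:

    #include <stdio.h>

    struct dir_model {
            int v_count;            /* vnode hold count */
            int dnlc_entries;       /* name-cache entries naming it */
    };

    static void
    drop_one(struct dir_model *d)           /* dnlc_remove(vp, "..") */
    {
            if (d->dnlc_entries > 0) {
                    d->dnlc_entries--;
                    d->v_count--;
            }
    }

    static void
    purge_all(struct dir_model *d)          /* dnlc_purge_vp(vp) */
    {
            d->v_count -= d->dnlc_entries;
            d->dnlc_entries = 0;
    }

    int
    main(void)
    {
            /* one hold from our lookup plus two cached name entries */
            struct dir_model d = { 3, 2 };

            if (d.v_count > 1) {
                    drop_one(&d);
                    if (d.v_count > 1)
                            purge_all(&d);
            }
            (void) printf("holds left: %d\n", d.v_count);
            return (0);
    }
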
2895 2896  
2896 2897  /* ARGSUSED */
2897 2898  static int
2898 2899  nfs_symlink(vnode_t *dvp, char *lnm, struct vattr *tva, char *tnm, cred_t *cr,
2899 2900          caller_context_t *ct, int flags)
2900 2901  {
2901 2902          int error;
2902 2903          struct nfsslargs args;
2903 2904          enum nfsstat status;
2904 2905          int douprintf;
2905 2906          rnode_t *drp;
2906 2907  
2907 2908          if (nfs_zone() != VTOMI(dvp)->mi_zone)
2908 2909                  return (EPERM);
2909 2910          setdiropargs(&args.sla_from, lnm, dvp);
2910 2911          args.sla_sa = &args.sla_sa_buf;
2911 2912          error = vattr_to_sattr(tva, args.sla_sa);
2912 2913          if (error) {
2913 2914                  /* req time field(s) overflow - return immediately */
2914 2915                  return (error);
2915 2916          }
2916 2917          args.sla_tnm = tnm;
2917 2918  
2918 2919          drp = VTOR(dvp);
2919 2920          if (nfs_rw_enter_sig(&drp->r_rwlock, RW_WRITER, INTR(dvp)))
2920 2921                  return (EINTR);
2921 2922  
2922 2923          dnlc_remove(dvp, lnm);
2923 2924  
2924 2925          douprintf = 1;
2925 2926  
2926 2927          error = rfs2call(VTOMI(dvp), RFS_SYMLINK,
2927 2928              xdr_slargs, (caddr_t)&args,
2928 2929              xdr_enum, (caddr_t)&status, cr,
2929 2930              &douprintf, &status, 0, NULL);
2930 2931  
2931 2932          PURGE_ATTRCACHE(dvp);   /* mod time changed */
2932 2933  
2933 2934          if (!error) {
2934 2935                  error = geterrno(status);
2935 2936                  if (!error) {
2936 2937                          if (HAVE_RDDIR_CACHE(drp))
2937 2938                                  nfs_purge_rddir_cache(dvp);
2938 2939                  } else {
2939 2940                          PURGE_STALE_FH(error, dvp, cr);
2940 2941                  }
2941 2942          }
2942 2943  
2943 2944          nfs_rw_exit(&drp->r_rwlock);
2944 2945  
2945 2946          return (error);
2946 2947  }
2947 2948  
2948 2949  #ifdef DEBUG
2949 2950  static int nfs_readdir_cache_hits = 0;
2950 2951  static int nfs_readdir_cache_shorts = 0;
2951 2952  static int nfs_readdir_cache_waits = 0;
2952 2953  static int nfs_readdir_cache_misses = 0;
2953 2954  static int nfs_readdir_readahead = 0;
2954 2955  #endif
2955 2956  
2956      -static int nfs_shrinkreaddir = 0;
     2957 +volatile int nfs_shrinkreaddir = 0;
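
The volatile qualifier added here keeps the compiler from caching or constant-folding the tunable's value, so a value patched into the running kernel is actually observed by this code. Assuming the client globals live in the "nfs" module, either of the usual mechanisms applies; a sketch:

    * /etc/system fragment, takes effect at the next boot:
    set nfs:nfs_shrinkreaddir = 1

    * or patch the running kernel (32-bit write) with mdb:
    # echo 'nfs_shrinkreaddir/W 1' | mdb -kw
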
2957 2958  
2958 2959  /*
2959 2960   * Read directory entries.
2960 2961   * There are some weird things to look out for here.  The uio_offset
2961 2962   * field is either 0 or it is the offset returned from a previous
2962 2963   * readdir.  It is an opaque value used by the server to find the
2963 2964   * correct directory block to read. The count field is the number
2964 2965   * of blocks to read on the server.  This is advisory only, the server
2965 2966   * may return only one block's worth of entries.  Entries may be compressed
2966 2967   * on the server.
2967 2968   */
2968 2969  /* ARGSUSED */
2969 2970  static int
2970 2971  nfs_readdir(vnode_t *vp, struct uio *uiop, cred_t *cr, int *eofp,
2971 2972          caller_context_t *ct, int flags)
2972 2973  {
2973 2974          int error;
2974 2975          size_t count;
2975 2976          rnode_t *rp;
2976 2977          rddir_cache *rdc;
2977 2978          rddir_cache *nrdc;
2978 2979          rddir_cache *rrdc;
2979 2980  #ifdef DEBUG
2980 2981          int missed;
2981 2982  #endif
2982 2983          rddir_cache srdc;
2983 2984          avl_index_t where;
2984 2985  
2985 2986          rp = VTOR(vp);
2986 2987  
2987 2988          ASSERT(nfs_rw_lock_held(&rp->r_rwlock, RW_READER));
2988 2989          if (nfs_zone() != VTOMI(vp)->mi_zone)
2989 2990                  return (EIO);
2990 2991          /*
2991 2992           * Make sure that the directory cache is valid.
2992 2993           */
2993 2994          if (HAVE_RDDIR_CACHE(rp)) {
2994 2995                  if (nfs_disable_rddir_cache) {
2995 2996                          /*
2996 2997                           * Setting nfs_disable_rddir_cache in /etc/system
2997 2998                           * allows interoperability with servers that do not
2998 2999                           * properly update the attributes of directories.
2999 3000                           * Any cached information gets purged before an
3000 3001                           * access is made to it.
3001 3002                           */
3002 3003                          nfs_purge_rddir_cache(vp);
3003 3004                  } else {
3004 3005                          error = nfs_validate_caches(vp, cr);
3005 3006                          if (error)
3006 3007                                  return (error);
3007 3008                  }
3008 3009          }
3009 3010  
3010 3011          /*
3011 3012           * UGLINESS: SunOS 3.2 servers apparently cannot always handle an
3012 3013           * RFS_READDIR request with rda_count set to more than 0x400. So
3013 3014           * we reduce the request size here purely for compatibility.
3014 3015           *
3015 3016           * In general, this is no longer required.  However, if a server
3016 3017           * is discovered which can not handle requests larger than 1024,
3017 3018           * nfs_shrinkreaddir can be set to 1 to enable this backwards
3018 3019           * compatibility.
3019 3020           *
3020 3021           * In any case, the request size is limited to NFS_MAXDATA bytes.
3021 3022           */
3022 3023          count = MIN(uiop->uio_iov->iov_len,
3023 3024              nfs_shrinkreaddir ? 0x400 : NFS_MAXDATA);
3024 3025  
3025 3026          nrdc = NULL;
3026 3027  #ifdef DEBUG
3027 3028          missed = 0;
3028 3029  #endif
3029 3030  top:
3030 3031          /*
3031 3032           * Short circuit last readdir which always returns 0 bytes.
3032 3033           * This can be done after the directory has been read through
3033 3034           * completely at least once.  This will set r_direof which
3034 3035           * can be used to find the value of the last cookie.
3035 3036           */
3036 3037          mutex_enter(&rp->r_statelock);
3037 3038          if (rp->r_direof != NULL &&
3038 3039              uiop->uio_offset == rp->r_direof->nfs_ncookie) {
3039 3040                  mutex_exit(&rp->r_statelock);
3040 3041  #ifdef DEBUG
3041 3042                  nfs_readdir_cache_shorts++;
3042 3043  #endif
3043 3044                  if (eofp)
3044 3045                          *eofp = 1;
3045 3046                  if (nrdc != NULL)
3046 3047                          rddir_cache_rele(nrdc);
3047 3048                  return (0);
3048 3049          }
3049 3050          /*
3050 3051           * Look for a cache entry.  Cache entries are identified
3051 3052           * by the NFS cookie value and the byte count requested.
3052 3053           */
3053 3054          srdc.nfs_cookie = uiop->uio_offset;
3054 3055          srdc.buflen = count;
3055 3056          rdc = avl_find(&rp->r_dir, &srdc, &where);
3056 3057          if (rdc != NULL) {
3057 3058                  rddir_cache_hold(rdc);
3058 3059                  /*
3059 3060                   * If the cache entry is in the process of being
3060 3061                   * filled in, wait until this completes.  The
3061 3062                   * RDDIRWAIT bit is set to indicate that someone
3062 3063                   * is waiting, and when the thread currently
3063 3064                   * filling the entry is done, it should do a
3064 3065                   * cv_broadcast to wake up all of the threads
3065 3066                   * waiting for it to finish.
3066 3067                   */
3067 3068                  if (rdc->flags & RDDIR) {
3068 3069                          nfs_rw_exit(&rp->r_rwlock);
3069 3070                          rdc->flags |= RDDIRWAIT;
3070 3071  #ifdef DEBUG
3071 3072                          nfs_readdir_cache_waits++;
3072 3073  #endif
3073 3074                          if (!cv_wait_sig(&rdc->cv, &rp->r_statelock)) {
3074 3075                                  /*
3075 3076                                   * We got interrupted, probably
3076 3077                                   * the user typed ^C or an alarm
3077 3078                                   * fired.  We free the new entry
3078 3079                                   * if we allocated one.
3079 3080                                   */
3080 3081                                  mutex_exit(&rp->r_statelock);
3081 3082                                  (void) nfs_rw_enter_sig(&rp->r_rwlock,
3082 3083                                      RW_READER, FALSE);
3083 3084                                  rddir_cache_rele(rdc);
3084 3085                                  if (nrdc != NULL)
3085 3086                                          rddir_cache_rele(nrdc);
3086 3087                                  return (EINTR);
3087 3088                          }
3088 3089                          mutex_exit(&rp->r_statelock);
3089 3090                          (void) nfs_rw_enter_sig(&rp->r_rwlock,
3090 3091                              RW_READER, FALSE);
3091 3092                          rddir_cache_rele(rdc);
3092 3093                          goto top;
3093 3094                  }
3094 3095                  /*
3095 3096                   * Check to see if a readdir is required to
3096 3097                   * fill the entry.  If so, mark this entry
3097 3098                   * as being filled, remove our reference,
3098 3099                   * and branch to the code to fill the entry.
3099 3100                   */
3100 3101                  if (rdc->flags & RDDIRREQ) {
3101 3102                          rdc->flags &= ~RDDIRREQ;
3102 3103                          rdc->flags |= RDDIR;
3103 3104                          if (nrdc != NULL)
3104 3105                                  rddir_cache_rele(nrdc);
3105 3106                          nrdc = rdc;
3106 3107                          mutex_exit(&rp->r_statelock);
3107 3108                          goto bottom;
3108 3109                  }
3109 3110  #ifdef DEBUG
3110 3111                  if (!missed)
3111 3112                          nfs_readdir_cache_hits++;
3112 3113  #endif
3113 3114                  /*
3114 3115                   * If an error occurred while attempting
3115 3116                   * to fill the cache entry, just return it.
3116 3117                   */
3117 3118                  if (rdc->error) {
3118 3119                          error = rdc->error;
3119 3120                          mutex_exit(&rp->r_statelock);
3120 3121                          rddir_cache_rele(rdc);
3121 3122                          if (nrdc != NULL)
3122 3123                                  rddir_cache_rele(nrdc);
3123 3124                          return (error);
3124 3125                  }
3125 3126  
3126 3127                  /*
3127 3128                   * The cache entry is complete and good,
3128 3129                   * copyout the dirent structs to the calling
3129 3130                   * thread.
3130 3131                   */
3131 3132                  error = uiomove(rdc->entries, rdc->entlen, UIO_READ, uiop);
3132 3133  
3133 3134                  /*
3134 3135                   * If no error occurred during the copyout,
3135 3136                   * update the offset in the uio struct to
3136 3137                   * contain the value of the next cookie
3137 3138                   * and set the eof value appropriately.
3138 3139                   */
3139 3140                  if (!error) {
3140 3141                          uiop->uio_offset = rdc->nfs_ncookie;
3141 3142                          if (eofp)
3142 3143                                  *eofp = rdc->eof;
3143 3144                  }
3144 3145  
3145 3146                  /*
3146 3147                   * Decide whether to do readahead.  Don't if we
3147 3148                   * have already read to the end of the directory.
3148 3149                   */
3149 3150                  if (rdc->eof) {
3150 3151                          rp->r_direof = rdc;
3151 3152                          mutex_exit(&rp->r_statelock);
3152 3153                          rddir_cache_rele(rdc);
3153 3154                          if (nrdc != NULL)
3154 3155                                  rddir_cache_rele(nrdc);
3155 3156                          return (error);
3156 3157                  }
3157 3158  
3158 3159                  /*
3159 3160                   * Check to see whether we found an entry
3160 3161                   * for the readahead.  If so, we don't need
3161 3162                   * to do anything further, so free the new
3162 3163                   * entry if one was allocated.  Otherwise,
3163 3164                   * allocate a new entry, add it to the cache,
3164 3165                   * and then initiate an asynchronous readdir
3165 3166                   * operation to fill it.
3166 3167                   */
3167 3168                  srdc.nfs_cookie = rdc->nfs_ncookie;
3168 3169                  srdc.buflen = count;
3169 3170                  rrdc = avl_find(&rp->r_dir, &srdc, &where);
3170 3171                  if (rrdc != NULL) {
3171 3172                          if (nrdc != NULL)
3172 3173                                  rddir_cache_rele(nrdc);
3173 3174                  } else {
3174 3175                          if (nrdc != NULL)
3175 3176                                  rrdc = nrdc;
3176 3177                          else {
3177 3178                                  rrdc = rddir_cache_alloc(KM_NOSLEEP);
3178 3179                          }
3179 3180                          if (rrdc != NULL) {
3180 3181                                  rrdc->nfs_cookie = rdc->nfs_ncookie;
3181 3182                                  rrdc->buflen = count;
3182 3183                                  avl_insert(&rp->r_dir, rrdc, where);
3183 3184                                  rddir_cache_hold(rrdc);
3184 3185                                  mutex_exit(&rp->r_statelock);
3185 3186                                  rddir_cache_rele(rdc);
3186 3187  #ifdef DEBUG
3187 3188                                  nfs_readdir_readahead++;
3188 3189  #endif
3189 3190                                  nfs_async_readdir(vp, rrdc, cr, nfsreaddir);
3190 3191                                  return (error);
3191 3192                          }
3192 3193                  }
3193 3194  
3194 3195                  mutex_exit(&rp->r_statelock);
3195 3196                  rddir_cache_rele(rdc);
3196 3197                  return (error);
3197 3198          }
3198 3199  
3199 3200          /*
3200 3201           * Didn't find an entry in the cache.  Construct a new empty
3201 3202           * entry and link it into the cache.  Other processes attempting
3202 3203           * to access this entry will need to wait until it is filled in.
3203 3204           *
3204 3205           * Since kmem_alloc may block, another pass through the cache
3205 3206           * will need to be taken to make sure that another process
3206 3207           * hasn't already added an entry to the cache for this request.
3207 3208           */
3208 3209          if (nrdc == NULL) {
3209 3210                  mutex_exit(&rp->r_statelock);
3210 3211                  nrdc = rddir_cache_alloc(KM_SLEEP);
3211 3212                  nrdc->nfs_cookie = uiop->uio_offset;
3212 3213                  nrdc->buflen = count;
3213 3214                  goto top;
3214 3215          }
3215 3216  
3216 3217          /*
3217 3218           * Add this entry to the cache.
3218 3219           */
3219 3220          avl_insert(&rp->r_dir, nrdc, where);
3220 3221          rddir_cache_hold(nrdc);
3221 3222          mutex_exit(&rp->r_statelock);
3222 3223  
3223 3224  bottom:
3224 3225  #ifdef DEBUG
3225 3226          missed = 1;
3226 3227          nfs_readdir_cache_misses++;
3227 3228  #endif
3228 3229          /*
3229 3230           * Do the readdir.
3230 3231           */
3231 3232          error = nfsreaddir(vp, nrdc, cr);
3232 3233  
3233 3234          /*
3234 3235           * If this operation failed, just return the error which occurred.
3235 3236           */
3236 3237          if (error != 0)
3237 3238                  return (error);
3238 3239  
3239 3240          /*
3240 3241           * Since the RPC operation will have taken some time and blocked
3241 3242           * this process, another pass through the cache will need to be
3242 3243           * taken to find the correct cache entry.  It is possible that
3243 3244           * the correct cache entry will not be there (although one was
3244 3245           * added) because the directory changed during the RPC operation
3245 3246           * and the readdir cache was flushed.  In this case, just start
3246 3247           * over.  It is hoped that this will not happen too often... :-)
3247 3248           */
3248 3249          nrdc = NULL;
3249 3250          goto top;
3250 3251          /* NOTREACHED */
3251 3252  }
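
Two details of the cache above deserve emphasis. First, entries are keyed by the pair (NFS cookie, requested buffer length), which is why srdc is seeded with both fields before each avl_find(): the same cookie read with a different buffer size is a different cache entry. A comparator of the shape such an AVL tree needs (names are hypothetical; the real comparator lives with the rddir_cache code, not in this file):

    #include <stddef.h>
    #include <stdio.h>

    struct rddir_key {
            unsigned long long cookie;      /* models rdc->nfs_cookie */
            size_t buflen;                  /* models rdc->buflen */
    };

    /* Three-way comparator over (cookie, buflen). */
    static int
    rddir_key_cmp(const struct rddir_key *a, const struct rddir_key *b)
    {
            if (a->cookie != b->cookie)
                    return (a->cookie < b->cookie ? -1 : 1);
            if (a->buflen != b->buflen)
                    return (a->buflen < b->buflen ? -1 : 1);
            return (0);
    }

    int
    main(void)
    {
            struct rddir_key a = { 0, 1024 }, b = { 512, 1024 };

            (void) printf("%d\n", rddir_key_cmp(&a, &b));   /* -1 */
            return (0);
    }

Second, the KM_SLEEP allocation is what forces the "goto top" revalidation: once r_statelock has been dropped in order to allocate, another thread may already have inserted an entry for the same request, so the lookup must be redone before the insert.
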
3252 3253  
3253 3254  static int
3254 3255  nfsreaddir(vnode_t *vp, rddir_cache *rdc, cred_t *cr)
3255 3256  {
3256 3257          int error;
3257 3258          struct nfsrddirargs rda;
3258 3259          struct nfsrddirres rd;
3259 3260          rnode_t *rp;
3260 3261          mntinfo_t *mi;
3261 3262          uint_t count;
3262 3263          int douprintf;
3263 3264          failinfo_t fi, *fip;
3264 3265  
3265 3266          ASSERT(nfs_zone() == VTOMI(vp)->mi_zone);
3266 3267          count = rdc->buflen;
3267 3268  
3268 3269          rp = VTOR(vp);
3269 3270          mi = VTOMI(vp);
3270 3271  
3271 3272          rda.rda_fh = *VTOFH(vp);
3272 3273          rda.rda_offset = rdc->nfs_cookie;
3273 3274  
3274 3275          /*
3275 3276           * NFS client failover support
3276 3277           * suppress failover unless we have a zero cookie
3277 3278           */
3278 3279          if (rdc->nfs_cookie == (off_t)0) {
3279 3280                  fi.vp = vp;
3280 3281                  fi.fhp = (caddr_t)&rda.rda_fh;
3281 3282                  fi.copyproc = nfscopyfh;
3282 3283                  fi.lookupproc = nfslookup;
3283 3284                  fi.xattrdirproc = acl_getxattrdir2;
3284 3285                  fip = &fi;
3285 3286          } else {
3286 3287                  fip = NULL;
3287 3288          }
3288 3289  
3289      -        rd.rd_entries = kmem_alloc(rdc->buflen, KM_SLEEP);
     3290 +        rd.rd_dirents = kmem_alloc(rdc->buflen, KM_SLEEP);
3290 3291          rd.rd_size = count;
3291 3292          rd.rd_offset = rda.rda_offset;
3292 3293  
3293 3294          douprintf = 1;
3294 3295  
3295 3296          if (mi->mi_io_kstats) {
3296 3297                  mutex_enter(&mi->mi_lock);
3297 3298                  kstat_runq_enter(KSTAT_IO_PTR(mi->mi_io_kstats));
3298 3299                  mutex_exit(&mi->mi_lock);
3299 3300          }
3300 3301  
3301 3302          do {
3302 3303                  rda.rda_count = MIN(count, mi->mi_curread);
3303 3304                  error = rfs2call(mi, RFS_READDIR,
3304 3305                      xdr_rddirargs, (caddr_t)&rda,
3305 3306                      xdr_getrddirres, (caddr_t)&rd, cr,
3306 3307                      &douprintf, &rd.rd_status, 0, fip);
3307 3308          } while (error == ENFS_TRYAGAIN);
3308 3309  
3309 3310          if (mi->mi_io_kstats) {
3310 3311                  mutex_enter(&mi->mi_lock);
3311 3312                  kstat_runq_exit(KSTAT_IO_PTR(mi->mi_io_kstats));
3312 3313                  mutex_exit(&mi->mi_lock);
3313 3314          }
3314 3315  
3315 3316          /*
3316 3317           * Since we are actually doing a READDIR RPC, we must have
3317 3318           * exclusive access to the cache entry being filled.  Thus,
3318 3319           * it is safe to update all fields except for the flags
3319 3320           * field.  The r_statelock in the rnode must be held to
3320 3321           * prevent two different threads from simultaneously
3321 3322           * attempting to update the flags field.  This can happen
3322 3323           * if we are turning off RDDIR and the other thread is
3323 3324           * trying to set RDDIRWAIT.
3324 3325           */
3325 3326          ASSERT(rdc->flags & RDDIR);
3326 3327          if (!error) {
3327 3328                  error = geterrno(rd.rd_status);
3328 3329                  if (!error) {
3329 3330                          rdc->nfs_ncookie = rd.rd_offset;
3330 3331                          rdc->eof = rd.rd_eof ? 1 : 0;
3331 3332                          rdc->entlen = rd.rd_size;
3332 3333                          ASSERT(rdc->entlen <= rdc->buflen);
3333 3334  #ifdef DEBUG
3334 3335                          rdc->entries = rddir_cache_buf_alloc(rdc->buflen,
3335 3336                              KM_SLEEP);
3336 3337  #else
3337 3338                          rdc->entries = kmem_alloc(rdc->buflen, KM_SLEEP);
3338 3339  #endif
3339      -                        bcopy(rd.rd_entries, rdc->entries, rdc->entlen);
     3340 +                        bcopy(rd.rd_dirents, rdc->entries, rdc->entlen);
3340 3341                          rdc->error = 0;
3341 3342                          if (mi->mi_io_kstats) {
3342 3343                                  mutex_enter(&mi->mi_lock);
3343 3344                                  KSTAT_IO_PTR(mi->mi_io_kstats)->reads++;
3344 3345                                  KSTAT_IO_PTR(mi->mi_io_kstats)->nread +=
3345 3346                                      rd.rd_size;
3346 3347                                  mutex_exit(&mi->mi_lock);
3347 3348                          }
3348 3349                  } else {
3349 3350                          PURGE_STALE_FH(error, vp, cr);
3350 3351                  }
3351 3352          }
3352 3353          if (error) {
3353 3354                  rdc->entries = NULL;
3354 3355                  rdc->error = error;
3355 3356          }
3356      -        kmem_free(rd.rd_entries, rdc->buflen);
     3357 +        kmem_free(rd.rd_dirents, rdc->buflen);
3357 3358  
3358 3359          mutex_enter(&rp->r_statelock);
3359 3360          rdc->flags &= ~RDDIR;
3360 3361          if (rdc->flags & RDDIRWAIT) {
3361 3362                  rdc->flags &= ~RDDIRWAIT;
3362 3363                  cv_broadcast(&rdc->cv);
3363 3364          }
3364 3365          if (error)
3365 3366                  rdc->flags |= RDDIRREQ;
3366 3367          mutex_exit(&rp->r_statelock);
3367 3368  
3368 3369          rddir_cache_rele(rdc);
3369 3370  
3370 3371          return (error);
3371 3372  }
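
The failover gate at the top of nfsreaddir() follows directly from the cookie semantics described before nfs_readdir(): a nonzero cookie is an opaque value minted by one particular server, so replaying it against a replica after failover would be meaningless. Only a read starting from the beginning of the directory is safe to migrate. Distilled into a sketch with invented names:

    #include <stdio.h>

    static const char *
    readdir_failover(unsigned long long cookie)
    {
            /* only a from-the-top read may move to another server */
            return (cookie == 0 ? "failover allowed" :
                "failover suppressed");
    }

    int
    main(void)
    {
            (void) printf("%s\n", readdir_failover(0));
            (void) printf("%s\n", readdir_failover(0x1234));
            return (0);
    }
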
3372 3373  
3373 3374  #ifdef DEBUG
3374 3375  static int nfs_bio_do_stop = 0;
3375 3376  #endif
3376 3377  
3377 3378  static int
3378 3379  nfs_bio(struct buf *bp, cred_t *cr)
3379 3380  {
3380 3381          rnode_t *rp = VTOR(bp->b_vp);
3381 3382          int count;
3382 3383          int error;
3383 3384          cred_t *cred;
3384 3385          uint_t offset;
3385 3386  
3386 3387          DTRACE_IO1(start, struct buf *, bp);
3387 3388  
3388 3389          ASSERT(nfs_zone() == VTOMI(bp->b_vp)->mi_zone);
3389 3390          offset = dbtob(bp->b_blkno);
3390 3391  
3391 3392          if (bp->b_flags & B_READ) {
3392 3393                  mutex_enter(&rp->r_statelock);
3393 3394                  if (rp->r_cred != NULL) {
3394 3395                          cred = rp->r_cred;
3395 3396                          crhold(cred);
3396 3397                  } else {
3397 3398                          rp->r_cred = cr;
3398 3399                          crhold(cr);
3399 3400                          cred = cr;
3400 3401                          crhold(cred);
3401 3402                  }
3402 3403                  mutex_exit(&rp->r_statelock);
3403 3404          read_again:
3404 3405                  error = bp->b_error = nfsread(bp->b_vp, bp->b_un.b_addr,
3405 3406                      offset, bp->b_bcount, &bp->b_resid, cred);
3406 3407  
3407 3408                  crfree(cred);
3408 3409                  if (!error) {
3409 3410                          if (bp->b_resid) {
3410 3411                                  /*
3411 3412                                   * Didn't get it all because we hit EOF,
3412 3413                                   * zero all the memory beyond the EOF.
3413 3414                                   */
3414 3415                                  /* bzero(rdaddr + */
3415 3416                                  bzero(bp->b_un.b_addr +
3416 3417                                      bp->b_bcount - bp->b_resid, bp->b_resid);
3417 3418                          }
3418 3419                          mutex_enter(&rp->r_statelock);
3419 3420                          if (bp->b_resid == bp->b_bcount &&
3420 3421                              offset >= rp->r_size) {
3421 3422                                  /*
3422 3423                                   * We didn't read anything at all as we are
3423 3424                                   * past EOF.  Return an error indicator back
3424 3425                                   * but don't destroy the pages (yet).
3425 3426                                   */
3426 3427                                  error = NFS_EOF;
3427 3428                          }
3428 3429                          mutex_exit(&rp->r_statelock);
3429 3430                  } else if (error == EACCES) {
3430 3431                          mutex_enter(&rp->r_statelock);
3431 3432                          if (cred != cr) {
3432 3433                                  if (rp->r_cred != NULL)
3433 3434                                          crfree(rp->r_cred);
3434 3435                                  rp->r_cred = cr;
3435 3436                                  crhold(cr);
3436 3437                                  cred = cr;
3437 3438                                  crhold(cred);
3438 3439                                  mutex_exit(&rp->r_statelock);
3439 3440                                  goto read_again;
3440 3441                          }
3441 3442                          mutex_exit(&rp->r_statelock);
3442 3443                  }
3443 3444          } else {
3444 3445                  if (!(rp->r_flags & RSTALE)) {
3445 3446                          mutex_enter(&rp->r_statelock);
3446 3447                          if (rp->r_cred != NULL) {
3447 3448                                  cred = rp->r_cred;
3448 3449                                  crhold(cred);
3449 3450                          } else {
3450 3451                                  rp->r_cred = cr;
3451 3452                                  crhold(cr);
3452 3453                                  cred = cr;
3453 3454                                  crhold(cred);
3454 3455                          }
3455 3456                          mutex_exit(&rp->r_statelock);
3456 3457                  write_again:
3457 3458                          mutex_enter(&rp->r_statelock);
3458 3459                          count = MIN(bp->b_bcount, rp->r_size - offset);
3459 3460                          mutex_exit(&rp->r_statelock);
3460 3461                          if (count < 0)
3461 3462                                  cmn_err(CE_PANIC, "nfs_bio: write count < 0");
3462 3463  #ifdef DEBUG
3463 3464                          if (count == 0) {
3464 3465                                  zcmn_err(getzoneid(), CE_WARN,
3465 3466                                      "nfs_bio: zero length write at %d",
3466 3467                                      offset);
3467 3468                                  nfs_printfhandle(&rp->r_fh);
3468 3469                                  if (nfs_bio_do_stop)
3469 3470                                          debug_enter("nfs_bio");
3470 3471                          }
3471 3472  #endif
3472 3473                          error = nfswrite(bp->b_vp, bp->b_un.b_addr, offset,
3473 3474                              count, cred);
3474 3475                          if (error == EACCES) {
3475 3476                                  mutex_enter(&rp->r_statelock);
3476 3477                                  if (cred != cr) {
3477 3478                                          if (rp->r_cred != NULL)
3478 3479                                                  crfree(rp->r_cred);
3479 3480                                          rp->r_cred = cr;
3480 3481                                          crhold(cr);
3481 3482                                          crfree(cred);
3482 3483                                          cred = cr;
3483 3484                                          crhold(cred);
3484 3485                                          mutex_exit(&rp->r_statelock);
3485 3486                                          goto write_again;
3486 3487                                  }
3487 3488                                  mutex_exit(&rp->r_statelock);
3488 3489                          }
3489 3490                          bp->b_error = error;
3490 3491                          if (error && error != EINTR) {
3491 3492                                  /*
3492 3493                                   * Don't print EDQUOT errors on the console.
3493 3494                                   * Don't print asynchronous EACCES errors.
3494 3495                                   * Don't print EFBIG errors.
3495 3496                                   * Print all other write errors.
3496 3497                                   */
3497 3498                                  if (error != EDQUOT && error != EFBIG &&
3498 3499                                      (error != EACCES ||
3499 3500                                      !(bp->b_flags & B_ASYNC)))
3500 3501                                          nfs_write_error(bp->b_vp, error, cred);
3501 3502                                  /*
3502 3503                                   * Update r_error and r_flags as appropriate.
3503 3504                                   * If the error was ESTALE, then mark the
3504 3505                                   * rnode as not being writeable and save
3505 3506                                   * the error status.  Otherwise, save any
3506 3507                                   * errors which occur from asynchronous
3507 3508                                   * page invalidations.  Any errors occurring
3508 3509                                   * from other operations should be saved
3509 3510                                   * by the caller.
3510 3511                                   */
3511 3512                                  mutex_enter(&rp->r_statelock);
3512 3513                                  if (error == ESTALE) {
3513 3514                                          rp->r_flags |= RSTALE;
3514 3515                                          if (!rp->r_error)
3515 3516                                                  rp->r_error = error;
3516 3517                                  } else if (!rp->r_error &&
3517 3518                                      (bp->b_flags &
3518 3519                                      (B_INVAL|B_FORCE|B_ASYNC)) ==
3519 3520                                      (B_INVAL|B_FORCE|B_ASYNC)) {
3520 3521                                          rp->r_error = error;
3521 3522                                  }
3522 3523                                  mutex_exit(&rp->r_statelock);
3523 3524                          }
3524 3525                          crfree(cred);
3525 3526                  } else {
3526 3527                          error = rp->r_error;
3527 3528                          /*
3528 3529                           * A close may have cleared r_error; if so,
3529 3530                           * propagate the ESTALE error return properly.
3530 3531                           */
3531 3532                          if (error == 0)
3532 3533                                  error = ESTALE;
3533 3534                  }
3534 3535          }
3535 3536  
3536 3537          if (error != 0 && error != NFS_EOF)
3537 3538                  bp->b_flags |= B_ERROR;
3538 3539  
3539 3540          DTRACE_IO1(done, struct buf *, bp);
3540 3541  
3541 3542          return (error);
3542 3543  }
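
The EACCES path in the write half of nfs_bio() above retries the write exactly once with the caller's credential when the credential cached on the rnode is rejected. A minimal userland sketch of that fallback pattern, with do_write() and a plain int standing in for the over-the-wire call and the kernel cred_t:

    #include <errno.h>
    #include <stdio.h>

    typedef int cred_t;		/* stand-in for the kernel cred_t */

    static int
    do_write(cred_t cred)
    {
            return (cred == 1 ? 0 : EACCES);	/* only cred 1 may write */
    }

    static int
    write_with_fallback(cred_t cached, cred_t caller)
    {
            int error = do_write(cached);

            /* Cached cred rejected; retry exactly once as the caller. */
            if (error == EACCES && cached != caller)
                    error = do_write(caller);
            return (error);
    }

    int
    main(void)
    {
            printf("error = %d\n", write_with_fallback(2, 1));	/* 0 */
            return (0);
    }
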
3543 3544  
3544 3545  /* ARGSUSED */
3545 3546  static int
3546 3547  nfs_fid(vnode_t *vp, fid_t *fidp, caller_context_t *ct)
3547 3548  {
3548 3549          struct nfs_fid *fp;
3549 3550          rnode_t *rp;
3550 3551  
3551 3552          rp = VTOR(vp);
3552 3553  
3553 3554          if (fidp->fid_len < (sizeof (struct nfs_fid) - sizeof (short))) {
3554 3555                  fidp->fid_len = sizeof (struct nfs_fid) - sizeof (short);
3555 3556                  return (ENOSPC);
3556 3557          }
3557 3558          fp = (struct nfs_fid *)fidp;
3558 3559          fp->nf_pad = 0;
3559 3560          fp->nf_len = sizeof (struct nfs_fid) - sizeof (short);
3560 3561          bcopy(rp->r_fh.fh_buf, fp->nf_data, NFS_FHSIZE);
3561 3562          return (0);
3562 3563  }
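
nfs_fid() above uses the common "report the required size" protocol: when the caller's buffer is too small, the needed length is written back into fid_len and ENOSPC is returned so the caller can allocate and retry. A hedged standalone sketch of the same protocol; struct fidbuf, get_fid(), and HANDLE_LEN are hypothetical:

    #include <errno.h>
    #include <string.h>

    #define HANDLE_LEN 12			/* illustrative handle size */

    struct fidbuf {
            unsigned short	len;		/* in: capacity, out: needed/used */
            char		data[64];
    };

    static int
    get_fid(struct fidbuf *fp, const char *handle)
    {
            if (fp->len < HANDLE_LEN) {
                    fp->len = HANDLE_LEN;	/* tell the caller what fits */
                    return (ENOSPC);
            }
            fp->len = HANDLE_LEN;
            (void) memcpy(fp->data, handle, HANDLE_LEN);
            return (0);
    }

    int
    main(void)
    {
            struct fidbuf fb = { .len = sizeof (fb.data) };

            return (get_fid(&fb, "filehandle??"));
    }
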
3563 3564  
3564 3565  /* ARGSUSED2 */
3565 3566  static int
3566 3567  nfs_rwlock(vnode_t *vp, int write_lock, caller_context_t *ctp)
3567 3568  {
3568 3569          rnode_t *rp = VTOR(vp);
3569 3570  
3570 3571          if (!write_lock) {
3571 3572                  (void) nfs_rw_enter_sig(&rp->r_rwlock, RW_READER, FALSE);
3572 3573                  return (V_WRITELOCK_FALSE);
3573 3574          }
3574 3575  
3575 3576          if ((rp->r_flags & RDIRECTIO) || (VTOMI(vp)->mi_flags & MI_DIRECTIO)) {
3576 3577                  (void) nfs_rw_enter_sig(&rp->r_rwlock, RW_READER, FALSE);
3577 3578                  if (rp->r_mapcnt == 0 && !vn_has_cached_data(vp))
3578 3579                          return (V_WRITELOCK_FALSE);
3579 3580                  nfs_rw_exit(&rp->r_rwlock);
3580 3581          }
3581 3582  
3582 3583          (void) nfs_rw_enter_sig(&rp->r_rwlock, RW_WRITER, FALSE);
3583 3584          return (V_WRITELOCK_TRUE);
3584 3585  }
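
For writers, nfs_rwlock() above settles for a shared hold when direct I/O is in effect and the file has no mappings or cached pages, so multiple direct writers can proceed concurrently. The decision, restated as a predicate with illustrative parameter names:

    #include <stdio.h>

    /* 1 = a shared (reader) hold suffices for this write, 0 = need exclusive */
    static int
    write_can_share(int directio, long mapcnt, int has_cached_pages)
    {
            return (directio && mapcnt == 0 && !has_cached_pages);
    }

    int
    main(void)
    {
            printf("%d\n", write_can_share(1, 0, 0));	/* 1 */
            printf("%d\n", write_can_share(1, 0, 1));	/* 0: pages cached */
            return (0);
    }
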
3585 3586  
3586 3587  /* ARGSUSED */
3587 3588  static void
3588 3589  nfs_rwunlock(vnode_t *vp, int write_lock, caller_context_t *ctp)
3589 3590  {
3590 3591          rnode_t *rp = VTOR(vp);
3591 3592  
3592 3593          nfs_rw_exit(&rp->r_rwlock);
3593 3594  }
3594 3595  
3595 3596  /* ARGSUSED */
3596 3597  static int
3597 3598  nfs_seek(vnode_t *vp, offset_t ooff, offset_t *noffp, caller_context_t *ct)
3598 3599  {
3599 3600  
3600 3601          /*
3601 3602           * Because we stuff the readdir cookie into the offset field
3602 3603           * someone may attempt to do an lseek with the cookie which
3603 3604           * we want to succeed.
3604 3605           */
3605 3606          if (vp->v_type == VDIR)
3606 3607                  return (0);
3607 3608          if (*noffp < 0 || *noffp > MAXOFF32_T)
3608 3609                  return (EINVAL);
3609 3610          return (0);
3610 3611  }
3611 3612  
3612 3613  /*
3613 3614   * Number of NFS_MAXDATA blocks to read ahead,
3614 3615   * optimized for 100 base-T.
3615 3616   */
3616      -static int nfs_nra = 4;
     3617 +volatile int nfs_nra = 4;
3617 3618  
3618 3619  #ifdef DEBUG
3619 3620  static int nfs_lostpage = 0;    /* number of times we lost original page */
3620 3621  #endif
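
The only functional change in this hunk is nfs_nra losing static and gaining volatile. Without volatile, a tunable that the module itself never writes may be treated as a constant and folded into every use, so patching it at runtime (e.g. with mdb -kw) could have no visible effect. A userland sketch of the intent, with demo_nra standing in for nfs_nra:

    #include <stdio.h>

    volatile int demo_nra = 4;	/* stand-in for the nfs_nra tunable */

    static int
    readaheads_to_issue(void)
    {
            return (demo_nra);	/* volatile forces a fresh load each call */
    }

    int
    main(void)
    {
            printf("%d\n", readaheads_to_issue());	/* 4 */
            demo_nra = 8;		/* simulate a live patch of the tunable */
            printf("%d\n", readaheads_to_issue());	/* 8 */
            return (0);
    }
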
3621 3622  
3622 3623  /*
3623 3624   * Return all the pages from [off..off+len) in file
3624 3625   */
3625 3626  /* ARGSUSED */
3626 3627  static int
3627 3628  nfs_getpage(vnode_t *vp, offset_t off, size_t len, uint_t *protp,
3628 3629          page_t *pl[], size_t plsz, struct seg *seg, caddr_t addr,
3629 3630          enum seg_rw rw, cred_t *cr, caller_context_t *ct)
3630 3631  {
3631 3632          rnode_t *rp;
3632 3633          int error;
3633 3634          mntinfo_t *mi;
3634 3635  
3635 3636          if (vp->v_flag & VNOMAP)
3636 3637                  return (ENOSYS);
3637 3638  
3638 3639          ASSERT(off <= MAXOFF32_T);
3639 3640          if (nfs_zone() != VTOMI(vp)->mi_zone)
3640 3641                  return (EIO);
3641 3642          if (protp != NULL)
3642 3643                  *protp = PROT_ALL;
3643 3644  
3644 3645          /*
3645 3646           * Now validate that the caches are up to date.
3646 3647           */
3647 3648          error = nfs_validate_caches(vp, cr);
3648 3649          if (error)
3649 3650                  return (error);
3650 3651  
3651 3652          rp = VTOR(vp);
3652 3653          mi = VTOMI(vp);
3653 3654  retry:
3654 3655          mutex_enter(&rp->r_statelock);
3655 3656  
3656 3657          /*
3657 3658           * Don't create dirty pages faster than they
3658 3659           * can be cleaned so that the system doesn't
3659 3660           * get imbalanced.  If the async queue is
3660 3661           * maxed out, then wait for it to drain before
3661 3662           * creating more dirty pages.  Also, wait for
3662 3663           * any threads doing pagewalks in the vop_getattr
3663 3664           * entry points so that they don't block for
3664 3665           * long periods.
3665 3666           */
3666 3667          if (rw == S_CREATE) {
3667 3668                  while ((mi->mi_max_threads != 0 &&
3668 3669                      rp->r_awcount > 2 * mi->mi_max_threads) ||
3669 3670                      rp->r_gcount > 0)
3670 3671                          cv_wait(&rp->r_cv, &rp->r_statelock);
3671 3672          }
3672 3673  
3673 3674          /*
3674 3675           * If we are getting called as a side effect of an nfs_write()
3675 3676           * operation the local file size might not be extended yet.
3676 3677           * In this case we want to be able to return pages of zeroes.
3677 3678           */
3678 3679          if (off + len > rp->r_size + PAGEOFFSET && seg != segkmap) {
3679 3680                  mutex_exit(&rp->r_statelock);
3680 3681                  return (EFAULT);                /* beyond EOF */
3681 3682          }
3682 3683  
3683 3684          mutex_exit(&rp->r_statelock);
3684 3685  
3685 3686          error = pvn_getpages(nfs_getapage, vp, off, len, protp, pl, plsz,
3686 3687              seg, addr, rw, cr);
3687 3688  
3688 3689          switch (error) {
3689 3690          case NFS_EOF:
3690 3691                  nfs_purge_caches(vp, NFS_NOPURGE_DNLC, cr);
3691 3692                  goto retry;
3692 3693          case ESTALE:
3693 3694                  PURGE_STALE_FH(error, vp, cr);
3694 3695          }
3695 3696  
3696 3697          return (error);
3697 3698  }
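
The S_CREATE throttle in nfs_getpage() above blocks page creation while the async write queue holds more than twice mi_max_threads requests, or while a pagewalk (r_gcount) is in progress. The wait predicate, restated as a standalone check with illustrative values:

    #include <stdio.h>

    static int
    must_wait(int max_threads, int awcount, int gcount)
    {
            return ((max_threads != 0 && awcount > 2 * max_threads) ||
                gcount > 0);
    }

    int
    main(void)
    {
            printf("%d\n", must_wait(8, 17, 0));	/* 1: queue saturated */
            printf("%d\n", must_wait(8, 16, 0));	/* 0: at the limit is ok */
            printf("%d\n", must_wait(0, 99, 0));	/* 0: throttle disabled */
            return (0);
    }
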
3698 3699  
3699 3700  /*
3700 3701   * Called from pvn_getpages to get a particular page.
3701 3702   */
3702 3703  /* ARGSUSED */
3703 3704  static int
3704 3705  nfs_getapage(vnode_t *vp, u_offset_t off, size_t len, uint_t *protp,
3705 3706          page_t *pl[], size_t plsz, struct seg *seg, caddr_t addr,
3706 3707          enum seg_rw rw, cred_t *cr)
3707 3708  {
3708 3709          rnode_t *rp;
3709 3710          uint_t bsize;
3710 3711          struct buf *bp;
3711 3712          page_t *pp;
3712 3713          u_offset_t lbn;
3713 3714          u_offset_t io_off;
3714 3715          u_offset_t blkoff;
3715 3716          u_offset_t rablkoff;
3716 3717          size_t io_len;
3717 3718          uint_t blksize;
3718 3719          int error;
3719 3720          int readahead;
3720 3721          int readahead_issued = 0;
3721 3722          int ra_window; /* readahead window */
3722 3723          page_t *pagefound;
3723 3724  
3724 3725          if (nfs_zone() != VTOMI(vp)->mi_zone)
3725 3726                  return (EIO);
3726 3727          rp = VTOR(vp);
3727 3728          bsize = MAX(vp->v_vfsp->vfs_bsize, PAGESIZE);
3728 3729  
3729 3730  reread:
3730 3731          bp = NULL;
3731 3732          pp = NULL;
3732 3733          pagefound = NULL;
3733 3734  
3734 3735          if (pl != NULL)
3735 3736                  pl[0] = NULL;
3736 3737  
3737 3738          error = 0;
3738 3739          lbn = off / bsize;
3739 3740          blkoff = lbn * bsize;
3740 3741  
3741 3742          /*
3742 3743           * Queueing up the readahead before doing the synchronous read
3743 3744           * results in a significant increase in read throughput because
3744 3745           * of the increased parallelism between the async threads and
3745 3746           * the process context.
3746 3747           */
3747 3748          if ((off & ((vp->v_vfsp->vfs_bsize) - 1)) == 0 &&
3748 3749              rw != S_CREATE &&
3749 3750              !(vp->v_flag & VNOCACHE)) {
3750 3751                  mutex_enter(&rp->r_statelock);
3751 3752  
3752 3753                  /*
3753 3754                   * Calculate the number of readaheads to do.
3754 3755                   * a) No readaheads at offset = 0.
3755 3756                   * b) Do maximum(nfs_nra) readaheads when the readahead
3756 3757                   *    window is closed.
3757 3758                   * c) Do between 1 and (nfs_nra - 1) readaheads, depending
3758 3759                   *    upon how far open or closed the readahead window is.
3759 3760                   * d) No readaheads if rp->r_nextr is not within the scope
3760 3761                   *    of the readahead window (random i/o).
3761 3762                   */
3762 3763  
3763 3764                  if (off == 0)
3764 3765                          readahead = 0;
3765 3766                  else if (blkoff == rp->r_nextr)
3766 3767                          readahead = nfs_nra;
3767 3768                  else if (rp->r_nextr > blkoff &&
3768 3769                      ((ra_window = (rp->r_nextr - blkoff) / bsize)
3769 3770                      <= (nfs_nra - 1)))
3770 3771                          readahead = nfs_nra - ra_window;
3771 3772                  else
3772 3773                          readahead = 0;
3773 3774  
3774 3775                  rablkoff = rp->r_nextr;
3775 3776                  while (readahead > 0 && rablkoff + bsize < rp->r_size) {
3776 3777                          mutex_exit(&rp->r_statelock);
3777 3778                          if (nfs_async_readahead(vp, rablkoff + bsize,
3778 3779                              addr + (rablkoff + bsize - off), seg, cr,
3779 3780                              nfs_readahead) < 0) {
3780 3781                                  mutex_enter(&rp->r_statelock);
3781 3782                                  break;
3782 3783                          }
3783 3784                          readahead--;
3784 3785                          rablkoff += bsize;
3785 3786                          /*
3786 3787                           * Indicate that we did a readahead so that
3787 3788                           * the readahead offset is not updated
3788 3789                           * by the synchronous read below.
3789 3790                           */
3790 3791                          readahead_issued = 1;
3791 3792                          mutex_enter(&rp->r_statelock);
3792 3793                          /*
3793 3794                           * Set the readahead offset to the
3794 3795                           * offset of the last async readahead
3795 3796                           * request.
3796 3797                           */
3797 3798                          rp->r_nextr = rablkoff;
3798 3799                  }
3799 3800                  mutex_exit(&rp->r_statelock);
3800 3801          }
3801 3802  
3802 3803  again:
3803 3804          if ((pagefound = page_exists(vp, off)) == NULL) {
3804 3805                  if (pl == NULL) {
3805 3806                          (void) nfs_async_readahead(vp, blkoff, addr, seg, cr,
3806 3807                              nfs_readahead);
3807 3808                  } else if (rw == S_CREATE) {
3808 3809                          /*
3809 3810                           * Block for this page is not allocated, or the offset
3810 3811                           * is beyond the current allocation size, or we're
3811 3812                           * allocating a swap slot and the page was not found,
3812 3813                           * so allocate it and return a zero page.
3813 3814                           */
3814 3815                          if ((pp = page_create_va(vp, off,
3815 3816                              PAGESIZE, PG_WAIT, seg, addr)) == NULL)
3816 3817                                  cmn_err(CE_PANIC, "nfs_getapage: page_create");
3817 3818                          io_len = PAGESIZE;
3818 3819                          mutex_enter(&rp->r_statelock);
3819 3820                          rp->r_nextr = off + PAGESIZE;
3820 3821                          mutex_exit(&rp->r_statelock);
3821 3822                  } else {
3822 3823                          /*
3823 3824                           * Need to go to the server to get a BLOCK; the
3824 3825                           * exceptions are reading at offset = 0 and doing
3825 3826                           * random i/o, in which case read only a PAGE.
3826 3827                           */
3827 3828                          mutex_enter(&rp->r_statelock);
3828 3829                          if (blkoff < rp->r_size &&
3829 3830                              blkoff + bsize >= rp->r_size) {
3830 3831                                  /*
3831 3832                                   * If only a block or less is left in
3832 3833                                   * the file, read all that is remaining.
3833 3834                                   */
3834 3835                                  if (rp->r_size <= off) {
3835 3836                                          /*
3836 3837                                           * Trying to access beyond EOF,
3837 3838                                           * set up to get at least one page.
3838 3839                                           */
3839 3840                                          blksize = off + PAGESIZE - blkoff;
3840 3841                                  } else
3841 3842                                          blksize = rp->r_size - blkoff;
3842 3843                          } else if ((off == 0) ||
3843 3844                              (off != rp->r_nextr && !readahead_issued)) {
3844 3845                                  blksize = PAGESIZE;
3845 3846                                  blkoff = off; /* block = page here */
3846 3847                          } else
3847 3848                                  blksize = bsize;
3848 3849                          mutex_exit(&rp->r_statelock);
3849 3850  
3850 3851                          pp = pvn_read_kluster(vp, off, seg, addr, &io_off,
3851 3852                              &io_len, blkoff, blksize, 0);
3852 3853  
3853 3854                          /*
3854 3855                           * Some other thread has entered the page,
3855 3856                           * so just use it.
3856 3857                           */
3857 3858                          if (pp == NULL)
3858 3859                                  goto again;
3859 3860  
3860 3861                          /*
3861 3862                           * Now round the request size up to page boundaries.
3862 3863                           * This ensures that the entire page will be
3863 3864                           * initialized to zeroes if EOF is encountered.
3864 3865                           */
3865 3866                          io_len = ptob(btopr(io_len));
3866 3867  
3867 3868                          bp = pageio_setup(pp, io_len, vp, B_READ);
3868 3869                          ASSERT(bp != NULL);
3869 3870  
3870 3871                          /*
3871 3872                           * pageio_setup should have set b_addr to 0.  This
3872 3873                           * is correct since we want to do I/O on a page
3873 3874                           * boundary.  bp_mapin will use this addr to calculate
3874 3875                           * an offset, and then set b_addr to the kernel virtual
3875 3876                           * address it allocated for us.
3876 3877                           */
3877 3878                          ASSERT(bp->b_un.b_addr == 0);
3878 3879  
3879 3880                          bp->b_edev = 0;
3880 3881                          bp->b_dev = 0;
3881 3882                          bp->b_lblkno = lbtodb(io_off);
3882 3883                          bp->b_file = vp;
3883 3884                          bp->b_offset = (offset_t)off;
3884 3885                          bp_mapin(bp);
3885 3886  
3886 3887                          /*
3887 3888                           * If doing a write beyond what we believe is EOF,
3888 3889                           * don't bother trying to read the pages from the
3889 3890                           * server, we'll just zero the pages here.  We
3890 3891                           * don't check that the rw flag is S_WRITE here
3891 3892                           * because some implementations may attempt a
3892 3893                           * read access to the buffer before copying data.
3893 3894                           */
3894 3895                          mutex_enter(&rp->r_statelock);
3895 3896                          if (io_off >= rp->r_size && seg == segkmap) {
3896 3897                                  mutex_exit(&rp->r_statelock);
3897 3898                                  bzero(bp->b_un.b_addr, io_len);
3898 3899                          } else {
3899 3900                                  mutex_exit(&rp->r_statelock);
3900 3901                                  error = nfs_bio(bp, cr);
3901 3902                          }
3902 3903  
3903 3904                          /*
3904 3905                           * Unmap the buffer before freeing it.
3905 3906                           */
3906 3907                          bp_mapout(bp);
3907 3908                          pageio_done(bp);
3908 3909  
3909 3910                          if (error == NFS_EOF) {
3910 3911                                  /*
3911 3912                                   * If doing a write system call just return
3912 3913                                   * zeroed pages, else user tried to get pages
3913 3914                                   * beyond EOF, return error.  We don't check
3914 3915                                   * that the rw flag is S_WRITE here because
3915 3916                                   * some implementations may attempt a read
3916 3917                                   * access to the buffer before copying data.
3917 3918                                   */
3918 3919                                  if (seg == segkmap)
3919 3920                                          error = 0;
3920 3921                                  else
3921 3922                                          error = EFAULT;
3922 3923                          }
3923 3924  
3924 3925                          if (!readahead_issued && !error) {
3925 3926                                  mutex_enter(&rp->r_statelock);
3926 3927                                  rp->r_nextr = io_off + io_len;
3927 3928                                  mutex_exit(&rp->r_statelock);
3928 3929                          }
3929 3930                  }
3930 3931          }
3931 3932  
3932 3933  out:
3933 3934          if (pl == NULL)
3934 3935                  return (error);
3935 3936  
3936 3937          if (error) {
3937 3938                  if (pp != NULL)
3938 3939                          pvn_read_done(pp, B_ERROR);
3939 3940                  return (error);
3940 3941          }
3941 3942  
3942 3943          if (pagefound) {
3943 3944                  se_t se = (rw == S_CREATE ? SE_EXCL : SE_SHARED);
3944 3945  
3945 3946                  /*
3946 3947                   * Page exists in the cache, acquire the appropriate lock.
3947 3948                   * If this fails, start all over again.
3948 3949                   */
3949 3950                  if ((pp = page_lookup(vp, off, se)) == NULL) {
3950 3951  #ifdef DEBUG
3951 3952                          nfs_lostpage++;
3952 3953  #endif
3953 3954                          goto reread;
3954 3955                  }
3955 3956                  pl[0] = pp;
3956 3957                  pl[1] = NULL;
3957 3958                  return (0);
3958 3959          }
3959 3960  
3960 3961          if (pp != NULL)
3961 3962                  pvn_plist_init(pp, pl, plsz, off, io_len, rw);
3962 3963  
3963 3964          return (error);
3964 3965  }
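
The readahead cases (a)-(d) in the comment inside nfs_getapage() reduce to a small pure function of the current offset, the block offset, and the next expected read offset. A standalone restatement with two worked values; NFS_NRA_DEMO stands in for the nfs_nra tunable:

    #include <stdio.h>

    #define NFS_NRA_DEMO 4	/* stand-in for the nfs_nra tunable */

    static int
    ra_count(unsigned long long off, unsigned long long blkoff,
        unsigned long long nextr, unsigned long long bsize)
    {
            unsigned long long ra_window;

            if (off == 0)
                    return (0);			/* (a) no readahead at 0 */
            if (blkoff == nextr)
                    return (NFS_NRA_DEMO);		/* (b) window closed */
            if (nextr > blkoff &&
                (ra_window = (nextr - blkoff) / bsize) <= (NFS_NRA_DEMO - 1))
                    return (NFS_NRA_DEMO - (int)ra_window);	/* (c) partial */
            return (0);				/* (d) random i/o */
    }

    int
    main(void)
    {
            unsigned long long bsize = 32768;

            /* sequential reader: window closed, issue the maximum */
            printf("%d\n", ra_count(bsize, bsize, bsize, bsize));		/* 4 */
            /* reader trailing the readahead offset by two blocks */
            printf("%d\n", ra_count(bsize, bsize, 3 * bsize, bsize));	/* 2 */
            return (0);
    }
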
3965 3966  
3966 3967  static void
3967 3968  nfs_readahead(vnode_t *vp, u_offset_t blkoff, caddr_t addr, struct seg *seg,
3968 3969          cred_t *cr)
3969 3970  {
3970 3971          int error;
3971 3972          page_t *pp;
3972 3973          u_offset_t io_off;
3973 3974          size_t io_len;
3974 3975          struct buf *bp;
3975 3976          uint_t bsize, blksize;
3976 3977          rnode_t *rp = VTOR(vp);
3977 3978  
3978 3979          ASSERT(nfs_zone() == VTOMI(vp)->mi_zone);
3979 3980  
3980 3981          bsize = MAX(vp->v_vfsp->vfs_bsize, PAGESIZE);
3981 3982  
3982 3983          mutex_enter(&rp->r_statelock);
3983 3984          if (blkoff < rp->r_size && blkoff + bsize > rp->r_size) {
3984 3985                  /*
3985 3986                   * If less than a block is left in the file,
3986 3987                   * read less than a block.
3987 3988                   */
3988 3989                  blksize = rp->r_size - blkoff;
3989 3990          } else
3990 3991                  blksize = bsize;
3991 3992          mutex_exit(&rp->r_statelock);
3992 3993  
3993 3994          pp = pvn_read_kluster(vp, blkoff, segkmap, addr,
3994 3995              &io_off, &io_len, blkoff, blksize, 1);
3995 3996          /*
3996 3997           * The isra flag passed to the kluster function is 1; we may have
3997 3998           * gotten a NULL return value for a variety of reasons (# of free
3998 3999           * pages < minfree, someone entered the page on the vnode, etc.).
3999 4000           * In all cases, we want to punt on the readahead.
4000 4001           */
4001 4002          if (pp == NULL)
4002 4003                  return;
4003 4004  
4004 4005          /*
4005 4006           * Now round the request size up to page boundaries.
4006 4007           * This ensures that the entire page will be
4007 4008           * initialized to zeroes if EOF is encountered.
4008 4009           */
4009 4010          io_len = ptob(btopr(io_len));
4010 4011  
4011 4012          bp = pageio_setup(pp, io_len, vp, B_READ);
4012 4013          ASSERT(bp != NULL);
4013 4014  
4014 4015          /*
4015 4016           * pageio_setup should have set b_addr to 0.  This is correct since
4016 4017           * we want to do I/O on a page boundary. bp_mapin() will use this addr
4017 4018           * to calculate an offset, and then set b_addr to the kernel virtual
4018 4019           * address it allocated for us.
4019 4020           */
4020 4021          ASSERT(bp->b_un.b_addr == 0);
4021 4022  
4022 4023          bp->b_edev = 0;
4023 4024          bp->b_dev = 0;
4024 4025          bp->b_lblkno = lbtodb(io_off);
4025 4026          bp->b_file = vp;
4026 4027          bp->b_offset = (offset_t)blkoff;
4027 4028          bp_mapin(bp);
4028 4029  
4029 4030          /*
4030 4031           * If doing a write beyond what we believe is EOF, don't bother trying
4031 4032           * to read the pages from the server, we'll just zero the pages here.
4032 4033           * We don't check that the rw flag is S_WRITE here because some
4033 4034           * implementations may attempt a read access to the buffer before
4034 4035           * copying data.
4035 4036           */
4036 4037          mutex_enter(&rp->r_statelock);
4037 4038          if (io_off >= rp->r_size && seg == segkmap) {
4038 4039                  mutex_exit(&rp->r_statelock);
4039 4040                  bzero(bp->b_un.b_addr, io_len);
4040 4041                  error = 0;
4041 4042          } else {
4042 4043                  mutex_exit(&rp->r_statelock);
4043 4044                  error = nfs_bio(bp, cr);
4044 4045                  if (error == NFS_EOF)
4045 4046                          error = 0;
4046 4047          }
4047 4048  
4048 4049          /*
4049 4050           * Unmap the buffer before freeing it.
4050 4051           */
4051 4052          bp_mapout(bp);
4052 4053          pageio_done(bp);
4053 4054  
4054 4055          pvn_read_done(pp, error ? B_READ | B_ERROR : B_READ);
4055 4056  
4056 4057          /*
4057 4058           * In case of error, set the readahead offset
4058 4059           * to the lowest offset.
4059 4060           * pvn_read_done() calls VN_DISPOSE to destroy the pages.
4060 4061           */
4061 4062          if (error && rp->r_nextr > io_off) {
4062 4063                  mutex_enter(&rp->r_statelock);
4063 4064                  if (rp->r_nextr > io_off)
4064 4065                          rp->r_nextr = io_off;
4065 4066                  mutex_exit(&rp->r_statelock);
4066 4067          }
4067 4068  }
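
The block-size selection in nfs_readahead() above is just a clamp: read a full block unless the block straddles EOF, in which case read only what remains. As a worked example with hypothetical sizes:

    #include <stdio.h>

    static unsigned long long
    ra_blksize(unsigned long long blkoff, unsigned long long bsize,
        unsigned long long r_size)
    {
            if (blkoff < r_size && blkoff + bsize > r_size)
                    return (r_size - blkoff);	/* partial final block */
            return (bsize);
    }

    int
    main(void)
    {
            /* 100000-byte file, 32768-byte blocks: final block is 1696 bytes */
            printf("%llu\n", ra_blksize(98304, 32768, 100000));	/* 1696 */
            printf("%llu\n", ra_blksize(65536, 32768, 100000));	/* 32768 */
            return (0);
    }
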
4068 4069  
4069 4070  /*
4070 4071   * Flags are composed of {B_INVAL, B_FREE, B_DONTNEED, B_FORCE}
4071 4072   * If len == 0, do from off to EOF.
4072 4073   *
4073 4074   * The normal cases should be len == 0 && off == 0 (entire vp list),
4074 4075   * len == MAXBSIZE (from segmap_release actions), and len == PAGESIZE
4075 4076   * (from pageout).
4076 4077   */
4077 4078  /* ARGSUSED */
4078 4079  static int
4079 4080  nfs_putpage(vnode_t *vp, offset_t off, size_t len, int flags, cred_t *cr,
4080 4081          caller_context_t *ct)
4081 4082  {
4082 4083          int error;
4083 4084          rnode_t *rp;
4084 4085  
4085 4086          ASSERT(cr != NULL);
4086 4087  
4087 4088          /*
4088 4089           * XXX - Why should this check be made here?
4089 4090           */
4090 4091          if (vp->v_flag & VNOMAP)
4091 4092                  return (ENOSYS);
4092 4093  
4093 4094          if (len == 0 && !(flags & B_INVAL) && vn_is_readonly(vp))
4094 4095                  return (0);
4095 4096  
4096 4097          if (!(flags & B_ASYNC) && nfs_zone() != VTOMI(vp)->mi_zone)
4097 4098                  return (EIO);
4098 4099          ASSERT(off <= MAXOFF32_T);
4099 4100  
4100 4101          rp = VTOR(vp);
4101 4102          mutex_enter(&rp->r_statelock);
4102 4103          rp->r_count++;
4103 4104          mutex_exit(&rp->r_statelock);
4104 4105          error = nfs_putpages(vp, off, len, flags, cr);
4105 4106          mutex_enter(&rp->r_statelock);
4106 4107          rp->r_count--;
4107 4108          cv_broadcast(&rp->r_cv);
4108 4109          mutex_exit(&rp->r_statelock);
4109 4110  
4110 4111          return (error);
4111 4112  }
4112 4113  
4113 4114  /*
4114 4115   * Write out a single page, possibly klustering adjacent dirty pages.
4115 4116   */
4116 4117  int
4117 4118  nfs_putapage(vnode_t *vp, page_t *pp, u_offset_t *offp, size_t *lenp,
4118 4119          int flags, cred_t *cr)
4119 4120  {
4120 4121          u_offset_t io_off;
4121 4122          u_offset_t lbn_off;
4122 4123          u_offset_t lbn;
4123 4124          size_t io_len;
4124 4125          uint_t bsize;
4125 4126          int error;
4126 4127          rnode_t *rp;
4127 4128  
4128 4129          ASSERT(!vn_is_readonly(vp));
4129 4130          ASSERT(pp != NULL);
4130 4131          ASSERT(cr != NULL);
4131 4132          ASSERT((flags & B_ASYNC) || nfs_zone() == VTOMI(vp)->mi_zone);
4132 4133  
4133 4134          rp = VTOR(vp);
4134 4135          ASSERT(rp->r_count > 0);
4135 4136  
4136 4137          ASSERT(pp->p_offset <= MAXOFF32_T);
4137 4138  
4138 4139          bsize = MAX(vp->v_vfsp->vfs_bsize, PAGESIZE);
4139 4140          lbn = pp->p_offset / bsize;
4140 4141          lbn_off = lbn * bsize;
4141 4142  
4142 4143          /*
4143 4144           * Find a kluster that fits in one block, or in
4144 4145           * one page if pages are bigger than blocks.  If
4145 4146           * there is less file space allocated than a whole
4146 4147           * page, we'll shorten the i/o request below.
4147 4148           */
4148 4149          pp = pvn_write_kluster(vp, pp, &io_off, &io_len, lbn_off,
4149 4150              roundup(bsize, PAGESIZE), flags);
4150 4151  
4151 4152          /*
4152 4153           * pvn_write_kluster shouldn't have returned a page with offset
4153 4154           * behind the original page we were given.  Verify that.
4154 4155           */
4155 4156          ASSERT((pp->p_offset / bsize) >= lbn);
4156 4157  
4157 4158          /*
4158 4159           * Now pp will have the list of kept dirty pages marked for
4159 4160           * write back.  It will also handle invalidation and freeing
4160 4161           * of pages that are not dirty.  Check for page length rounding
4161 4162           * problems.
4162 4163           */
4163 4164          if (io_off + io_len > lbn_off + bsize) {
4164 4165                  ASSERT((io_off + io_len) - (lbn_off + bsize) < PAGESIZE);
4165 4166                  io_len = lbn_off + bsize - io_off;
4166 4167          }
4167 4168          /*
4168 4169           * The RMODINPROGRESS flag makes sure that nfs(3)_bio() sees a
4169 4170           * consistent value of r_size. RMODINPROGRESS is set in writerp().
4170 4171           * When RMODINPROGRESS is set it indicates that a uiomove() is in
4171 4172           * progress and the r_size has not been made consistent with the
4172 4173           * new size of the file. When the uiomove() completes the r_size is
4173 4174           * updated and the RMODINPROGRESS flag is cleared.
4174 4175           *
4175 4176           * The RMODINPROGRESS flag makes sure that nfs(3)_bio() sees a
4176 4177           * consistent value of r_size. Without this handshaking, it is
4177 4178           * possible that nfs(3)_bio() picks up the old value of r_size
4178 4179           * before the uiomove() in writerp() completes. This will result
4179 4180           * in the write through nfs(3)_bio() being dropped.
4180 4181           *
4181 4182           * More precisely, there is a window between the time the uiomove()
4182 4183           * completes and the time the r_size is updated. If a VOP_PUTPAGE()
4183 4184           * operation intervenes in this window, the page will be picked up,
4184 4185           * because it is dirty (it will be unlocked, unless it was
4185 4186           * pagecreate'd). When the page is picked up as dirty, the dirty
4186 4187           * bit is reset (pvn_getdirty()). In nfs(3)write(), r_size is
4187 4188           * checked. This will still be the old size. Therefore the page will
4188 4189           * not be written out. When segmap_release() calls VOP_PUTPAGE(),
4189 4190           * the page will be found to be clean and the write will be dropped.
4190 4191           */
4191 4192          if (rp->r_flags & RMODINPROGRESS) {
4192 4193                  mutex_enter(&rp->r_statelock);
4193 4194                  if ((rp->r_flags & RMODINPROGRESS) &&
4194 4195                      rp->r_modaddr + MAXBSIZE > io_off &&
4195 4196                      rp->r_modaddr < io_off + io_len) {
4196 4197                          page_t *plist;
4197 4198                          /*
4198 4199                           * A write is in progress for this region of the file.
4199 4200                           * If we did not detect RMODINPROGRESS here then this
4200 4201                           * path through nfs_putapage() would eventually go to
4201 4202                           * nfs(3)_bio() and may not write out all of the data
4202 4203                           * in the pages. We end up losing data. So we decide
4203 4204                           * to set the modified bit on each page in the page
4204 4205                           * list and mark the rnode with RDIRTY. This write
4205 4206                           * will be restarted at some later time.
4206 4207                           */
4207 4208                          plist = pp;
4208 4209                          while (plist != NULL) {
4209 4210                                  pp = plist;
4210 4211                                  page_sub(&plist, pp);
4211 4212                                  hat_setmod(pp);
4212 4213                                  page_io_unlock(pp);
4213 4214                                  page_unlock(pp);
4214 4215                          }
4215 4216                          rp->r_flags |= RDIRTY;
4216 4217                          mutex_exit(&rp->r_statelock);
4217 4218                          if (offp)
4218 4219                                  *offp = io_off;
4219 4220                          if (lenp)
4220 4221                                  *lenp = io_len;
4221 4222                          return (0);
4222 4223                  }
4223 4224                  mutex_exit(&rp->r_statelock);
4224 4225          }
4225 4226  
4226 4227          if (flags & B_ASYNC) {
4227 4228                  error = nfs_async_putapage(vp, pp, io_off, io_len, flags, cr,
4228 4229                      nfs_sync_putapage);
4229 4230          } else
4230 4231                  error = nfs_sync_putapage(vp, pp, io_off, io_len, flags, cr);
4231 4232  
4232 4233          if (offp)
4233 4234                  *offp = io_off;
4234 4235          if (lenp)
4235 4236                  *lenp = io_len;
4236 4237          return (error);
4237 4238  }
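
The RMODINPROGRESS check in nfs_putapage() only defers the write when the in-flight uiomove() region actually overlaps the kluster; the test is a standard half-open interval intersection. A standalone restatement, with MAXBSIZE_DEMO standing in for MAXBSIZE:

    #include <stdio.h>

    #define MAXBSIZE_DEMO 8192	/* illustrative stand-in for MAXBSIZE */

    /* Does [modaddr, modaddr + MAXBSIZE_DEMO) meet [io_off, io_off + io_len)? */
    static int
    mod_overlaps_io(unsigned long long modaddr, unsigned long long io_off,
        unsigned long long io_len)
    {
            return (modaddr + MAXBSIZE_DEMO > io_off &&
                modaddr < io_off + io_len);
    }

    int
    main(void)
    {
            printf("%d\n", mod_overlaps_io(4096, 8192, 8192));	/* 1 */
            printf("%d\n", mod_overlaps_io(4096, 16384, 8192));	/* 0 */
            return (0);
    }
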
4238 4239  
4239 4240  static int
4240 4241  nfs_sync_putapage(vnode_t *vp, page_t *pp, u_offset_t io_off, size_t io_len,
4241 4242          int flags, cred_t *cr)
4242 4243  {
4243 4244          int error;
4244 4245          rnode_t *rp;
4245 4246  
4246 4247          flags |= B_WRITE;
4247 4248  
4248 4249          ASSERT(nfs_zone() == VTOMI(vp)->mi_zone);
4249 4250          error = nfs_rdwrlbn(vp, pp, io_off, io_len, flags, cr);
4250 4251  
4251 4252          rp = VTOR(vp);
4252 4253  
4253 4254          if ((error == ENOSPC || error == EDQUOT || error == EACCES) &&
4254 4255              (flags & (B_INVAL|B_FORCE)) != (B_INVAL|B_FORCE)) {
4255 4256                  if (!(rp->r_flags & ROUTOFSPACE)) {
4256 4257                          mutex_enter(&rp->r_statelock);
4257 4258                          rp->r_flags |= ROUTOFSPACE;
4258 4259                          mutex_exit(&rp->r_statelock);
4259 4260                  }
4260 4261                  flags |= B_ERROR;
4261 4262                  pvn_write_done(pp, flags);
4262 4263                  /*
4263 4264                   * If this was not an async thread, then try again to
4264 4265                   * write out the pages, but this time, also destroy
4265 4266                   * them whether or not the write is successful.  This
4266 4267                   * will prevent memory from filling up with these
4267 4268                   * pages and destroying them is the only alternative
4268 4269                   * if they can't be written out.
4269 4270                   *
4270 4271                   * Don't do this if this is an async thread because
4271 4272                   * when the pages are unlocked in pvn_write_done,
4272 4273                   * some other thread could have come along, locked
4273 4274                   * them, and queued for an async thread.  It would be
4274 4275                   * possible for all of the async threads to be tied
4275 4276                   * up waiting to lock the pages again and they would
4276 4277                   * all already be locked and waiting for an async
4277 4278                   * thread to handle them.  Deadlock.
4278 4279                   */
4279 4280                  if (!(flags & B_ASYNC)) {
4280 4281                          error = nfs_putpage(vp, io_off, io_len,
4281 4282                              B_INVAL | B_FORCE, cr, NULL);
4282 4283                  }
4283 4284          } else {
4284 4285                  if (error)
4285 4286                          flags |= B_ERROR;
4286 4287                  else if (rp->r_flags & ROUTOFSPACE) {
4287 4288                          mutex_enter(&rp->r_statelock);
4288 4289                          rp->r_flags &= ~ROUTOFSPACE;
4289 4290                          mutex_exit(&rp->r_statelock);
4290 4291                  }
4291 4292                  pvn_write_done(pp, flags);
4292 4293          }
4293 4294  
4294 4295          return (error);
4295 4296  }
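
The out-of-space handling in nfs_sync_putapage() only retries the flush destructively (B_INVAL | B_FORCE) from synchronous context; an async thread must not, for the deadlock reason the comment spells out. The decision, restated as a sketch:

    #include <errno.h>
    #include <stdio.h>

    static int
    retry_destructively(int error, int is_async)
    {
            if (is_async)
                    return (0);	/* never from an async thread: see above */
            return (error == ENOSPC || error == EDQUOT || error == EACCES);
    }

    int
    main(void)
    {
            printf("%d\n", retry_destructively(ENOSPC, 0));	/* 1 */
            printf("%d\n", retry_destructively(ENOSPC, 1));	/* 0 */
            return (0);
    }
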
4296 4297  
4297 4298  /* ARGSUSED */
4298 4299  static int
4299 4300  nfs_map(vnode_t *vp, offset_t off, struct as *as, caddr_t *addrp,
4300 4301          size_t len, uchar_t prot, uchar_t maxprot, uint_t flags, cred_t *cr,
4301 4302          caller_context_t *ct)
4302 4303  {
4303 4304          struct segvn_crargs vn_a;
4304 4305          int error;
4305 4306          rnode_t *rp;
4306 4307          struct vattr va;
4307 4308  
4308 4309          if (nfs_zone() != VTOMI(vp)->mi_zone)
4309 4310                  return (EIO);
4310 4311  
4311 4312          if (vp->v_flag & VNOMAP)
4312 4313                  return (ENOSYS);
4313 4314  
4314 4315          if (off > MAXOFF32_T)
4315 4316                  return (EFBIG);
4316 4317  
4317 4318          if (off < 0 || off + len < 0)
4318 4319                  return (ENXIO);
4319 4320  
4320 4321          if (vp->v_type != VREG)
4321 4322                  return (ENODEV);
4322 4323  
4323 4324          /*
4324 4325           * If there is cached data and if close-to-open consistency
4325 4326           * checking is not turned off and if the file system is not
4326 4327           * mounted readonly, then force an over the wire getattr.
4327 4328           * Otherwise, just invoke nfsgetattr to get a copy of the
4328 4329           * attributes.  The attribute cache will be used unless it
4329 4330           * is timed out and if it is, then an over the wire getattr
4330 4331           * will be issued.
4331 4332           */
4332 4333          va.va_mask = AT_ALL;
4333 4334          if (vn_has_cached_data(vp) &&
4334 4335              !(VTOMI(vp)->mi_flags & MI_NOCTO) && !vn_is_readonly(vp))
4335 4336                  error = nfs_getattr_otw(vp, &va, cr);
4336 4337          else
4337 4338                  error = nfsgetattr(vp, &va, cr);
4338 4339          if (error)
4339 4340                  return (error);
4340 4341  
4341 4342          /*
4342 4343           * Check to see if the vnode is currently marked as not cachable.
4343 4344           * This means portions of the file are locked (through VOP_FRLOCK).
4344 4345           * In this case the map request must be refused.  We use
4345 4346           * rp->r_lkserlock to avoid a race with concurrent lock requests.
4346 4347           */
4347 4348          rp = VTOR(vp);
4348 4349  
4349 4350          /*
4350 4351           * Atomically increment r_inmap after acquiring r_rwlock. The
4351 4352           * idea here is to acquire r_rwlock to block read/write and
4352 4353           * not to protect r_inmap. r_inmap will inform nfs_read/write()
4353 4354           * that we are in nfs_map(). With r_rwlock acquired in order,
4354 4355           * we prevent the deadlock that would otherwise occur if
4355 4356           * nfs_addmap() acquired it out of order.
4356 4357           *
4357 4358           * Since we are not protecting r_inmap by any lock, we do not
4358 4359           * hold any lock when we decrement it. We atomically decrement
4359 4360           * r_inmap after we release r_lkserlock.
4360 4361           */
4361 4362  
4362 4363          if (nfs_rw_enter_sig(&rp->r_rwlock, RW_WRITER, INTR(vp)))
4363 4364                  return (EINTR);
4364 4365          atomic_inc_uint(&rp->r_inmap);
4365 4366          nfs_rw_exit(&rp->r_rwlock);
4366 4367  
4367 4368          if (nfs_rw_enter_sig(&rp->r_lkserlock, RW_READER, INTR(vp))) {
4368 4369                  atomic_dec_uint(&rp->r_inmap);
4369 4370                  return (EINTR);
4370 4371          }
4371 4372          if (vp->v_flag & VNOCACHE) {
4372 4373                  error = EAGAIN;
4373 4374                  goto done;
4374 4375          }
4375 4376  
4376 4377          /*
4377 4378           * Don't allow concurrent locks and mapping if mandatory locking is
4378 4379           * enabled.
4379 4380           */
4380 4381          if ((flk_has_remote_locks(vp) || lm_has_sleep(vp)) &&
4381 4382              MANDLOCK(vp, va.va_mode)) {
4382 4383                  error = EAGAIN;
4383 4384                  goto done;
4384 4385          }
4385 4386  
4386 4387          as_rangelock(as);
4387 4388          error = choose_addr(as, addrp, len, off, ADDR_VACALIGN, flags);
4388 4389          if (error != 0) {
4389 4390                  as_rangeunlock(as);
4390 4391                  goto done;
4391 4392          }
4392 4393  
4393 4394          vn_a.vp = vp;
4394 4395          vn_a.offset = off;
4395 4396          vn_a.type = (flags & MAP_TYPE);
4396 4397          vn_a.prot = (uchar_t)prot;
4397 4398          vn_a.maxprot = (uchar_t)maxprot;
4398 4399          vn_a.flags = (flags & ~MAP_TYPE);
4399 4400          vn_a.cred = cr;
4400 4401          vn_a.amp = NULL;
4401 4402          vn_a.szc = 0;
4402 4403          vn_a.lgrp_mem_policy_flags = 0;
4403 4404  
4404 4405          error = as_map(as, *addrp, len, segvn_create, &vn_a);
4405 4406          as_rangeunlock(as);
4406 4407  
4407 4408  done:
4408 4409          nfs_rw_exit(&rp->r_lkserlock);
4409 4410          atomic_dec_uint(&rp->r_inmap);
4410 4411          return (error);
4411 4412  }
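
The r_rwlock/r_inmap dance in nfs_map() above is purely an ordering device: r_rwlock is held only long enough to publish "in nfs_map", then dropped before the slow work under r_lkserlock. A userland model with POSIX rwlocks and a C11 atomic standing in for the kernel primitives (all names illustrative):

    #include <pthread.h>
    #include <stdatomic.h>

    static pthread_rwlock_t rwlock = PTHREAD_RWLOCK_INITIALIZER;	/* r_rwlock */
    static pthread_rwlock_t lkserlock = PTHREAD_RWLOCK_INITIALIZER;	/* r_lkserlock */
    static atomic_uint inmap;					/* r_inmap */

    static void
    map_enter_exit(void)
    {
            /* Take r_rwlock only to fence off in-flight reads and writes, */
            /* publish "in nfs_map", then drop it before the slow work. */
            (void) pthread_rwlock_wrlock(&rwlock);
            atomic_fetch_add(&inmap, 1);
            (void) pthread_rwlock_unlock(&rwlock);

            (void) pthread_rwlock_rdlock(&lkserlock);
            /* ... the actual mapping work would happen here ... */
            (void) pthread_rwlock_unlock(&lkserlock);

            /* Decremented with no lock held, exactly as in nfs_map(). */
            atomic_fetch_sub(&inmap, 1);
    }

    int
    main(void)
    {
            map_enter_exit();
            return (0);
    }
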
4412 4413  
4413 4414  /* ARGSUSED */
4414 4415  static int
4415 4416  nfs_addmap(vnode_t *vp, offset_t off, struct as *as, caddr_t addr,
4416 4417          size_t len, uchar_t prot, uchar_t maxprot, uint_t flags, cred_t *cr,
4417 4418          caller_context_t *ct)
4418 4419  {
4419 4420          rnode_t *rp;
4420 4421  
4421 4422          if (vp->v_flag & VNOMAP)
4422 4423                  return (ENOSYS);
4423 4424          if (nfs_zone() != VTOMI(vp)->mi_zone)
4424 4425                  return (EIO);
4425 4426  
4426 4427          rp = VTOR(vp);
4427 4428          atomic_add_long((ulong_t *)&rp->r_mapcnt, btopr(len));
4428 4429  
4429 4430          return (0);
4430 4431  }
4431 4432  
4432 4433  /* ARGSUSED */
4433 4434  static int
4434 4435  nfs_frlock(vnode_t *vp, int cmd, struct flock64 *bfp, int flag, offset_t offset,
4435 4436          struct flk_callback *flk_cbp, cred_t *cr, caller_context_t *ct)
4436 4437  {
4437 4438          netobj lm_fh;
4438 4439          int rc;
4439 4440          u_offset_t start, end;
4440 4441          rnode_t *rp;
4441 4442          int error = 0, intr = INTR(vp);
4442 4443  
4443 4444          /* check for valid cmd parameter */
4444 4445          if (cmd != F_GETLK && cmd != F_SETLK && cmd != F_SETLKW)
4445 4446                  return (EINVAL);
4446 4447          if (nfs_zone() != VTOMI(vp)->mi_zone)
4447 4448                  return (EIO);
4448 4449  
4449 4450          /* Verify l_type. */
4450 4451          switch (bfp->l_type) {
4451 4452          case F_RDLCK:
4452 4453                  if (cmd != F_GETLK && !(flag & FREAD))
4453 4454                          return (EBADF);
4454 4455                  break;
4455 4456          case F_WRLCK:
4456 4457                  if (cmd != F_GETLK && !(flag & FWRITE))
4457 4458                          return (EBADF);
4458 4459                  break;
4459 4460          case F_UNLCK:
4460 4461                  intr = 0;
4461 4462                  break;
4462 4463  
4463 4464          default:
4464 4465                  return (EINVAL);
4465 4466          }
4466 4467  
4467 4468          /* check the validity of the lock range */
4468 4469          if (rc = flk_convert_lock_data(vp, bfp, &start, &end, offset))
4469 4470                  return (rc);
4470 4471          if (rc = flk_check_lock_data(start, end, MAXOFF32_T))
4471 4472                  return (rc);
4472 4473  
4473 4474          /*
4474 4475           * If the filesystem is mounted using local locking, pass the
4475 4476           * request off to the local locking code.
4476 4477           */
4477 4478          if (VTOMI(vp)->mi_flags & MI_LLOCK) {
4478 4479                  if (offset > MAXOFF32_T)
4479 4480                          return (EFBIG);
4480 4481                  if (cmd == F_SETLK || cmd == F_SETLKW) {
4481 4482                          /*
4482 4483                           * For complete safety, we should be holding
4483 4484                           * r_lkserlock.  However, we can't call
4484 4485                           * lm_safelock and then fs_frlock while
4485 4486                           * holding r_lkserlock, so just invoke
4486 4487                           * lm_safelock and expect that this will
4487 4488                           * catch enough of the cases.
4488 4489                           */
4489 4490                          if (!lm_safelock(vp, bfp, cr))
4490 4491                                  return (EAGAIN);
4491 4492                  }
4492 4493                  return (fs_frlock(vp, cmd, bfp, flag, offset, flk_cbp, cr, ct));
4493 4494          }
4494 4495  
4495 4496          rp = VTOR(vp);
4496 4497  
4497 4498          /*
4498 4499           * Check whether the given lock request can proceed, given the
4499 4500           * current file mappings.
4500 4501           */
4501 4502          if (nfs_rw_enter_sig(&rp->r_lkserlock, RW_WRITER, intr))
4502 4503                  return (EINTR);
4503 4504          if (cmd == F_SETLK || cmd == F_SETLKW) {
4504 4505                  if (!lm_safelock(vp, bfp, cr)) {
4505 4506                          rc = EAGAIN;
4506 4507                          goto done;
4507 4508                  }
4508 4509          }
4509 4510  
4510 4511          /*
4511 4512           * Flush the cache after waiting for async I/O to finish.  For new
4512 4513           * locks, this is so that the process gets the latest bits from the
4513 4514           * server.  For unlocks, this is so that other clients see the
4514 4515           * latest bits once the file has been unlocked.  If currently dirty
4515 4516           * pages can't be flushed, then don't allow a lock to be set.  But
4516 4517           * allow unlocks to succeed, to avoid having orphan locks on the
4517 4518           * server.
4518 4519           */
4519 4520          if (cmd != F_GETLK) {
4520 4521                  mutex_enter(&rp->r_statelock);
4521 4522                  while (rp->r_count > 0) {
4522 4523                          if (intr) {
4523 4524                                  klwp_t *lwp = ttolwp(curthread);
4524 4525  
4525 4526                                  if (lwp != NULL)
4526 4527                                          lwp->lwp_nostop++;
4527 4528                                  if (cv_wait_sig(&rp->r_cv, &rp->r_statelock)
4528 4529                                      == 0) {
4529 4530                                          if (lwp != NULL)
4530 4531                                                  lwp->lwp_nostop--;
4531 4532                                          rc = EINTR;
4532 4533                                          break;
4533 4534                                  }
4534 4535                                  if (lwp != NULL)
4535 4536                                          lwp->lwp_nostop--;
4536 4537                          } else
4537 4538                                  cv_wait(&rp->r_cv, &rp->r_statelock);
4538 4539                  }
4539 4540                  mutex_exit(&rp->r_statelock);
4540 4541                  if (rc != 0)
4541 4542                          goto done;
4542 4543                  error = nfs_putpage(vp, (offset_t)0, 0, B_INVAL, cr, ct);
4543 4544                  if (error) {
4544 4545                          if (error == ENOSPC || error == EDQUOT) {
4545 4546                                  mutex_enter(&rp->r_statelock);
4546 4547                                  if (!rp->r_error)
4547 4548                                          rp->r_error = error;
4548 4549                                  mutex_exit(&rp->r_statelock);
4549 4550                          }
4550 4551                          if (bfp->l_type != F_UNLCK) {
4551 4552                                  rc = ENOLCK;
4552 4553                                  goto done;
4553 4554                          }
4554 4555                  }
4555 4556          }
4556 4557  
4557 4558          lm_fh.n_len = sizeof (fhandle_t);
4558 4559          lm_fh.n_bytes = (char *)VTOFH(vp);
4559 4560  
4560 4561          /*
4561 4562           * Call the lock manager to do the real work of contacting
4562 4563           * the server and obtaining the lock.
4563 4564           */
4564 4565          rc = lm_frlock(vp, cmd, bfp, flag, offset, cr, &lm_fh, flk_cbp);
4565 4566  
4566 4567          if (rc == 0)
4567 4568                  nfs_lockcompletion(vp, cmd);
4568 4569  
4569 4570  done:
4570 4571          nfs_rw_exit(&rp->r_lkserlock);
4571 4572          return (rc);
4572 4573  }
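
The pre-lock flush policy in nfs_frlock() is asymmetric: a failed flush refuses new locks (ENOLCK) but lets unlocks proceed so the server is not left holding orphan locks. Restated as a tiny decision function with hypothetical names:

    #include <errno.h>
    #include <stdio.h>

    /* Returns 0 to proceed with the lock call, else an errno to bail with. */
    static int
    preflush_verdict(int flush_error, int is_unlock)
    {
            if (flush_error != 0 && !is_unlock)
                    return (ENOLCK);	/* dirty pages stuck: refuse new lock */
            return (0);			/* unlocks always go through */
    }

    int
    main(void)
    {
            printf("%d\n", preflush_verdict(EIO, 0));	/* ENOLCK */
            printf("%d\n", preflush_verdict(EIO, 1));	/* 0 */
            return (0);
    }
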
4573 4574  
4574 4575  /*
4575 4576   * Free storage space associated with the specified vnode.  The portion
4576 4577   * to be freed is specified by bfp->l_start and bfp->l_len (already
4577 4578   * normalized to a "whence" of 0).
4578 4579   *
4579 4580   * This is an experimental facility whose continued existence is not
4580 4581   * guaranteed.  Currently, we only support the special case
4581 4582   * of l_len == 0, meaning free to end of file.
4582 4583   */
4583 4584  /* ARGSUSED */
4584 4585  static int
4585 4586  nfs_space(vnode_t *vp, int cmd, struct flock64 *bfp, int flag,
4586 4587          offset_t offset, cred_t *cr, caller_context_t *ct)
4587 4588  {
4588 4589          int error;
4589 4590  
4590 4591          ASSERT(vp->v_type == VREG);
4591 4592          if (cmd != F_FREESP)
4592 4593                  return (EINVAL);
4593 4594  
4594 4595          if (offset > MAXOFF32_T)
4595 4596                  return (EFBIG);
4596 4597  
4597 4598          if ((bfp->l_start > MAXOFF32_T) || (bfp->l_end > MAXOFF32_T) ||
4598 4599              (bfp->l_len > MAXOFF32_T))
4599 4600                  return (EFBIG);
4600 4601  
4601 4602          if (nfs_zone() != VTOMI(vp)->mi_zone)
4602 4603                  return (EIO);
4603 4604  
4604 4605          error = convoff(vp, bfp, 0, offset);
4605 4606          if (!error) {
4606 4607                  ASSERT(bfp->l_start >= 0);
4607 4608                  if (bfp->l_len == 0) {
4608 4609                          struct vattr va;
4609 4610  
4610 4611                          /*
4611 4612                           * ftruncate should not change the ctime and
4612 4613                           * mtime if we truncate the file to its
4613 4614                           * previous size.
4614 4615                           */
4615 4616                          va.va_mask = AT_SIZE;
4616 4617                          error = nfsgetattr(vp, &va, cr);
4617 4618                          if (error || va.va_size == bfp->l_start)
4618 4619                                  return (error);
4619 4620                          va.va_mask = AT_SIZE;
4620 4621                          va.va_size = bfp->l_start;
4621 4622                          error = nfssetattr(vp, &va, 0, cr);
4622 4623  
4623 4624                          if (error == 0 && bfp->l_start == 0)
4624 4625                                  vnevent_truncate(vp, ct);
4625 4626                  } else
4626 4627                          error = EINVAL;
4627 4628          }
4628 4629  
4629 4630          return (error);
4630 4631  }
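
Since nfs_space() only honors F_FREESP with l_len == 0 (and this client is 32-bit limited, note the MAXOFF32_T checks), the only space-freeing request that succeeds is free-to-EOF, i.e. a truncation driven through nfssetattr(). A minimal illumos userland caller that exercises this path; the path argument and the 4096-byte offset are arbitrary:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int
    main(int argc, char **argv)
    {
            struct flock fl;
            int fd;

            if (argc != 2)
                    return (1);
            if ((fd = open(argv[1], O_WRONLY)) == -1) {
                    perror("open");
                    return (1);
            }
            fl.l_whence = SEEK_SET;	/* l_start is an absolute offset */
            fl.l_start = 4096;	/* free everything from 4 KiB ... */
            fl.l_len = 0;		/* ... to end of file */
            if (fcntl(fd, F_FREESP, &fl) == -1)
                    perror("fcntl(F_FREESP)");
            (void) close(fd);
            return (0);
    }
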
4631 4632  
4632 4633  /* ARGSUSED */
4633 4634  static int
4634 4635  nfs_realvp(vnode_t *vp, vnode_t **vpp, caller_context_t *ct)
4635 4636  {
4636 4637  
4637 4638          return (EINVAL);
4638 4639  }
4639 4640  
4640 4641  /*
4641 4642   * Setup and add an address space callback to do the work of the delmap call.
4642 4643   * The callback will (and must be) deleted in the actual callback function.
4643 4644   *
4644 4645   * This is done in order to take care of the problem that we have with holding
4645 4646   * the address space's a_lock for a long period of time (e.g. if the NFS server
4646 4647   * is down).  Callbacks will be executed in the address space code while the
4647 4648   * a_lock is not held.  Holding the address space's a_lock causes things such
4648 4649   * as ps and fork to hang because they are trying to acquire this lock as well.
4649 4650   */
4650 4651  /* ARGSUSED */
4651 4652  static int
4652 4653  nfs_delmap(vnode_t *vp, offset_t off, struct as *as, caddr_t addr,
4653 4654          size_t len, uint_t prot, uint_t maxprot, uint_t flags, cred_t *cr,
4654 4655          caller_context_t *ct)
4655 4656  {
4656 4657          int                     caller_found;
4657 4658          int                     error;
4658 4659          rnode_t                 *rp;
4659 4660          nfs_delmap_args_t       *dmapp;
4660 4661          nfs_delmapcall_t        *delmap_call;
4661 4662  
4662 4663          if (vp->v_flag & VNOMAP)
4663 4664                  return (ENOSYS);
4664 4665          /*
4665 4666           * A process may not change zones if it has NFS pages mmap'ed
4666 4667           * in, so we can't legitimately get here from the wrong zone.
4667 4668           */
4668 4669          ASSERT(nfs_zone() == VTOMI(vp)->mi_zone);
4669 4670  
4670 4671          rp = VTOR(vp);
4671 4672  
4672 4673          /*
4673 4674           * The way that the address space of this process deletes its mapping
4674 4675           * of this file is via the following call chains:
4675 4676           * - as_free()->SEGOP_UNMAP()/segvn_unmap()->VOP_DELMAP()/nfs_delmap()
4676 4677           * - as_unmap()->SEGOP_UNMAP()/segvn_unmap()->VOP_DELMAP()/nfs_delmap()
4677 4678           *
4678 4679           * With the use of address space callbacks we are allowed to drop the
4679 4680           * address space lock, a_lock, while executing the NFS operations that
4680 4681           * need to go over the wire.  Returning EAGAIN to the caller of this
4681 4682           * function is what drives the execution of the callback that we add
4682 4683           * below.  The callback will be executed by the address space code
4683 4684           * after dropping the a_lock.  When the callback is finished, since
4684 4685           * we dropped the a_lock, it must be re-acquired and segvn_unmap()
4685 4686           * is called again on the same segment to finish the rest of the work
4686 4687           * that needs to happen during unmapping.
4687 4688           *
4688 4689           * This action of calling back into the segment driver causes
4689 4690           * nfs_delmap() to get called again, but since the callback was
4690 4691           * already executed at this point, it already did the work and there
4691 4692           * is nothing left for us to do.
4692 4693           *
4693 4694           * To summarize:
4694 4695           * - The first time nfs_delmap is called by the current thread is when
4695 4696           * we add the caller associated with this delmap to the delmap caller
4696 4697           * list, add the callback, and return EAGAIN.
4697 4698           * - The second time in this call chain when nfs_delmap is called we
4698 4699           * will find this caller in the delmap caller list and realize there
4699 4700           * is no more work to do thus removing this caller from the list and
4700 4701           * returning the error that was set in the callback execution.
4701 4702           */
4702 4703          caller_found = nfs_find_and_delete_delmapcall(rp, &error);
4703 4704          if (caller_found) {
4704 4705                  /*
4705 4706                   * 'error' is from the actual delmap operations.  To avoid
4706 4707                   * hangs, we need to handle the return of EAGAIN differently
4707 4708                   * since this is what drives the callback execution.
4708 4709                   * In this case, we don't want to return EAGAIN and trigger the
4709 4710                   * callback execution, because there are no callbacks to execute.
4710 4711                   */
4711 4712                  if (error == EAGAIN)
4712 4713                          return (0);
4713 4714                  else
4714 4715                          return (error);
4715 4716          }
4716 4717  
4717 4718          /* current caller was not in the list */
4718 4719          delmap_call = nfs_init_delmapcall();
4719 4720  
4720 4721          mutex_enter(&rp->r_statelock);
4721 4722          list_insert_tail(&rp->r_indelmap, delmap_call);
4722 4723          mutex_exit(&rp->r_statelock);
4723 4724  
4724 4725          dmapp = kmem_alloc(sizeof (nfs_delmap_args_t), KM_SLEEP);
4725 4726  
4726 4727          dmapp->vp = vp;
4727 4728          dmapp->off = off;
4728 4729          dmapp->addr = addr;
4729 4730          dmapp->len = len;
4730 4731          dmapp->prot = prot;
4731 4732          dmapp->maxprot = maxprot;
4732 4733          dmapp->flags = flags;
4733 4734          dmapp->cr = cr;
4734 4735          dmapp->caller = delmap_call;
4735 4736  
4736 4737          error = as_add_callback(as, nfs_delmap_callback, dmapp,
4737 4738              AS_UNMAP_EVENT, addr, len, KM_SLEEP);
4738 4739  
4739 4740          return (error ? error : EAGAIN);
4740 4741  }
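
The two-pass EAGAIN protocol described above can be condensed into a small
user-space sketch. Everything here (demo_delmap, demo_callback, pending_valid,
pending_error) is a hypothetical stand-in for nfs_delmap(),
nfs_delmap_callback(), the r_indelmap caller list, and the error it carries;
it only illustrates how returning EAGAIN on the first pass drives the
callback, and how the second pass retrieves the stored result.

/*
 * Sketch only: user-space model of the two-pass delmap protocol.
 */
#include <errno.h>
#include <stdio.h>

static int pending_valid;       /* caller registered on the first pass? */
static int pending_error;       /* result recorded by the "callback" */

static void
demo_callback(void)
{
        pending_error = 0;      /* pretend the over-the-wire work succeeded */
}

static int
demo_delmap(void)
{
        if (pending_valid) {
                /* Second pass: the callback already did the work. */
                pending_valid = 0;
                return (pending_error == EAGAIN ? 0 : pending_error);
        }
        /* First pass: register the caller and ask to be called back. */
        pending_valid = 1;
        return (EAGAIN);
}

int
main(void)
{
        int error = demo_delmap();      /* first pass: EAGAIN */

        if (error == EAGAIN) {
                demo_callback();        /* runs with "a_lock" dropped */
                error = demo_delmap();  /* second pass: stored result */
        }
        (void) printf("final error: %d\n", error);      /* prints 0 */
        return (0);
}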
4741 4742  
4742 4743  /*
4743 4744   * Remove some pages from an mmap'd vnode.  Just update the
4744 4745   * count of pages.  If doing close-to-open, then flush all
4745 4746   * of the pages associated with this file.  Otherwise, start
4746 4747   * an asynchronous page flush to write out any dirty pages.
4747 4748   * This will also associate a credential with the rnode which
4748 4749   * can be used to write the pages.
4749 4750   */
4750 4751  /* ARGSUSED */
4751 4752  static void
4752 4753  nfs_delmap_callback(struct as *as, void *arg, uint_t event)
4753 4754  {
4754 4755          int                     error;
4755 4756          rnode_t                 *rp;
4756 4757          mntinfo_t               *mi;
4757 4758          nfs_delmap_args_t       *dmapp = (nfs_delmap_args_t *)arg;
4758 4759  
4759 4760          rp = VTOR(dmapp->vp);
4760 4761          mi = VTOMI(dmapp->vp);
4761 4762  
4762 4763          atomic_add_long((ulong_t *)&rp->r_mapcnt, -btopr(dmapp->len));
4763 4764          ASSERT(rp->r_mapcnt >= 0);
4764 4765  
4765 4766          /*
4766 4767           * Initiate a page flush if there are pages, the file system
4767 4768           * was not mounted readonly, the segment was mapped shared, and
4768 4769           * the pages themselves were writeable.
4769 4770           */
4770 4771          if (vn_has_cached_data(dmapp->vp) && !vn_is_readonly(dmapp->vp) &&
4771 4772              dmapp->flags == MAP_SHARED && (dmapp->maxprot & PROT_WRITE)) {
4772 4773                  mutex_enter(&rp->r_statelock);
4773 4774                  rp->r_flags |= RDIRTY;
4774 4775                  mutex_exit(&rp->r_statelock);
4775 4776                  /*
4776 4777                   * If this is a cross-zone access a sync putpage won't work, so
4777 4778                   * the best we can do is try an async putpage.  That seems
4778 4779                   * better than something more draconian such as discarding the
4779 4780                   * dirty pages.
4780 4781                   */
4781 4782                  if ((mi->mi_flags & MI_NOCTO) ||
4782 4783                      nfs_zone() != mi->mi_zone)
4783 4784                          error = nfs_putpage(dmapp->vp, dmapp->off, dmapp->len,
4784 4785                              B_ASYNC, dmapp->cr, NULL);
4785 4786                  else
4786 4787                          error = nfs_putpage(dmapp->vp, dmapp->off, dmapp->len,
4787 4788                              0, dmapp->cr, NULL);
4788 4789                  if (!error) {
4789 4790                          mutex_enter(&rp->r_statelock);
4790 4791                          error = rp->r_error;
4791 4792                          rp->r_error = 0;
4792 4793                          mutex_exit(&rp->r_statelock);
4793 4794                  }
4794 4795          } else
4795 4796                  error = 0;
4796 4797  
4797 4798          if ((rp->r_flags & RDIRECTIO) || (mi->mi_flags & MI_DIRECTIO))
4798 4799                  (void) nfs_putpage(dmapp->vp, dmapp->off, dmapp->len,
4799 4800                      B_INVAL, dmapp->cr, NULL);
4800 4801  
4801 4802          dmapp->caller->error = error;
4802 4803          (void) as_delete_callback(as, arg);
4803 4804          kmem_free(dmapp, sizeof (nfs_delmap_args_t));
4804 4805  }
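
The flush policy in the callback reduces to one predicate plus a
sync-versus-async choice. Below is a condensed restatement, not the kernel
code: the parameter names are illustrative stand-ins for vn_has_cached_data(),
vn_is_readonly(), the MI_NOCTO flag, and the zone comparison. Note that the
kernel code compares flags for equality with MAP_SHARED rather than testing
the bit.

#include <stdbool.h>
#include <sys/mman.h>

/* Mirrors the flush condition in nfs_delmap_callback() (sketch only). */
bool
should_flush(bool has_pages, bool readonly, unsigned flags, unsigned maxprot)
{
        /* Equality test on flags, exactly as in the kernel code. */
        return (has_pages && !readonly &&
            flags == MAP_SHARED && (maxprot & PROT_WRITE) != 0);
}

/* Async putpage when close-to-open is disabled or the zone differs. */
bool
flush_async(bool no_cto, bool cross_zone)
{
        return (no_cto || cross_zone);
}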
4805 4806  
4806 4807  /* ARGSUSED */
4807 4808  static int
4808 4809  nfs_pathconf(vnode_t *vp, int cmd, ulong_t *valp, cred_t *cr,
4809 4810          caller_context_t *ct)
4810 4811  {
4811 4812          int error = 0;
4812 4813  
4813 4814          if (nfs_zone() != VTOMI(vp)->mi_zone)
4814 4815                  return (EIO);
4815 4816          /*
4816 4817           * This looks a little weird because it's written in a general
4817 4818           * manner but we make little use of cases.  If cntl() ever gets
4818 4819           * widely used, the outer switch will make more sense.
4819 4820           */
4820 4821  
4821 4822          switch (cmd) {
4822 4823  
4823 4824          /*
4824 4825           * Large file spec - need to answer the new query with a
4825 4826           * hardcoded constant based on the protocol.
4826 4827           */
4827 4828          case _PC_FILESIZEBITS:
4828 4829                  *valp = 32;
4829 4830                  return (0);
4830 4831  
4831 4832          case _PC_LINK_MAX:
4832 4833          case _PC_NAME_MAX:
4833 4834          case _PC_PATH_MAX:
4834 4835          case _PC_SYMLINK_MAX:
4835 4836          case _PC_CHOWN_RESTRICTED:
4836 4837          case _PC_NO_TRUNC: {
4837 4838                  mntinfo_t *mi;
4838 4839                  struct pathcnf *pc;
4839 4840  
4840 4841                  if ((mi = VTOMI(vp)) == NULL || (pc = mi->mi_pathconf) == NULL)
4841 4842                          return (EINVAL);
4842 4843                  error = _PC_ISSET(cmd, pc->pc_mask);    /* error or bool */
4843 4844                  switch (cmd) {
4844 4845                  case _PC_LINK_MAX:
4845 4846                          *valp = pc->pc_link_max;
4846 4847                          break;
4847 4848                  case _PC_NAME_MAX:
4848 4849                          *valp = pc->pc_name_max;
4849 4850                          break;
4850 4851                  case _PC_PATH_MAX:
4851 4852                  case _PC_SYMLINK_MAX:
4852 4853                          *valp = pc->pc_path_max;
4853 4854                          break;
4854 4855                  case _PC_CHOWN_RESTRICTED:
4855 4856                          /*
4856 4857                           * if we got here, error is really a boolean which
4857 4858                           * indicates whether cmd is set or not.
4858 4859                           */
4859 4860                          *valp = error ? 1 : 0;  /* see above */
4860 4861                          error = 0;
4861 4862                          break;
4862 4863                  case _PC_NO_TRUNC:
4863 4864                          /*
4864 4865                           * if we got here, error is really a boolean which
4865 4866                           * indicates whether cmd is set or not.
4866 4867                           */
4867 4868                          *valp = error ? 1 : 0;  /* see above */
4868 4869                          error = 0;
4869 4870                          break;
4870 4871                  }
4871 4872                  return (error ? EINVAL : 0);
4872 4873                  }
4873 4874  
4874 4875          case _PC_XATTR_EXISTS:
4875 4876                  *valp = 0;
4876 4877                  if (vp->v_vfsp->vfs_flag & VFS_XATTR) {
4877 4878                          vnode_t *avp;
4878 4879                          rnode_t *rp;
4879 4880                          mntinfo_t *mi = VTOMI(vp);
4880 4881  
4881 4882                          if (!(mi->mi_flags & MI_EXTATTR))
4882 4883                                  return (0);
4883 4884  
4884 4885                          rp = VTOR(vp);
4885 4886                          if (nfs_rw_enter_sig(&rp->r_rwlock, RW_READER,
4886 4887                              INTR(vp)))
4887 4888                                  return (EINTR);
4888 4889  
4889 4890                          error = nfslookup_dnlc(vp, XATTR_DIR_NAME, &avp, cr);
4890 4891                          if (error || avp == NULL)
4891 4892                                  error = acl_getxattrdir2(vp, &avp, 0, cr, 0);
4892 4893  
4893 4894                          nfs_rw_exit(&rp->r_rwlock);
4894 4895  
4895 4896                          if (error == 0 && avp != NULL) {
4896 4897                                  error = do_xattr_exists_check(avp, valp, cr);
4897 4898                                  VN_RELE(avp);
4898 4899                          }
4899 4900                  }
4900 4901                  return (error ? EINVAL : 0);
4901 4902  
4902 4903          case _PC_ACL_ENABLED:
4903 4904                  *valp = _ACL_ACLENT_ENABLED;
4904 4905                  return (0);
4905 4906  
4906 4907          default:
4907 4908                  return (EINVAL);
4908 4909          }
4909 4910  }
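
From user space, the _PC_FILESIZEBITS arm above is what answers pathconf(2).
A minimal illustration follows; the mount point /mnt/nfs is hypothetical, and
per the case above the printed value on a mount handled by this module would
be 32.

#include <stdio.h>
#include <unistd.h>

int
main(void)
{
        /* /mnt/nfs is a hypothetical NFS mount point. */
        long bits = pathconf("/mnt/nfs", _PC_FILESIZEBITS);

        if (bits == -1) {
                perror("pathconf");
                return (1);
        }
        (void) printf("_PC_FILESIZEBITS = %ld\n", bits);
        return (0);
}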
4910 4911  
4911 4912  /*
4912 4913   * Called by an async thread to do synchronous pageio.  Do the i/o, wait
4913 4914   * for it to complete, and clean up the page list when done.
4914 4915   */
4915 4916  static int
4916 4917  nfs_sync_pageio(vnode_t *vp, page_t *pp, u_offset_t io_off, size_t io_len,
4917 4918          int flags, cred_t *cr)
4918 4919  {
4919 4920          int error;
4920 4921  
4921 4922          ASSERT(nfs_zone() == VTOMI(vp)->mi_zone);
4922 4923          error = nfs_rdwrlbn(vp, pp, io_off, io_len, flags, cr);
4923 4924          if (flags & B_READ)
4924 4925                  pvn_read_done(pp, (error ? B_ERROR : 0) | flags);
4925 4926          else
4926 4927                  pvn_write_done(pp, (error ? B_ERROR : 0) | flags);
4927 4928          return (error);
4928 4929  }
4929 4930  
4930 4931  /* ARGSUSED */
4931 4932  static int
4932 4933  nfs_pageio(vnode_t *vp, page_t *pp, u_offset_t io_off, size_t io_len,
4933 4934          int flags, cred_t *cr, caller_context_t *ct)
4934 4935  {
4935 4936          int error;
4936 4937          rnode_t *rp;
4937 4938  
4938 4939          if (pp == NULL)
4939 4940                  return (EINVAL);
4940 4941  
4941 4942          if (io_off > MAXOFF32_T)
4942 4943                  return (EFBIG);
4943 4944          if (nfs_zone() != VTOMI(vp)->mi_zone)
4944 4945                  return (EIO);
4945 4946          rp = VTOR(vp);
4946 4947          mutex_enter(&rp->r_statelock);
4947 4948          rp->r_count++;
4948 4949          mutex_exit(&rp->r_statelock);
4949 4950  
4950 4951          if (flags & B_ASYNC) {
4951 4952                  error = nfs_async_pageio(vp, pp, io_off, io_len, flags, cr,
4952 4953                      nfs_sync_pageio);
4953 4954          } else
4954 4955                  error = nfs_rdwrlbn(vp, pp, io_off, io_len, flags, cr);
4955 4956          mutex_enter(&rp->r_statelock);
4956 4957          rp->r_count--;
4957 4958          cv_broadcast(&rp->r_cv);
4958 4959          mutex_exit(&rp->r_statelock);
4959 4960          return (error);
4960 4961  }
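
The r_count manipulation above is a hold-count-plus-condvar idiom: every
in-flight pageio bumps r_count, and teardown paths wait on r_cv until the
count drains to zero. Below is a user-space sketch of the same idiom with
POSIX threads; all names are hypothetical.

#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t drained = PTHREAD_COND_INITIALIZER;
static int count;               /* stands in for rp->r_count */

/* Bracket an I/O the way nfs_pageio() brackets nfs_rdwrlbn(). */
void
io_enter(void)
{
        pthread_mutex_lock(&lock);
        count++;
        pthread_mutex_unlock(&lock);
}

void
io_exit(void)
{
        pthread_mutex_lock(&lock);
        count--;
        pthread_cond_broadcast(&drained);
        pthread_mutex_unlock(&lock);
}

/* Teardown path: wait until all in-flight I/O has finished. */
void
wait_for_drain(void)
{
        pthread_mutex_lock(&lock);
        while (count > 0)
                pthread_cond_wait(&drained, &lock);
        pthread_mutex_unlock(&lock);
}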
4961 4962  
4962 4963  /* ARGSUSED */
4963 4964  static int
4964 4965  nfs_setsecattr(vnode_t *vp, vsecattr_t *vsecattr, int flag, cred_t *cr,
4965 4966          caller_context_t *ct)
4966 4967  {
4967 4968          int error;
4968 4969          mntinfo_t *mi;
4969 4970  
4970 4971          mi = VTOMI(vp);
4971 4972  
4972 4973          if (nfs_zone() != mi->mi_zone)
4973 4974                  return (EIO);
4974 4975          if (mi->mi_flags & MI_ACL) {
4975 4976                  error = acl_setacl2(vp, vsecattr, flag, cr);
4976 4977                  if (mi->mi_flags & MI_ACL)
4977 4978                          return (error);
4978 4979          }
4979 4980  
4980 4981          return (ENOSYS);
4981 4982  }
4982 4983  
4983 4984  /* ARGSUSED */
4984 4985  static int
4985 4986  nfs_getsecattr(vnode_t *vp, vsecattr_t *vsecattr, int flag, cred_t *cr,
4986 4987          caller_context_t *ct)
4987 4988  {
4988 4989          int error;
4989 4990          mntinfo_t *mi;
4990 4991  
4991 4992          mi = VTOMI(vp);
4992 4993  
4993 4994          if (nfs_zone() != mi->mi_zone)
4994 4995                  return (EIO);
4995 4996          if (mi->mi_flags & MI_ACL) {
4996 4997                  error = acl_getacl2(vp, vsecattr, flag, cr);
4997 4998                  if (mi->mi_flags & MI_ACL)
4998 4999                          return (error);
4999 5000          }
5000 5001  
5001 5002          return (fs_fab_acl(vp, vsecattr, flag, cr, ct));
5002 5003  }
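
Taken together with nfs_setsecattr() above, the effect of the fs_fab_acl()
fallback is that a getsecattr caller always receives at least a trivial ACL
fabricated from the file's mode bits, even when the server offers no ACL
support. A minimal illumos user-space sketch using acl(2) against a
hypothetical path:

#include <sys/acl.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
        /* /mnt/nfs/file is a hypothetical path on an NFS mount. */
        int n = acl("/mnt/nfs/file", GETACLCNT, 0, NULL);
        aclent_t *ents;

        if (n < 0) {
                perror("acl");
                return (1);
        }
        ents = malloc(n * sizeof (aclent_t));
        if (ents == NULL || acl("/mnt/nfs/file", GETACL, n, ents) < 0) {
                perror("acl");
                return (1);
        }
        /* Even without server ACL support, expect the minimal entries. */
        (void) printf("%d ACL entries\n", n);
        free(ents);
        return (0);
}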
5003 5004  
5004 5005  /* ARGSUSED */
5005 5006  static int
5006 5007  nfs_shrlock(vnode_t *vp, int cmd, struct shrlock *shr, int flag, cred_t *cr,
5007 5008          caller_context_t *ct)
5008 5009  {
5009 5010          int error;
5010 5011          struct shrlock nshr;
5011 5012          struct nfs_owner nfs_owner;
5012 5013          netobj lm_fh;
5013 5014  
5014 5015          if (nfs_zone() != VTOMI(vp)->mi_zone)
5015 5016                  return (EIO);
5016 5017  
5017 5018          /*
5018 5019           * check for valid cmd parameter
5019 5020           */
5020 5021          if (cmd != F_SHARE && cmd != F_UNSHARE && cmd != F_HASREMOTELOCKS)
5021 5022                  return (EINVAL);
5022 5023  
5023 5024          /*
5024 5025           * Check access permissions
5025 5026           */
5026 5027          if (cmd == F_SHARE &&
5027 5028              (((shr->s_access & F_RDACC) && !(flag & FREAD)) ||
5028 5029              ((shr->s_access & F_WRACC) && !(flag & FWRITE))))
5029 5030                  return (EBADF);
5030 5031  
5031 5032          /*
5032 5033           * If the filesystem is mounted using local locking, pass the
5033 5034           * request off to the local share code.
5034 5035           */
5035 5036          if (VTOMI(vp)->mi_flags & MI_LLOCK)
5036 5037                  return (fs_shrlock(vp, cmd, shr, flag, cr, ct));
5037 5038  
5038 5039          switch (cmd) {
5039 5040          case F_SHARE:
5040 5041          case F_UNSHARE:
5041 5042                  lm_fh.n_len = sizeof (fhandle_t);
5042 5043                  lm_fh.n_bytes = (char *)VTOFH(vp);
5043 5044  
5044 5045                  /*
5045 5046                   * If passed an owner that is too large to fit in an
5046 5047                   * nfs_owner, it is likely a recursive call from the
5047 5048                   * lock manager client, so pass it straight through.  If
5048 5049                   * it is not an nfs_owner, simply return an error.
5049 5050                   */
5050 5051                  if (shr->s_own_len > sizeof (nfs_owner.lowner)) {
5051 5052                          if (((struct nfs_owner *)shr->s_owner)->magic !=
5052 5053                              NFS_OWNER_MAGIC)
5053 5054                                  return (EINVAL);
5054 5055  
5055 5056                          if (error = lm_shrlock(vp, cmd, shr, flag, &lm_fh)) {
5056 5057                                  error = set_errno(error);
5057 5058                          }
5058 5059                          return (error);
5059 5060                  }
5060 5061                  /*
5061 5062                   * A remote share reservation's owner is a combination of
5062 5063                   * a magic number, hostname, and the local owner
5063 5064                   */
5064 5065                  bzero(&nfs_owner, sizeof (nfs_owner));
5065 5066                  nfs_owner.magic = NFS_OWNER_MAGIC;
5066 5067                  (void) strncpy(nfs_owner.hname, uts_nodename(),
5067 5068                      sizeof (nfs_owner.hname));
5068 5069                  bcopy(shr->s_owner, nfs_owner.lowner, shr->s_own_len);
5069 5070                  nshr.s_access = shr->s_access;
5070 5071                  nshr.s_deny = shr->s_deny;
5071 5072                  nshr.s_sysid = 0;
5072 5073                  nshr.s_pid = ttoproc(curthread)->p_pid;
5073 5074                  nshr.s_own_len = sizeof (nfs_owner);
5074 5075                  nshr.s_owner = (caddr_t)&nfs_owner;
5075 5076  
5076 5077                  if (error = lm_shrlock(vp, cmd, &nshr, flag, &lm_fh)) {
5077 5078                          error = set_errno(error);
5078 5079                  }
5079 5080  
5080 5081                  break;
5081 5082  
5082 5083          case F_HASREMOTELOCKS:
5083 5084                  /*
5084 5085                   * NFS client can't store remote locks itself
5085 5086                   */
5086 5087                  shr->s_access = 0;
5087 5088                  error = 0;
5088 5089                  break;
5089 5090  
5090 5091          default:
5091 5092                  error = EINVAL;
5092 5093                  break;
5093 5094          }
5094 5095  
5095 5096          return (error);
5096 5097  }
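
The owner encoding built above can be pictured in isolation. The struct below
is a simplified, hypothetical stand-in for the real struct nfs_owner (whose
exact field sizes live in the NFS client headers, and whose magic value is
invented here); it only shows how a magic number, the node name, and the
opaque local owner are packed into one buffer whose full size becomes
s_own_len.

#include <stdio.h>
#include <string.h>

#define DEMO_OWNER_MAGIC        0x1D385867      /* stand-in magic value */

/* Simplified stand-in for struct nfs_owner (field sizes are invented). */
struct demo_owner {
        int     magic;          /* DEMO_OWNER_MAGIC */
        char    hname[32];      /* this node's name */
        char    lowner[24];     /* opaque local owner, copied verbatim */
};

int
main(void)
{
        struct demo_owner own;
        const char local[] = { 0x00, 0x00, 0x12, 0x34 };   /* e.g. pid bytes */

        (void) memset(&own, 0, sizeof (own));
        own.magic = DEMO_OWNER_MAGIC;
        (void) strncpy(own.hname, "clienthost", sizeof (own.hname));
        (void) memcpy(own.lowner, local, sizeof (local));

        /* The whole struct, zeros included, is handed to the lock manager. */
        (void) printf("owner length = %zu bytes\n", sizeof (own));
        return (0);
}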
  
    1470 lines elided