Print this page
    
OS-5148 ftruncate at offset should emit proper events
Reviewed by: Bryan Cantrill <bryan@joyent.com>
Reviewed by: Jerry Jelinek <jerry.jelinek@joyent.com>
OS-3294 add support for inotify
Reviewed by: Jerry Jelinek <jerry.jelinek@joyent.com>
Reviewed by: Robert Mustacchi <rm@joyent.com>
    
      
        | Split | 
	Close | 
      
      | Expand all | 
      | Collapse all | 
    
    
          --- old/usr/src/uts/common/fs/nfs/nfs3_vnops.c
          +++ new/usr/src/uts/common/fs/nfs/nfs3_vnops.c
   1    1  /*
   2    2   * CDDL HEADER START
   3    3   *
   4    4   * The contents of this file are subject to the terms of the
   5    5   * Common Development and Distribution License (the "License").
   6    6   * You may not use this file except in compliance with the License.
   7    7   *
   8    8   * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9    9   * or http://www.opensolaris.org/os/licensing.
  10   10   * See the License for the specific language governing permissions
  11   11   * and limitations under the License.
  12   12   *
  13   13   * When distributing Covered Code, include this CDDL HEADER in each
  14   14   * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15   15   * If applicable, add the following below this CDDL HEADER, with the
  16   16   * fields enclosed by brackets "[]" replaced with your own identifying
  17   17   * information: Portions Copyright [yyyy] [name of copyright owner]
  18   18   *
  19   19   * CDDL HEADER END
  20   20   */
  21   21  /*
  
    | 
      ↓ open down ↓ | 
    21 lines elided | 
    
      ↑ open up ↑ | 
  
  22   22   * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
  23   23   * Use is subject to license terms.
  24   24   */
  25   25  
  26   26  /*
  27   27   *      Copyright (c) 1983,1984,1985,1986,1987,1988,1989 AT&T.
  28   28   *      All rights reserved.
  29   29   */
  30   30  
  31   31  /*
  32      - * Copyright (c) 2013, Joyent, Inc. All rights reserved.
       32 + * Copyright (c) 2014, Joyent, Inc. All rights reserved.
  33   33   * Copyright 2015 Nexenta Systems, Inc.  All rights reserved.
  34   34   */
  35   35  
  36   36  #include <sys/param.h>
  37   37  #include <sys/types.h>
  38   38  #include <sys/systm.h>
  39   39  #include <sys/cred.h>
  40   40  #include <sys/time.h>
  41   41  #include <sys/vnode.h>
  42   42  #include <sys/vfs.h>
  43   43  #include <sys/vfs_opreg.h>
  44   44  #include <sys/file.h>
  45   45  #include <sys/filio.h>
  46   46  #include <sys/uio.h>
  47   47  #include <sys/buf.h>
  48   48  #include <sys/mman.h>
  49   49  #include <sys/pathname.h>
  50   50  #include <sys/dirent.h>
  51   51  #include <sys/debug.h>
  52   52  #include <sys/vmsystm.h>
  53   53  #include <sys/fcntl.h>
  54   54  #include <sys/flock.h>
  55   55  #include <sys/swap.h>
  56   56  #include <sys/errno.h>
  57   57  #include <sys/strsubr.h>
  58   58  #include <sys/sysmacros.h>
  59   59  #include <sys/kmem.h>
  60   60  #include <sys/cmn_err.h>
  61   61  #include <sys/pathconf.h>
  62   62  #include <sys/utsname.h>
  63   63  #include <sys/dnlc.h>
  64   64  #include <sys/acl.h>
  65   65  #include <sys/systeminfo.h>
  66   66  #include <sys/atomic.h>
  67   67  #include <sys/policy.h>
  68   68  #include <sys/sdt.h>
  69   69  #include <sys/zone.h>
  70   70  
  71   71  #include <rpc/types.h>
  72   72  #include <rpc/auth.h>
  73   73  #include <rpc/clnt.h>
  74   74  #include <rpc/rpc_rdma.h>
  75   75  
  76   76  #include <nfs/nfs.h>
  77   77  #include <nfs/nfs_clnt.h>
  78   78  #include <nfs/rnode.h>
  79   79  #include <nfs/nfs_acl.h>
  80   80  #include <nfs/lm.h>
  81   81  
  82   82  #include <vm/hat.h>
  83   83  #include <vm/as.h>
  84   84  #include <vm/page.h>
  85   85  #include <vm/pvn.h>
  86   86  #include <vm/seg.h>
  87   87  #include <vm/seg_map.h>
  88   88  #include <vm/seg_kpm.h>
  89   89  #include <vm/seg_vn.h>
  90   90  
  91   91  #include <fs/fs_subr.h>
  92   92  
  93   93  #include <sys/ddi.h>
  94   94  
  95   95  static int      nfs3_rdwrlbn(vnode_t *, page_t *, u_offset_t, size_t, int,
  96   96                          cred_t *);
  97   97  static int      nfs3write(vnode_t *, caddr_t, u_offset_t, int, cred_t *,
  98   98                          stable_how *);
  99   99  static int      nfs3read(vnode_t *, caddr_t, offset_t, int, size_t *, cred_t *);
 100  100  static int      nfs3setattr(vnode_t *, struct vattr *, int, cred_t *);
 101  101  static int      nfs3_accessx(void *, int, cred_t *);
 102  102  static int      nfs3lookup_dnlc(vnode_t *, char *, vnode_t **, cred_t *);
 103  103  static int      nfs3lookup_otw(vnode_t *, char *, vnode_t **, cred_t *, int);
 104  104  static int      nfs3create(vnode_t *, char *, struct vattr *, enum vcexcl,
 105  105                          int, vnode_t **, cred_t *, int);
 106  106  static int      nfs3excl_create_settimes(vnode_t *, struct vattr *, cred_t *);
 107  107  static int      nfs3mknod(vnode_t *, char *, struct vattr *, enum vcexcl,
 108  108                          int, vnode_t **, cred_t *);
 109  109  static int      nfs3rename(vnode_t *, char *, vnode_t *, char *, cred_t *,
 110  110                          caller_context_t *);
 111  111  static int      do_nfs3readdir(vnode_t *, rddir_cache *, cred_t *);
 112  112  static void     nfs3readdir(vnode_t *, rddir_cache *, cred_t *);
 113  113  static void     nfs3readdirplus(vnode_t *, rddir_cache *, cred_t *);
 114  114  static int      nfs3_bio(struct buf *, stable_how *, cred_t *);
 115  115  static int      nfs3_getapage(vnode_t *, u_offset_t, size_t, uint_t *,
 116  116                          page_t *[], size_t, struct seg *, caddr_t,
 117  117                          enum seg_rw, cred_t *);
 118  118  static void     nfs3_readahead(vnode_t *, u_offset_t, caddr_t, struct seg *,
 119  119                          cred_t *);
 120  120  static int      nfs3_sync_putapage(vnode_t *, page_t *, u_offset_t, size_t,
 121  121                          int, cred_t *);
 122  122  static int      nfs3_sync_pageio(vnode_t *, page_t *, u_offset_t, size_t,
 123  123                          int, cred_t *);
 124  124  static int      nfs3_commit(vnode_t *, offset3, count3, cred_t *);
 125  125  static void     nfs3_set_mod(vnode_t *);
 126  126  static void     nfs3_get_commit(vnode_t *);
 127  127  static void     nfs3_get_commit_range(vnode_t *, u_offset_t, size_t);
 128  128  static int      nfs3_putpage_commit(vnode_t *, offset_t, size_t, cred_t *);
 129  129  static int      nfs3_commit_vp(vnode_t *, u_offset_t, size_t,  cred_t *);
 130  130  static int      nfs3_sync_commit(vnode_t *, page_t *, offset3, count3,
 131  131                          cred_t *);
 132  132  static void     nfs3_async_commit(vnode_t *, page_t *, offset3, count3,
 133  133                          cred_t *);
 134  134  static void     nfs3_delmap_callback(struct as *, void *, uint_t);
 135  135  
  136  136  /*
  137  137   * Error flags used to pass information about certain special errors
  138  138   * which need to be handled specially.
  139  139   */
  140  140  #define NFS_EOF                 -98
  141  141  #define NFS_VERF_MISMATCH       -97
  142  142  
  143  143  /* ALIGN64 aligns the given buffer and adjust buffer size to 64 bit */
            /*
             * NOTE(review): multi-statement macro not wrapped in do { } while (0);
             * it is only safe when not used as the unbraced body of an if/else.
             * Evaluates x, ptr and sz more than once — pass only simple lvalues.
             */
  144  144  #define ALIGN64(x, ptr, sz)                                             \
  145  145          x = ((uintptr_t)(ptr)) & (sizeof (uint64_t) - 1);               \
  146  146          if (x) {                                                        \
  147  147                  x = sizeof (uint64_t) - (x);                            \
  148  148                  sz -= (x);                                              \
  149  149                  ptr += (x);                                             \
  150  150          }
 151  151  
 152  152  /*
 153  153   * These are the vnode ops routines which implement the vnode interface to
 154  154   * the networked file system.  These routines just take their parameters,
 155  155   * make them look networkish by putting the right info into interface structs,
 156  156   * and then calling the appropriate remote routine(s) to do the work.
 157  157   *
 158  158   * Note on directory name lookup cacheing:  If we detect a stale fhandle,
 159  159   * we purge the directory cache relative to that vnode.  This way, the
 160  160   * user won't get burned by the cache repeatedly.  See <nfs/rnode.h> for
 161  161   * more details on rnode locking.
 162  162   */
 163  163  
 164  164  static int      nfs3_open(vnode_t **, int, cred_t *, caller_context_t *);
 165  165  static int      nfs3_close(vnode_t *, int, int, offset_t, cred_t *,
 166  166                          caller_context_t *);
 167  167  static int      nfs3_read(vnode_t *, struct uio *, int, cred_t *,
 168  168                          caller_context_t *);
 169  169  static int      nfs3_write(vnode_t *, struct uio *, int, cred_t *,
 170  170                          caller_context_t *);
 171  171  static int      nfs3_ioctl(vnode_t *, int, intptr_t, int, cred_t *, int *,
 172  172                          caller_context_t *);
 173  173  static int      nfs3_getattr(vnode_t *, struct vattr *, int, cred_t *,
 174  174                          caller_context_t *);
 175  175  static int      nfs3_setattr(vnode_t *, struct vattr *, int, cred_t *,
 176  176                          caller_context_t *);
 177  177  static int      nfs3_access(vnode_t *, int, int, cred_t *, caller_context_t *);
 178  178  static int      nfs3_readlink(vnode_t *, struct uio *, cred_t *,
 179  179                          caller_context_t *);
 180  180  static int      nfs3_fsync(vnode_t *, int, cred_t *, caller_context_t *);
 181  181  static void     nfs3_inactive(vnode_t *, cred_t *, caller_context_t *);
 182  182  static int      nfs3_lookup(vnode_t *, char *, vnode_t **,
 183  183                          struct pathname *, int, vnode_t *, cred_t *,
 184  184                          caller_context_t *, int *, pathname_t *);
 185  185  static int      nfs3_create(vnode_t *, char *, struct vattr *, enum vcexcl,
 186  186                          int, vnode_t **, cred_t *, int, caller_context_t *,
 187  187                          vsecattr_t *);
 188  188  static int      nfs3_remove(vnode_t *, char *, cred_t *, caller_context_t *,
 189  189                          int);
 190  190  static int      nfs3_link(vnode_t *, vnode_t *, char *, cred_t *,
 191  191                          caller_context_t *, int);
 192  192  static int      nfs3_rename(vnode_t *, char *, vnode_t *, char *, cred_t *,
 193  193                          caller_context_t *, int);
 194  194  static int      nfs3_mkdir(vnode_t *, char *, struct vattr *, vnode_t **,
 195  195                          cred_t *, caller_context_t *, int, vsecattr_t *);
 196  196  static int      nfs3_rmdir(vnode_t *, char *, vnode_t *, cred_t *,
 197  197                          caller_context_t *, int);
 198  198  static int      nfs3_symlink(vnode_t *, char *, struct vattr *, char *,
 199  199                          cred_t *, caller_context_t *, int);
 200  200  static int      nfs3_readdir(vnode_t *, struct uio *, cred_t *, int *,
 201  201                          caller_context_t *, int);
 202  202  static int      nfs3_fid(vnode_t *, fid_t *, caller_context_t *);
 203  203  static int      nfs3_rwlock(vnode_t *, int, caller_context_t *);
 204  204  static void     nfs3_rwunlock(vnode_t *, int, caller_context_t *);
 205  205  static int      nfs3_seek(vnode_t *, offset_t, offset_t *, caller_context_t *);
 206  206  static int      nfs3_getpage(vnode_t *, offset_t, size_t, uint_t *,
 207  207                          page_t *[], size_t, struct seg *, caddr_t,
 208  208                          enum seg_rw, cred_t *, caller_context_t *);
 209  209  static int      nfs3_putpage(vnode_t *, offset_t, size_t, int, cred_t *,
 210  210                          caller_context_t *);
 211  211  static int      nfs3_map(vnode_t *, offset_t, struct as *, caddr_t *, size_t,
 212  212                          uchar_t, uchar_t, uint_t, cred_t *, caller_context_t *);
 213  213  static int      nfs3_addmap(vnode_t *, offset_t, struct as *, caddr_t, size_t,
 214  214                          uchar_t, uchar_t, uint_t, cred_t *, caller_context_t *);
 215  215  static int      nfs3_frlock(vnode_t *, int, struct flock64 *, int, offset_t,
 216  216                          struct flk_callback *, cred_t *, caller_context_t *);
 217  217  static int      nfs3_space(vnode_t *, int, struct flock64 *, int, offset_t,
 218  218                          cred_t *, caller_context_t *);
 219  219  static int      nfs3_realvp(vnode_t *, vnode_t **, caller_context_t *);
 220  220  static int      nfs3_delmap(vnode_t *, offset_t, struct as *, caddr_t, size_t,
 221  221                          uint_t, uint_t, uint_t, cred_t *, caller_context_t *);
 222  222  static int      nfs3_pathconf(vnode_t *, int, ulong_t *, cred_t *,
 223  223                          caller_context_t *);
 224  224  static int      nfs3_pageio(vnode_t *, page_t *, u_offset_t, size_t, int,
 225  225                          cred_t *, caller_context_t *);
 226  226  static void     nfs3_dispose(vnode_t *, page_t *, int, int, cred_t *,
 227  227                          caller_context_t *);
 228  228  static int      nfs3_setsecattr(vnode_t *, vsecattr_t *, int, cred_t *,
 229  229                          caller_context_t *);
 230  230  static int      nfs3_getsecattr(vnode_t *, vsecattr_t *, int, cred_t *,
 231  231                          caller_context_t *);
 232  232  static int      nfs3_shrlock(vnode_t *, int, struct shrlock *, int, cred_t *,
 233  233                          caller_context_t *);
 234  234  
  235  235  struct vnodeops *nfs3_vnodeops;
  236  236  
            /*
             * Dispatch table mapping the generic VOP entry points to their
             * NFSv3 client implementations; consumed at mount/registration
             * time to construct nfs3_vnodeops.
             */
  237  237  const fs_operation_def_t nfs3_vnodeops_template[] = {
  238  238          VOPNAME_OPEN,           { .vop_open = nfs3_open },
  239  239          VOPNAME_CLOSE,          { .vop_close = nfs3_close },
  240  240          VOPNAME_READ,           { .vop_read = nfs3_read },
  241  241          VOPNAME_WRITE,          { .vop_write = nfs3_write },
  242  242          VOPNAME_IOCTL,          { .vop_ioctl = nfs3_ioctl },
  243  243          VOPNAME_GETATTR,        { .vop_getattr = nfs3_getattr },
  244  244          VOPNAME_SETATTR,        { .vop_setattr = nfs3_setattr },
  245  245          VOPNAME_ACCESS,         { .vop_access = nfs3_access },
  246  246          VOPNAME_LOOKUP,         { .vop_lookup = nfs3_lookup },
  247  247          VOPNAME_CREATE,         { .vop_create = nfs3_create },
  248  248          VOPNAME_REMOVE,         { .vop_remove = nfs3_remove },
  249  249          VOPNAME_LINK,           { .vop_link = nfs3_link },
  250  250          VOPNAME_RENAME,         { .vop_rename = nfs3_rename },
  251  251          VOPNAME_MKDIR,          { .vop_mkdir = nfs3_mkdir },
  252  252          VOPNAME_RMDIR,          { .vop_rmdir = nfs3_rmdir },
  253  253          VOPNAME_READDIR,        { .vop_readdir = nfs3_readdir },
  254  254          VOPNAME_SYMLINK,        { .vop_symlink = nfs3_symlink },
  255  255          VOPNAME_READLINK,       { .vop_readlink = nfs3_readlink },
  256  256          VOPNAME_FSYNC,          { .vop_fsync = nfs3_fsync },
  257  257          VOPNAME_INACTIVE,       { .vop_inactive = nfs3_inactive },
  258  258          VOPNAME_FID,            { .vop_fid = nfs3_fid },
  259  259          VOPNAME_RWLOCK,         { .vop_rwlock = nfs3_rwlock },
  260  260          VOPNAME_RWUNLOCK,       { .vop_rwunlock = nfs3_rwunlock },
  261  261          VOPNAME_SEEK,           { .vop_seek = nfs3_seek },
  262  262          VOPNAME_FRLOCK,         { .vop_frlock = nfs3_frlock },
  263  263          VOPNAME_SPACE,          { .vop_space = nfs3_space },
  264  264          VOPNAME_REALVP,         { .vop_realvp = nfs3_realvp },
  265  265          VOPNAME_GETPAGE,        { .vop_getpage = nfs3_getpage },
  266  266          VOPNAME_PUTPAGE,        { .vop_putpage = nfs3_putpage },
  267  267          VOPNAME_MAP,            { .vop_map = nfs3_map },
  268  268          VOPNAME_ADDMAP,         { .vop_addmap = nfs3_addmap },
  269  269          VOPNAME_DELMAP,         { .vop_delmap = nfs3_delmap },
  270  270          /* no separate nfs3_dump */
  271  271          VOPNAME_DUMP,           { .vop_dump = nfs_dump },
  272  272          VOPNAME_PATHCONF,       { .vop_pathconf = nfs3_pathconf },
  273  273          VOPNAME_PAGEIO,         { .vop_pageio = nfs3_pageio },
  274  274          VOPNAME_DISPOSE,        { .vop_dispose = nfs3_dispose },
  275  275          VOPNAME_SETSECATTR,     { .vop_setsecattr = nfs3_setsecattr },
  276  276          VOPNAME_GETSECATTR,     { .vop_getsecattr = nfs3_getsecattr },
  277  277          VOPNAME_SHRLOCK,        { .vop_shrlock = nfs3_shrlock },
  278  278          VOPNAME_VNEVENT,        { .vop_vnevent = fs_vnevent_support },
  279  279          NULL,                   NULL
  280  280  };
 281  281  
  282  282  /*
  283  283   * XXX:  This is referenced in modstubs.s
  284  284   */
            /* Return the NFSv3 vnodeops table pointer (accessor for module stubs). */
  285  285  struct vnodeops *
  286  286  nfs3_getvnodeops(void)
  287  287  {
  288  288          return (nfs3_vnodeops);
  289  289  }
 290  290  
  291  291  /* ARGSUSED */
            /*
             * VOP_OPEN for the NFSv3 client.  Caches the opening credential on
             * the rnode (if none is cached yet) and, per close-to-open
             * consistency, validates or purges cached attributes/data as
             * described in the comment below.  Returns 0 on success, EIO when
             * called from a zone other than the mount's zone, or the error
             * from cache validation / the over-the-wire GETATTR.
             */
  292  292  static int
  293  293  nfs3_open(vnode_t **vpp, int flag, cred_t *cr, caller_context_t *ct)
  294  294  {
  295  295          int error;
  296  296          struct vattr va;
  297  297          rnode_t *rp;
  298  298          vnode_t *vp;
  299  299  
  300  300          vp = *vpp;
  301  301          if (nfs_zone() != VTOMI(vp)->mi_zone)
  302  302                  return (EIO);
  303  303          rp = VTOR(vp);
  304  304          mutex_enter(&rp->r_statelock);
  305  305          if (rp->r_cred == NULL) {
  306  306                  crhold(cr);
  307  307                  rp->r_cred = cr;
  308  308          }
  309  309          mutex_exit(&rp->r_statelock);
  310  310  
  311  311          /*
  312  312           * If there is no cached data or if close-to-open
  313  313           * consistency checking is turned off, we can avoid
  314  314           * the over the wire getattr.  Otherwise, if the
  315  315           * file system is mounted readonly, then just verify
  316  316           * the caches are up to date using the normal mechanism.
  317  317           * Else, if the file is not mmap'd, then just mark
  318  318           * the attributes as timed out.  They will be refreshed
  319  319           * and the caches validated prior to being used.
  320  320           * Else, the file system is mounted writeable so
  321  321           * force an over the wire GETATTR in order to ensure
  322  322           * that all cached data is valid.
  323  323           */
  324  324          if (vp->v_count > 1 ||
  325  325              ((vn_has_cached_data(vp) || HAVE_RDDIR_CACHE(rp)) &&
  326  326              !(VTOMI(vp)->mi_flags & MI_NOCTO))) {
  327  327                  if (vn_is_readonly(vp))
  328  328                          error = nfs3_validate_caches(vp, cr);
  329  329                  else if (rp->r_mapcnt == 0 && vp->v_count == 1) {
  330  330                          PURGE_ATTRCACHE(vp);
  331  331                          error = 0;
  332  332                  } else {
  333  333                          va.va_mask = AT_ALL;
  334  334                          error = nfs3_getattr_otw(vp, &va, cr);
  335  335                  }
  336  336          } else
  337  337                  error = 0;
  338  338  
  339  339          return (error);
  340  340  }
 341  341  
  342  342  /* ARGSUSED */
            /*
             * VOP_CLOSE for the NFSv3 client.  Releases the record locks and
             * share reservations held by this process (locally or over the
             * network depending on MI_LLOCK), and on the last close (count ==
             * 1) of a file opened for write flushes cached pages — async for
             * "nocto" mounts, sync flush+commit otherwise.  May issue an
             * over-the-wire GETATTR to refresh attributes if RWRITEATTR is
             * set.  Returns EIO when called from the wrong zone, otherwise 0
             * or the first error recorded on the rnode / by the flush.
             */
  343  343  static int
  344  344  nfs3_close(vnode_t *vp, int flag, int count, offset_t offset, cred_t *cr,
  345  345                  caller_context_t *ct)
  346  346  {
  347  347          rnode_t *rp;
  348  348          int error;
  349  349          struct vattr va;
  350  350  
  351  351          /*
  352  352           * zone_enter(2) prevents processes from changing zones with NFS files
  353  353           * open; if we happen to get here from the wrong zone we can't do
  354  354           * anything over the wire.
  355  355           */
  356  356          if (VTOMI(vp)->mi_zone != nfs_zone()) {
  357  357                  /*
  358  358                   * We could attempt to clean up locks, except we're sure
  359  359                   * that the current process didn't acquire any locks on
  360  360                   * the file: any attempt to lock a file belong to another zone
  361  361                   * will fail, and one can't lock an NFS file and then change
  362  362                   * zones, as that fails too.
  363  363                   *
  364  364                   * Returning an error here is the sane thing to do.  A
  365  365                   * subsequent call to VN_RELE() which translates to a
  366  366                   * nfs3_inactive() will clean up state: if the zone of the
  367  367                   * vnode's origin is still alive and kicking, an async worker
  368  368                   * thread will handle the request (from the correct zone), and
  369  369                   * everything (minus the commit and final nfs3_getattr_otw()
  370  370                   * call) should be OK. If the zone is going away
  371  371                   * nfs_async_inactive() will throw away cached pages inline.
  372  372                   */
  373  373                  return (EIO);
  374  374          }
  375  375  
  376  376          /*
  377  377           * If we are using local locking for this filesystem, then
  378  378           * release all of the SYSV style record locks.  Otherwise,
  379  379           * we are doing network locking and we need to release all
  380  380           * of the network locks.  All of the locks held by this
  381  381           * process on this file are released no matter what the
  382  382           * incoming reference count is.
  383  383           */
  384  384          if (VTOMI(vp)->mi_flags & MI_LLOCK) {
  385  385                  cleanlocks(vp, ttoproc(curthread)->p_pid, 0);
  386  386                  cleanshares(vp, ttoproc(curthread)->p_pid);
  387  387          } else
  388  388                  nfs_lockrelease(vp, flag, offset, cr);
  389  389  
  390  390          if (count > 1)
  391  391                  return (0);
  392  392  
  393  393          /*
  394  394           * If the file has been `unlinked', then purge the
  395  395           * DNLC so that this vnode will get recycled quicker
  396  396           * and the .nfs* file on the server will get removed.
  397  397           */
  398  398          rp = VTOR(vp);
  399  399          if (rp->r_unldvp != NULL)
  400  400                  dnlc_purge_vp(vp);
  401  401  
  402  402          /*
  403  403           * If the file was open for write and there are pages,
  404  404           * then if the file system was mounted using the "no-close-
  405  405           *      to-open" semantics, then start an asynchronous flush
  406  406           *      of the all of the pages in the file.
  407  407           * else the file system was not mounted using the "no-close-
  408  408           *      to-open" semantics, then do a synchronous flush and
  409  409           *      commit of all of the dirty and uncommitted pages.
  410  410           *
  411  411           * The asynchronous flush of the pages in the "nocto" path
  412  412           * mostly just associates a cred pointer with the rnode so
  413  413           * writes which happen later will have a better chance of
  414  414           * working.  It also starts the data being written to the
  415  415           * server, but without unnecessarily delaying the application.
  416  416           */
  417  417          if ((flag & FWRITE) && vn_has_cached_data(vp)) {
  418  418                  if (VTOMI(vp)->mi_flags & MI_NOCTO) {
  419  419                          error = nfs3_putpage(vp, (offset_t)0, 0, B_ASYNC,
  420  420                              cr, ct);
  421  421                          if (error == EAGAIN)
  422  422                                  error = 0;
  423  423                  } else
  424  424                          error = nfs3_putpage_commit(vp, (offset_t)0, 0, cr);
                        /*
                         * Even when the flush itself succeeded, report (and
                         * clear) any asynchronous write error latched on the
                         * rnode so the application sees it at close time.
                         */
  425  425                  if (!error) {
  426  426                          mutex_enter(&rp->r_statelock);
  427  427                          error = rp->r_error;
  428  428                          rp->r_error = 0;
  429  429                          mutex_exit(&rp->r_statelock);
  430  430                  }
  431  431          } else {
  432  432                  mutex_enter(&rp->r_statelock);
  433  433                  error = rp->r_error;
  434  434                  rp->r_error = 0;
  435  435                  mutex_exit(&rp->r_statelock);
  436  436          }
  437  437  
  438  438          /*
  439  439           * If RWRITEATTR is set, then issue an over the wire GETATTR to
  440  440           * refresh the attribute cache with a set of attributes which
  441  441           * weren't returned from a WRITE.  This will enable the close-
  442  442           * to-open processing to work.
  443  443           */
  444  444          if (rp->r_flags & RWRITEATTR)
  445  445                  (void) nfs3_getattr_otw(vp, &va, cr);
  446  446  
  447  447          return (error);
  448  448  }
 449  449  
  450  450  /* ARGSUSED */
            /*
             * Direct (uncached) read for the NFSv3 client: satisfies uiop by
             * looping over READ RPCs of at most mi_tsize bytes each, retrying
             * transparently on ENFS_TRYAGAIN, until the residual count is
             * exhausted or the server reports EOF.  Updates per-mount I/O
             * kstats and the lwp inblk statistic along the way.  Caller must
             * already be in the mount's zone (ASSERTed).  Returns 0 on
             * success, an RPC/NFS errno on failure, or EIO if the server
             * returns a byte count different from the amount requested.
             */
  451  451  static int
  452  452  nfs3_directio_read(vnode_t *vp, struct uio *uiop, cred_t *cr)
  453  453  {
  454  454          mntinfo_t *mi;
  455  455          READ3args args;
  456  456          READ3uiores res;
  457  457          int tsize;
  458  458          offset_t offset;
  459  459          ssize_t count;
  460  460          int error;
  461  461          int douprintf;
  462  462          failinfo_t fi;
  463  463          char *sv_hostname;
  464  464  
  465  465          mi = VTOMI(vp);
  466  466          ASSERT(nfs_zone() == VTOMI(vp)->mi_zone);
  467  467          sv_hostname = VTOR(vp)->r_server->sv_hostname;
  468  468  
  469  469          douprintf = 1;
  470  470          args.file = *VTOFH3(vp);
  471  471          fi.vp = vp;
  472  472          fi.fhp = (caddr_t)&args.file;
  473  473          fi.copyproc = nfs3copyfh;
  474  474          fi.lookupproc = nfs3lookup;
  475  475          fi.xattrdirproc = acl_getxattrdir3;
  476  476  
  477  477          res.uiop = uiop;
  478  478  
  479  479          res.wlist = NULL;
  480  480  
  481  481          offset = uiop->uio_loffset;
  482  482          count = uiop->uio_resid;
  483  483  
  484  484          do {
  485  485                  if (mi->mi_io_kstats) {
  486  486                          mutex_enter(&mi->mi_lock);
  487  487                          kstat_runq_enter(KSTAT_IO_PTR(mi->mi_io_kstats));
  488  488                          mutex_exit(&mi->mi_lock);
  489  489                  }
  490  490  
                        /* One READ RPC per iteration, capped at mi_tsize bytes. */
  491  491                  do {
  492  492                          tsize = MIN(mi->mi_tsize, count);
  493  493                          args.offset = (offset3)offset;
  494  494                          args.count = (count3)tsize;
  495  495                          res.size = (uint_t)tsize;
  496  496                          args.res_uiop = uiop;
  497  497                          args.res_data_val_alt = NULL;
  498  498  
  499  499                          error = rfs3call(mi, NFSPROC3_READ,
  500  500                              xdr_READ3args, (caddr_t)&args,
  501  501                              xdr_READ3uiores, (caddr_t)&res, cr,
  502  502                              &douprintf, &res.status, 0, &fi);
  503  503                  } while (error == ENFS_TRYAGAIN);
  504  504  
  505  505                  if (mi->mi_io_kstats) {
  506  506                          mutex_enter(&mi->mi_lock);
  507  507                          kstat_runq_exit(KSTAT_IO_PTR(mi->mi_io_kstats));
  508  508                          mutex_exit(&mi->mi_lock);
  509  509                  }
  510  510  
  511  511                  if (error)
  512  512                          return (error);
  513  513  
  514  514                  error = geterrno3(res.status);
  515  515                  if (error)
  516  516                          return (error);
  517  517  
  518  518                  if (res.count != res.size) {
  519  519                          zcmn_err(getzoneid(), CE_WARN,
  520  520  "nfs3_directio_read: server %s returned incorrect amount",
  521  521                              sv_hostname);
  522  522                          return (EIO);
  523  523                  }
  524  524                  count -= res.count;
  525  525                  offset += res.count;
  526  526                  if (mi->mi_io_kstats) {
  527  527                          mutex_enter(&mi->mi_lock);
  528  528                          KSTAT_IO_PTR(mi->mi_io_kstats)->reads++;
  529  529                          KSTAT_IO_PTR(mi->mi_io_kstats)->nread += res.count;
  530  530                          mutex_exit(&mi->mi_lock);
  531  531                  }
  532  532                  lwp_stat_update(LWP_STAT_INBLK, 1);
  533  533          } while (count && !res.eof);
  534  534  
  535  535          return (0);
  536  536  }
 537  537  
 538  538  /* ARGSUSED */
 539  539  static int
 540  540  nfs3_read(vnode_t *vp, struct uio *uiop, int ioflag, cred_t *cr,
 541  541          caller_context_t *ct)
 542  542  {
 543  543          rnode_t *rp;
 544  544          u_offset_t off;
 545  545          offset_t diff;
 546  546          int on;
 547  547          size_t n;
 548  548          caddr_t base;
 549  549          uint_t flags;
 550  550          int error = 0;
 551  551          mntinfo_t *mi;
 552  552  
 553  553          rp = VTOR(vp);
 554  554          mi = VTOMI(vp);
 555  555  
 556  556          ASSERT(nfs_rw_lock_held(&rp->r_rwlock, RW_READER));
 557  557  
 558  558          if (nfs_zone() != mi->mi_zone)
 559  559                  return (EIO);
 560  560  
 561  561          if (vp->v_type != VREG)
 562  562                  return (EISDIR);
 563  563  
 564  564          if (uiop->uio_resid == 0)
 565  565                  return (0);
 566  566  
 567  567          if (uiop->uio_loffset < 0 || uiop->uio_loffset + uiop->uio_resid < 0)
 568  568                  return (EINVAL);
 569  569  
 570  570          /*
 571  571           * Bypass VM if caching has been disabled (e.g., locking) or if
 572  572           * using client-side direct I/O and the file is not mmap'd and
 573  573           * there are no cached pages.
 574  574           */
 575  575          if ((vp->v_flag & VNOCACHE) ||
 576  576              (((rp->r_flags & RDIRECTIO) || (mi->mi_flags & MI_DIRECTIO)) &&
 577  577              rp->r_mapcnt == 0 && rp->r_inmap == 0 &&
 578  578              !vn_has_cached_data(vp))) {
 579  579                  return (nfs3_directio_read(vp, uiop, cr));
 580  580          }
 581  581  
 582  582          do {
 583  583                  off = uiop->uio_loffset & MAXBMASK; /* mapping offset */
 584  584                  on = uiop->uio_loffset & MAXBOFFSET; /* Relative offset */
 585  585                  n = MIN(MAXBSIZE - on, uiop->uio_resid);
 586  586  
 587  587                  error = nfs3_validate_caches(vp, cr);
 588  588                  if (error)
 589  589                          break;
 590  590  
 591  591                  mutex_enter(&rp->r_statelock);
 592  592                  while (rp->r_flags & RINCACHEPURGE) {
 593  593                          if (!cv_wait_sig(&rp->r_cv, &rp->r_statelock)) {
 594  594                                  mutex_exit(&rp->r_statelock);
 595  595                                  return (EINTR);
 596  596                          }
 597  597                  }
 598  598                  diff = rp->r_size - uiop->uio_loffset;
 599  599                  mutex_exit(&rp->r_statelock);
 600  600                  if (diff <= 0)
 601  601                          break;
 602  602                  if (diff < n)
 603  603                          n = (size_t)diff;
 604  604  
 605  605                  if (vpm_enable) {
 606  606                          /*
 607  607                           * Copy data.
 608  608                           */
 609  609                          error = vpm_data_copy(vp, off + on, n, uiop,
 610  610                              1, NULL, 0, S_READ);
 611  611                  } else {
 612  612                          base = segmap_getmapflt(segkmap, vp, off + on, n, 1,
 613  613                              S_READ);
 614  614  
 615  615                          error = uiomove(base + on, n, UIO_READ, uiop);
 616  616                  }
 617  617  
 618  618                  if (!error) {
 619  619                          /*
 620  620                           * If read a whole block or read to eof,
 621  621                           * won't need this buffer again soon.
 622  622                           */
 623  623                          mutex_enter(&rp->r_statelock);
 624  624                          if (n + on == MAXBSIZE ||
 625  625                              uiop->uio_loffset == rp->r_size)
 626  626                                  flags = SM_DONTNEED;
 627  627                          else
 628  628                                  flags = 0;
 629  629                          mutex_exit(&rp->r_statelock);
 630  630                          if (vpm_enable) {
 631  631                                  error = vpm_sync_pages(vp, off, n, flags);
 632  632                          } else {
 633  633                                  error = segmap_release(segkmap, base, flags);
 634  634                          }
 635  635                  } else {
 636  636                          if (vpm_enable) {
 637  637                                  (void) vpm_sync_pages(vp, off, n, 0);
 638  638                          } else {
 639  639                                  (void) segmap_release(segkmap, base, 0);
 640  640                          }
 641  641                  }
 642  642          } while (!error && uiop->uio_resid > 0);
 643  643  
 644  644          return (error);
 645  645  }
 646  646  
/*
 * VOP_WRITE entry point for NFS version 3 files.
 *
 * Rejects writes to non-regular files (EISDIR) and cross-zone access
 * (EIO).  For O_APPEND writes, an r_rwlock hold is upgraded from
 * reader to writer (the caller is presumed to already hold r_rwlock —
 * NOTE(review): confirm against the VOP_RWLOCK caller) and the start
 * offset is taken from a fresh over-the-wire GETATTR.  The transfer
 * is clipped to the process file-size limit; a write starting at or
 * beyond the limit fires the RLIMIT_FSIZE rctl action and returns
 * EFBIG.
 *
 * Two i/o paths exist: when caching is disabled (VNOCACHE), or
 * client-side direct i/o is enabled and the file has no mappings and
 * no cached pages, data is copied into a kernel buffer and pushed
 * synchronously with nfs3write(); otherwise the write goes through
 * the page cache via vpm/segmap and writerp().
 *
 * On error, uio_loffset/uio_resid are rewound to the start of the
 * failed chunk (plus any bytes clipped off by the resource limit) so
 * the caller can tell how much was actually consumed.
 */
/* ARGSUSED */
static int
nfs3_write(vnode_t *vp, struct uio *uiop, int ioflag, cred_t *cr,
	caller_context_t *ct)
{
	rlim64_t limit = uiop->uio_llimit;
	rnode_t *rp;
	u_offset_t off;
	caddr_t base;
	uint_t flags;
	int remainder;
	size_t n;
	int on;
	int error;
	int resid;
	offset_t offset;
	mntinfo_t *mi;
	uint_t bsize;

	rp = VTOR(vp);

	if (vp->v_type != VREG)
		return (EISDIR);

	mi = VTOMI(vp);
	if (nfs_zone() != mi->mi_zone)
		return (EIO);
	if (uiop->uio_resid == 0)
		return (0);

	if (ioflag & FAPPEND) {
		struct vattr va;

		/*
		 * Must serialize if appending.
		 */
		if (nfs_rw_lock_held(&rp->r_rwlock, RW_READER)) {
			nfs_rw_exit(&rp->r_rwlock);
			if (nfs_rw_enter_sig(&rp->r_rwlock, RW_WRITER,
			    INTR(vp)))
				return (EINTR);
		}

		/* Append offset comes from the server's idea of the size. */
		va.va_mask = AT_SIZE;
		error = nfs3getattr(vp, &va, cr);
		if (error)
			return (error);
		uiop->uio_loffset = va.va_size;
	}

	offset = uiop->uio_loffset + uiop->uio_resid;

	if (uiop->uio_loffset < 0 || offset < 0)
		return (EINVAL);

	if (limit == RLIM64_INFINITY || limit > MAXOFFSET_T)
		limit = MAXOFFSET_T;

	/*
	 * Check to make sure that the process will not exceed
	 * its limit on file size.  It is okay to write up to
	 * the limit, but not beyond.  Thus, the write which
	 * reaches the limit will be short and the next write
	 * will return an error.
	 */
	remainder = 0;
	if (offset > limit) {
		remainder = offset - limit;
		uiop->uio_resid = limit - uiop->uio_loffset;
		if (uiop->uio_resid <= 0) {
			proc_t *p = ttoproc(curthread);

			uiop->uio_resid += remainder;
			mutex_enter(&p->p_lock);
			(void) rctl_action(rctlproc_legacy[RLIMIT_FSIZE],
			    p->p_rctls, p, RCA_UNSAFE_SIGINFO);
			mutex_exit(&p->p_lock);
			return (EFBIG);
		}
	}

	/* Serialize against other i/o that requires exclusion (locking). */
	if (nfs_rw_enter_sig(&rp->r_lkserlock, RW_READER, INTR(vp)))
		return (EINTR);

	/*
	 * Bypass VM if caching has been disabled (e.g., locking) or if
	 * using client-side direct I/O and the file is not mmap'd and
	 * there are no cached pages.
	 */
	if ((vp->v_flag & VNOCACHE) ||
	    (((rp->r_flags & RDIRECTIO) || (mi->mi_flags & MI_DIRECTIO)) &&
	    rp->r_mapcnt == 0 && rp->r_inmap == 0 &&
	    !vn_has_cached_data(vp))) {
		size_t bufsize;
		int count;
		u_offset_t org_offset;
		stable_how stab_comm;

/*
 * Direct-write path.  Also entered via goto from the page-cache path
 * below when faulting a page fails with EACCES on a write-only file.
 */
nfs3_fwrite:
		if (rp->r_flags & RSTALE) {
			resid = uiop->uio_resid;
			offset = uiop->uio_loffset;
			error = rp->r_error;
			/*
			 * A close may have cleared r_error, if so,
			 * propagate ESTALE error return properly
			 */
			if (error == 0)
				error = ESTALE;
			goto bottom;
		}
		/* Stage the data through a kernel buffer, one chunk at a time. */
		bufsize = MIN(uiop->uio_resid, mi->mi_stsize);
		base = kmem_alloc(bufsize, KM_SLEEP);
		do {
			if (ioflag & FDSYNC)
				stab_comm = DATA_SYNC;
			else
				stab_comm = FILE_SYNC;
			/* Remember the pre-chunk position for error rewind. */
			resid = uiop->uio_resid;
			offset = uiop->uio_loffset;
			count = MIN(uiop->uio_resid, bufsize);
			org_offset = uiop->uio_loffset;
			error = uiomove(base, count, UIO_WRITE, uiop);
			if (!error) {
				error = nfs3write(vp, base, org_offset,
				    count, cr, &stab_comm);
			}
		} while (!error && uiop->uio_resid > 0);
		kmem_free(base, bufsize);
		goto bottom;
	}


	bsize = vp->v_vfsp->vfs_bsize;

	/* Page-cache path: copy into cached pages one MAXBSIZE block at a time. */
	do {
		off = uiop->uio_loffset & MAXBMASK; /* mapping offset */
		on = uiop->uio_loffset & MAXBOFFSET; /* Relative offset */
		n = MIN(MAXBSIZE - on, uiop->uio_resid);

		/* Remember the pre-chunk position for error rewind. */
		resid = uiop->uio_resid;
		offset = uiop->uio_loffset;

		if (rp->r_flags & RSTALE) {
			error = rp->r_error;
			/*
			 * A close may have cleared r_error, if so,
			 * propagate ESTALE error return properly
			 */
			if (error == 0)
				error = ESTALE;
			break;
		}

		/*
		 * Don't create dirty pages faster than they
		 * can be cleaned so that the system doesn't
		 * get imbalanced.  If the async queue is
		 * maxed out, then wait for it to drain before
		 * creating more dirty pages.  Also, wait for
		 * any threads doing pagewalks in the vop_getattr
		 * entry points so that they don't block for
		 * long periods.
		 */
		mutex_enter(&rp->r_statelock);
		while ((mi->mi_max_threads != 0 &&
		    rp->r_awcount > 2 * mi->mi_max_threads) ||
		    rp->r_gcount > 0) {
			if (INTR(vp)) {
				klwp_t *lwp = ttolwp(curthread);

				if (lwp != NULL)
					lwp->lwp_nostop++;
				if (!cv_wait_sig(&rp->r_cv, &rp->r_statelock)) {
					mutex_exit(&rp->r_statelock);
					if (lwp != NULL)
						lwp->lwp_nostop--;
					error = EINTR;
					goto bottom;
				}
				if (lwp != NULL)
					lwp->lwp_nostop--;
			} else
				cv_wait(&rp->r_cv, &rp->r_statelock);
		}
		mutex_exit(&rp->r_statelock);

		/*
		 * Touch the page and fault it in if it is not in core
		 * before segmap_getmapflt or vpm_data_copy can lock it.
		 * This is to avoid the deadlock if the buffer is mapped
		 * to the same file through mmap which we want to write.
		 */
		uio_prefaultpages((long)n, uiop);

		if (vpm_enable) {
			/*
			 * It will use kpm mappings, so no need to
			 * pass an address.
			 */
			error = writerp(rp, NULL, n, uiop, 0);
		} else  {
			if (segmap_kpm) {
				int pon = uiop->uio_loffset & PAGEOFFSET;
				size_t pn = MIN(PAGESIZE - pon,
				    uiop->uio_resid);
				int pagecreate;

				/*
				 * Create the page without reading it in only
				 * when the write covers the whole page or
				 * extends beyond the current file size.
				 */
				mutex_enter(&rp->r_statelock);
				pagecreate = (pon == 0) && (pn == PAGESIZE ||
				    uiop->uio_loffset + pn >= rp->r_size);
				mutex_exit(&rp->r_statelock);

				base = segmap_getmapflt(segkmap, vp, off + on,
				    pn, !pagecreate, S_WRITE);

				error = writerp(rp, base + pon, n, uiop,
				    pagecreate);

			} else {
				base = segmap_getmapflt(segkmap, vp, off + on,
				    n, 0, S_READ);
				error = writerp(rp, base + on, n, uiop, 0);
			}
		}

		if (!error) {
			/* Pick segmap/vpm release flags for the dirty data. */
			if (mi->mi_flags & MI_NOAC)
				flags = SM_WRITE;
			else if ((uiop->uio_loffset % bsize) == 0 ||
			    IS_SWAPVP(vp)) {
				/*
				 * Have written a whole block.
				 * Start an asynchronous write
				 * and mark the buffer to
				 * indicate that it won't be
				 * needed again soon.
				 */
				flags = SM_WRITE | SM_ASYNC | SM_DONTNEED;
			} else
				flags = 0;
			/* Synchronous writes must be pushed synchronously. */
			if ((ioflag & (FSYNC|FDSYNC)) ||
			    (rp->r_flags & ROUTOFSPACE)) {
				flags &= ~SM_ASYNC;
				flags |= SM_WRITE;
			}
			if (vpm_enable) {
				error = vpm_sync_pages(vp, off, n, flags);
			} else {
				error = segmap_release(segkmap, base, flags);
			}
		} else {
			if (vpm_enable) {
				(void) vpm_sync_pages(vp, off, n, 0);
			} else {
				(void) segmap_release(segkmap, base, 0);
			}
			/*
			 * In the event that we got an access error while
			 * faulting in a page for a write-only file just
			 * force a write.
			 */
			if (error == EACCES)
				goto nfs3_fwrite;
		}
	} while (!error && uiop->uio_resid > 0);

bottom:
	/* Rewind the uio on error; restore clipped bytes either way. */
	if (error) {
		uiop->uio_resid = resid + remainder;
		uiop->uio_loffset = offset;
	} else
		uiop->uio_resid += remainder;

	nfs_rw_exit(&rp->r_lkserlock);

	return (error);
}
 925  925  
/*
 * Flags are composed of {B_ASYNC, B_INVAL, B_FREE, B_DONTNEED}
 *
 * Do block i/o on the page list `pp' at file offset `off' for `len'
 * bytes by setting up a buf and handing it to nfs3_bio().  For an
 * asynchronous write issued while free memory is plentiful, the data
 * is sent UNSTABLE and each page is tagged C_DELAYCOMMIT so that a
 * later COMMIT can make it durable; otherwise FILE_SYNC is requested
 * and the pages are tagged C_NOCOMMIT.
 */
static int
nfs3_rdwrlbn(vnode_t *vp, page_t *pp, u_offset_t off, size_t len,
	int flags, cred_t *cr)
{
	struct buf *bp;
	int error;
	page_t *savepp;
	uchar_t fsdata;
	stable_how stab_comm;

	ASSERT(nfs_zone() == VTOMI(vp)->mi_zone);
	bp = pageio_setup(pp, len, vp, flags);
	ASSERT(bp != NULL);

	/*
	 * pageio_setup should have set b_addr to 0.  This
	 * is correct since we want to do I/O on a page
	 * boundary.  bp_mapin will use this addr to calculate
	 * an offset, and then set b_addr to the kernel virtual
	 * address it allocated for us.
	 */
	ASSERT(bp->b_un.b_addr == 0);

	bp->b_edev = 0;
	bp->b_dev = 0;
	bp->b_lblkno = lbtodb(off);
	bp->b_file = vp;
	bp->b_offset = (offset_t)off;
	bp_mapin(bp);

	/*
	 * Calculate the desired level of stability to write data
	 * on the server and then mark all of the pages to reflect
	 * this.
	 */
	if ((flags & (B_WRITE|B_ASYNC)) == (B_WRITE|B_ASYNC) &&
	    freemem > desfree) {
		stab_comm = UNSTABLE;
		fsdata = C_DELAYCOMMIT;
	} else {
		stab_comm = FILE_SYNC;
		fsdata = C_NOCOMMIT;
	}

	/* Tag every page on the (circularly linked) page list. */
	savepp = pp;
	do {
		pp->p_fsdata = fsdata;
	} while ((pp = pp->p_next) != savepp);

	error = nfs3_bio(bp, &stab_comm, cr);

	bp_mapout(bp);
	pageio_done(bp);

	/*
	 * If the server wrote pages in a more stable fashion than
	 * was requested, then clear all of the marks in the pages
	 * indicating that COMMIT operations were required.
	 */
	if (stab_comm != UNSTABLE && fsdata == C_DELAYCOMMIT) {
		do {
			pp->p_fsdata = C_NOCOMMIT;
		} while ((pp = pp->p_next) != savepp);
	}

	return (error);
}
 996  996  
/*
 * Write to file.  Writes to remote server in largest size
 * chunks that the server can handle.  Write is synchronous.
 *
 * `base', `offset' and `count' describe the data to push over the
 * wire.  On entry *stab_comm is the stability level requested of the
 * server; on return it holds UNSTABLE if any reply indicated the data
 * was only cached, FILE_SYNC otherwise.  Returns 0 or an errno.
 */
static int
nfs3write(vnode_t *vp, caddr_t base, u_offset_t offset, int count, cred_t *cr,
	stable_how *stab_comm)
{
	mntinfo_t *mi;
	WRITE3args args;
	WRITE3res res;
	int error;
	int tsize;
	rnode_t *rp;
	int douprintf;

	rp = VTOR(vp);
	mi = VTOMI(vp);

	ASSERT(nfs_zone() == mi->mi_zone);

	args.file = *VTOFH3(vp);
	args.stable = *stab_comm;

	*stab_comm = FILE_SYNC;

	douprintf = 1;

	/* Issue WRITE3 requests until the whole range is written or we fail. */
	do {
		/*
		 * Direct i/o may use the full server transfer size;
		 * otherwise honor the dynamically-tuned current size.
		 */
		if ((vp->v_flag & VNOCACHE) ||
		    (rp->r_flags & RDIRECTIO) ||
		    (mi->mi_flags & MI_DIRECTIO))
			tsize = MIN(mi->mi_stsize, count);
		else
			tsize = MIN(mi->mi_curwrite, count);
		args.offset = (offset3)offset;
		args.count = (count3)tsize;
		args.data.data_len = (uint_t)tsize;
		args.data.data_val = base;

		if (mi->mi_io_kstats) {
			mutex_enter(&mi->mi_lock);
			kstat_runq_enter(KSTAT_IO_PTR(mi->mi_io_kstats));
			mutex_exit(&mi->mi_lock);
		}
		args.mblk = NULL;
		do {
			error = rfs3call(mi, NFSPROC3_WRITE,
			    xdr_WRITE3args, (caddr_t)&args,
			    xdr_WRITE3res, (caddr_t)&res, cr,
			    &douprintf, &res.status, 0, NULL);
		} while (error == ENFS_TRYAGAIN);
		if (mi->mi_io_kstats) {
			mutex_enter(&mi->mi_lock);
			kstat_runq_exit(KSTAT_IO_PTR(mi->mi_io_kstats));
			mutex_exit(&mi->mi_lock);
		}

		if (error)
			return (error);
		error = geterrno3(res.status);
		if (!error) {
			/* A server claiming to write more than asked is broken. */
			if (res.resok.count > args.count) {
				zcmn_err(getzoneid(), CE_WARN,
				    "nfs3write: server %s wrote %u, "
				    "requested was %u",
				    rp->r_server->sv_hostname,
				    res.resok.count, args.count);
				return (EIO);
			}
			if (res.resok.committed == UNSTABLE) {
				*stab_comm = UNSTABLE;
				/*
				 * We asked for stable storage but the
				 * server didn't provide it: fail rather
				 * than silently lose durability.
				 */
				if (args.stable == DATA_SYNC ||
				    args.stable == FILE_SYNC) {
					zcmn_err(getzoneid(), CE_WARN,
			"nfs3write: server %s did not commit to stable storage",
					    rp->r_server->sv_hostname);
					return (EIO);
				}
			}
			/* Advance past whatever the server accepted (may be short). */
			tsize = (int)res.resok.count;
			count -= tsize;
			base += tsize;
			offset += tsize;
			if (mi->mi_io_kstats) {
				mutex_enter(&mi->mi_lock);
				KSTAT_IO_PTR(mi->mi_io_kstats)->writes++;
				KSTAT_IO_PTR(mi->mi_io_kstats)->nwritten +=
				    tsize;
				mutex_exit(&mi->mi_lock);
			}
			lwp_stat_update(LWP_STAT_OUBLK, 1);
			mutex_enter(&rp->r_statelock);
			if (rp->r_flags & RHAVEVERF) {
				/*
				 * A changed write verifier means the server
				 * rebooted and may have lost uncommitted
				 * data; mark cached pages modified so they
				 * get rewritten.
				 */
				if (rp->r_verf != res.resok.verf) {
					nfs3_set_mod(vp);
					rp->r_verf = res.resok.verf;
					/*
					 * If the data was written UNSTABLE,
					 * then might as well stop because
					 * the whole block will have to get
					 * rewritten anyway.
					 */
					if (*stab_comm == UNSTABLE) {
						mutex_exit(&rp->r_statelock);
						break;
					}
				}
			} else {
				rp->r_verf = res.resok.verf;
				rp->r_flags |= RHAVEVERF;
			}
			/*
			 * Mark the attribute cache as timed out and
			 * set RWRITEATTR to indicate that the file
			 * was modified with a WRITE operation and
			 * that the attributes can not be trusted.
			 */
			PURGE_ATTRCACHE_LOCKED(rp);
			rp->r_flags |= RWRITEATTR;
			mutex_exit(&rp->r_statelock);
		}
	} while (!error && count);

	return (error);
}
1123 1123  
/*
 * Read from a file.  Reads data in largest chunks our interface can handle.
 *
 * Issues READ3 requests starting at `offset' until `count' bytes have
 * been read, the server reports EOF, or an error occurs.  `*residp'
 * tracks the bytes not yet read.  Post-op attributes returned by the
 * server are used to freshen the attribute cache, or to purge it when
 * they no longer match the cached mtime/size.
 */
static int
nfs3read(vnode_t *vp, caddr_t base, offset_t offset, int count,
	size_t *residp, cred_t *cr)
{
	mntinfo_t *mi;
	READ3args args;
	READ3vres res;
	int tsize;
	int error;
	int douprintf;
	failinfo_t fi;
	rnode_t *rp;
	struct vattr va;
	hrtime_t t;

	rp = VTOR(vp);
	mi = VTOMI(vp);
	ASSERT(nfs_zone() == mi->mi_zone);
	douprintf = 1;

	/* Failover information so rfs3call can re-resolve the file handle. */
	args.file = *VTOFH3(vp);
	fi.vp = vp;
	fi.fhp = (caddr_t)&args.file;
	fi.copyproc = nfs3copyfh;
	fi.lookupproc = nfs3lookup;
	fi.xattrdirproc = acl_getxattrdir3;

	res.pov.fres.vp = vp;
	res.pov.fres.vap = &va;

	res.wlist = NULL;
	*residp = count;
	do {
		if (mi->mi_io_kstats) {
			mutex_enter(&mi->mi_lock);
			kstat_runq_enter(KSTAT_IO_PTR(mi->mi_io_kstats));
			mutex_exit(&mi->mi_lock);
		}

		do {
			/*
			 * Direct i/o may use the full server transfer
			 * size; otherwise honor the dynamically-tuned
			 * current size.
			 */
			if ((vp->v_flag & VNOCACHE) ||
			    (rp->r_flags & RDIRECTIO) ||
			    (mi->mi_flags & MI_DIRECTIO))
				tsize = MIN(mi->mi_tsize, count);
			else
				tsize = MIN(mi->mi_curread, count);
			res.data.data_val = base;
			res.data.data_len = tsize;
			args.offset = (offset3)offset;
			args.count = (count3)tsize;
			args.res_uiop = NULL;
			args.res_data_val_alt = base;

			/* Timestamp the call for attribute-cache freshness. */
			t = gethrtime();
			error = rfs3call(mi, NFSPROC3_READ,
			    xdr_READ3args, (caddr_t)&args,
			    xdr_READ3vres, (caddr_t)&res, cr,
			    &douprintf, &res.status, 0, &fi);
		} while (error == ENFS_TRYAGAIN);

		if (mi->mi_io_kstats) {
			mutex_enter(&mi->mi_lock);
			kstat_runq_exit(KSTAT_IO_PTR(mi->mi_io_kstats));
			mutex_exit(&mi->mi_lock);
		}

		if (error)
			return (error);

		error = geterrno3(res.status);
		if (error)
			return (error);

		/* The decoded data length must match the reported count. */
		if (res.count != res.data.data_len) {
			zcmn_err(getzoneid(), CE_WARN,
			    "nfs3read: server %s returned incorrect amount",
			    rp->r_server->sv_hostname);
			return (EIO);
		}

		/* Advance past what was read (server may return short reads). */
		count -= res.count;
		*residp = count;
		base += res.count;
		offset += res.count;
		if (mi->mi_io_kstats) {
			mutex_enter(&mi->mi_lock);
			KSTAT_IO_PTR(mi->mi_io_kstats)->reads++;
			KSTAT_IO_PTR(mi->mi_io_kstats)->nread += res.count;
			mutex_exit(&mi->mi_lock);
		}
		lwp_stat_update(LWP_STAT_INBLK, 1);
	} while (count && !res.eof);

	/*
	 * Use any post-op attributes: purge the cache if the file
	 * changed underneath us, otherwise refresh it — but only if no
	 * newer attributes arrived while this call was in flight.
	 */
	if (res.pov.attributes) {
		mutex_enter(&rp->r_statelock);
		if (!CACHE_VALID(rp, va.va_mtime, va.va_size)) {
			mutex_exit(&rp->r_statelock);
			PURGE_ATTRCACHE(vp);
		} else {
			if (rp->r_mtime <= t)
				nfs_attrcache_va(vp, &va);
			mutex_exit(&rp->r_statelock);
		}
	}

	return (0);
}
1234 1234  
1235 1235  /* ARGSUSED */
1236 1236  static int
1237 1237  nfs3_ioctl(vnode_t *vp, int cmd, intptr_t arg, int flag, cred_t *cr, int *rvalp,
1238 1238          caller_context_t *ct)
1239 1239  {
1240 1240  
1241 1241          if (nfs_zone() != VTOMI(vp)->mi_zone)
1242 1242                  return (EIO);
1243 1243          switch (cmd) {
1244 1244                  case _FIODIRECTIO:
1245 1245                          return (nfs_directio(vp, (int)arg, cr));
1246 1246                  default:
1247 1247                          return (ENOTTY);
1248 1248          }
1249 1249  }
1250 1250  
1251 1251  /* ARGSUSED */
1252 1252  static int
1253 1253  nfs3_getattr(vnode_t *vp, struct vattr *vap, int flags, cred_t *cr,
1254 1254          caller_context_t *ct)
1255 1255  {
1256 1256          int error;
1257 1257          rnode_t *rp;
1258 1258  
1259 1259          if (nfs_zone() != VTOMI(vp)->mi_zone)
1260 1260                  return (EIO);
1261 1261          /*
1262 1262           * If it has been specified that the return value will
1263 1263           * just be used as a hint, and we are only being asked
1264 1264           * for size, fsid or rdevid, then return the client's
1265 1265           * notion of these values without checking to make sure
1266 1266           * that the attribute cache is up to date.
1267 1267           * The whole point is to avoid an over the wire GETATTR
1268 1268           * call.
1269 1269           */
1270 1270          rp = VTOR(vp);
1271 1271          if (flags & ATTR_HINT) {
1272 1272                  if (vap->va_mask ==
1273 1273                      (vap->va_mask & (AT_SIZE | AT_FSID | AT_RDEV))) {
1274 1274                          mutex_enter(&rp->r_statelock);
1275 1275                          if (vap->va_mask | AT_SIZE)
1276 1276                                  vap->va_size = rp->r_size;
1277 1277                          if (vap->va_mask | AT_FSID)
1278 1278                                  vap->va_fsid = rp->r_attr.va_fsid;
1279 1279                          if (vap->va_mask | AT_RDEV)
1280 1280                                  vap->va_rdev = rp->r_attr.va_rdev;
1281 1281                          mutex_exit(&rp->r_statelock);
1282 1282                          return (0);
1283 1283                  }
1284 1284          }
1285 1285  
1286 1286          /*
1287 1287           * Only need to flush pages if asking for the mtime
1288 1288           * and if there any dirty pages or any outstanding
1289 1289           * asynchronous (write) requests for this file.
1290 1290           */
1291 1291          if (vap->va_mask & AT_MTIME) {
1292 1292                  if (vn_has_cached_data(vp) &&
1293 1293                      ((rp->r_flags & RDIRTY) || rp->r_awcount > 0)) {
1294 1294                          mutex_enter(&rp->r_statelock);
1295 1295                          rp->r_gcount++;
1296 1296                          mutex_exit(&rp->r_statelock);
1297 1297                          error = nfs3_putpage(vp, (offset_t)0, 0, 0, cr, ct);
1298 1298                          mutex_enter(&rp->r_statelock);
1299 1299                          if (error && (error == ENOSPC || error == EDQUOT)) {
1300 1300                                  if (!rp->r_error)
1301 1301                                          rp->r_error = error;
1302 1302                          }
1303 1303                          if (--rp->r_gcount == 0)
1304 1304                                  cv_broadcast(&rp->r_cv);
1305 1305                          mutex_exit(&rp->r_statelock);
1306 1306                  }
1307 1307          }
1308 1308  
1309 1309          return (nfs3getattr(vp, vap, cr));
1310 1310  }
1311 1311  
1312 1312  /*ARGSUSED4*/
1313 1313  static int
1314 1314  nfs3_setattr(vnode_t *vp, struct vattr *vap, int flags, cred_t *cr,
1315 1315                  caller_context_t *ct)
1316 1316  {
1317 1317          int error;
1318 1318          struct vattr va;
1319 1319  
1320 1320          if (vap->va_mask & AT_NOSET)
1321 1321                  return (EINVAL);
1322 1322          if (nfs_zone() != VTOMI(vp)->mi_zone)
1323 1323                  return (EIO);
1324 1324  
1325 1325          va.va_mask = AT_UID | AT_MODE;
1326 1326          error = nfs3getattr(vp, &va, cr);
1327 1327          if (error)
1328 1328                  return (error);
1329 1329  
1330 1330          error = secpolicy_vnode_setattr(cr, vp, vap, &va, flags, nfs3_accessx,
1331 1331              vp);
1332 1332          if (error)
1333 1333                  return (error);
1334 1334  
1335 1335          error = nfs3setattr(vp, vap, flags, cr);
1336 1336  
1337 1337          if (error == 0 && (vap->va_mask & AT_SIZE) && vap->va_size == 0)
1338 1338                  vnevent_truncate(vp, ct);
1339 1339  
1340 1340          return (error);
1341 1341  }
1342 1342  
/*
 * Common over-the-wire SETATTR implementation for NFSv3.
 *
 * Flushes any cached dirty pages first (so the post-SETATTR size and
 * cached data are accurate), builds the SETATTR3 arguments — honoring
 * server-supplied vs. client-supplied timestamps — issues the RPC with
 * ctime-guard handling for size changes, and keeps the local attribute,
 * access, and ACL caches coherent with the result.
 *
 * Returns 0 on success or an errno value.  Caller must already be in
 * the mount's zone (asserted below).
 */
static int
nfs3setattr(vnode_t *vp, struct vattr *vap, int flags, cred_t *cr)
{
	int error;
	uint_t mask;
	SETATTR3args args;
	SETATTR3res res;
	int douprintf;
	rnode_t *rp;
	struct vattr va;
	mode_t omode;
	vsecattr_t *vsp;
	hrtime_t t;

	ASSERT(nfs_zone() == VTOMI(vp)->mi_zone);
	mask = vap->va_mask;

	rp = VTOR(vp);

	/*
	 * Only need to flush pages if there are any pages and
	 * if the file is marked as dirty in some fashion.  The
	 * file must be flushed so that we can accurately
	 * determine the size of the file and the cached data
	 * after the SETATTR returns.  A file is considered to
	 * be dirty if it is either marked with RDIRTY, has
	 * outstanding i/o's active, or is mmap'd.  In this
	 * last case, we can't tell whether there are dirty
	 * pages, so we flush just to be sure.
	 */
	if (vn_has_cached_data(vp) &&
	    ((rp->r_flags & RDIRTY) ||
	    rp->r_count > 0 ||
	    rp->r_mapcnt > 0)) {
		ASSERT(vp->v_type != VCHR);
		error = nfs3_putpage(vp, (offset_t)0, 0, 0, cr, NULL);
		if (error && (error == ENOSPC || error == EDQUOT)) {
			/* Preserve the first hard write error on the rnode. */
			mutex_enter(&rp->r_statelock);
			if (!rp->r_error)
				rp->r_error = error;
			mutex_exit(&rp->r_statelock);
		}
	}

	args.object = *RTOFH3(rp);
	/*
	 * If the intent is for the server to set the times,
	 * there is no point in have the mask indicating set mtime or
	 * atime, because the vap values may be junk, and so result
	 * in an overflow error. Remove these flags from the vap mask
	 * before calling in this case, and restore them afterwards.
	 */
	if ((mask & (AT_ATIME | AT_MTIME)) && !(flags & ATTR_UTIME)) {
		/* Use server times, so don't set the args time fields */
		vap->va_mask &= ~(AT_ATIME | AT_MTIME);
		error = vattr_to_sattr3(vap, &args.new_attributes);
		vap->va_mask |= (mask & (AT_ATIME | AT_MTIME));
		if (mask & AT_ATIME) {
			args.new_attributes.atime.set_it = SET_TO_SERVER_TIME;
		}
		if (mask & AT_MTIME) {
			args.new_attributes.mtime.set_it = SET_TO_SERVER_TIME;
		}
	} else {
		/* Either do not set times or use the client specified times */
		error = vattr_to_sattr3(vap, &args.new_attributes);
	}

	if (error) {
		/* req time field(s) overflow - return immediately */
		return (error);
	}

	/* Need the current mode and ctime for the guard check below. */
	va.va_mask = AT_MODE | AT_CTIME;
	error = nfs3getattr(vp, &va, cr);
	if (error)
		return (error);
	omode = va.va_mode;

tryagain:
	/*
	 * For size changes, arm the ctime guard so the server rejects
	 * the SETATTR if the file changed since we sampled the ctime.
	 */
	if (mask & AT_SIZE) {
		args.guard.check = TRUE;
		args.guard.obj_ctime.seconds = va.va_ctime.tv_sec;
		args.guard.obj_ctime.nseconds = va.va_ctime.tv_nsec;
	} else
		args.guard.check = FALSE;

	douprintf = 1;

	t = gethrtime();

	error = rfs3call(VTOMI(vp), NFSPROC3_SETATTR,
	    xdr_SETATTR3args, (caddr_t)&args,
	    xdr_SETATTR3res, (caddr_t)&res, cr,
	    &douprintf, &res.status, 0, NULL);

	/*
	 * Purge the access cache and ACL cache if changing either the
	 * owner of the file, the group owner, or the mode.  These may
	 * change the access permissions of the file, so purge old
	 * information and start over again.
	 */
	if (mask & (AT_UID | AT_GID | AT_MODE)) {
		(void) nfs_access_purge_rp(rp);
		if (rp->r_secattr != NULL) {
			mutex_enter(&rp->r_statelock);
			vsp = rp->r_secattr;
			rp->r_secattr = NULL;
			mutex_exit(&rp->r_statelock);
			if (vsp != NULL)
				nfs_acl_free(vsp);
		}
	}

	if (error) {
		/* RPC-level failure: attributes are now suspect. */
		PURGE_ATTRCACHE(vp);
		return (error);
	}

	error = geterrno3(res.status);
	if (!error) {
		/*
		 * If changing the size of the file, invalidate
		 * any local cached data which is no longer part
		 * of the file.  We also possibly invalidate the
		 * last page in the file.  We could use
		 * pvn_vpzero(), but this would mark the page as
		 * modified and require it to be written back to
		 * the server for no particularly good reason.
		 * This way, if we access it, then we bring it
		 * back in.  A read should be cheaper than a
		 * write.
		 */
		if (mask & AT_SIZE) {
			nfs_invalidate_pages(vp,
			    (vap->va_size & PAGEMASK), cr);
		}
		nfs3_cache_wcc_data(vp, &res.resok.obj_wcc, t, cr);
		/*
		 * Some servers will change the mode to clear the setuid
		 * and setgid bits when changing the uid or gid.  The
		 * client needs to compensate appropriately.
		 */
		if (mask & (AT_UID | AT_GID)) {
			int terror;

			va.va_mask = AT_MODE;
			terror = nfs3getattr(vp, &va, cr);
			if (!terror &&
			    (((mask & AT_MODE) && va.va_mode != vap->va_mode) ||
			    (!(mask & AT_MODE) && va.va_mode != omode))) {
				/* Re-assert the mode the caller expects. */
				va.va_mask = AT_MODE;
				if (mask & AT_MODE)
					va.va_mode = vap->va_mode;
				else
					va.va_mode = omode;
				(void) nfs3setattr(vp, &va, 0, cr);
			}
		}
	} else {
		nfs3_cache_wcc_data(vp, &res.resfail.obj_wcc, t, cr);
		/*
		 * If we got back a "not synchronized" error, then
		 * we need to retry with a new guard value.  The
		 * guard value used is the change time.  If the
		 * server returned post_op_attr, then we can just
		 * retry because we have the latest attributes.
		 * Otherwise, we issue a GETATTR to get the latest
		 * attributes and then retry.  If we couldn't get
		 * the attributes this way either, then we give
		 * up because we can't complete the operation as
		 * required.
		 */
		if (res.status == NFS3ERR_NOT_SYNC) {
			va.va_mask = AT_CTIME;
			if (nfs3getattr(vp, &va, cr) == 0)
				goto tryagain;
		}
		PURGE_STALE_FH(error, vp, cr);
	}

	return (error);
}
1526 1526  
1527 1527  static int
1528 1528  nfs3_accessx(void *vp, int mode, cred_t *cr)
1529 1529  {
1530 1530          ASSERT(nfs_zone() == VTOMI((vnode_t *)vp)->mi_zone);
1531 1531          return (nfs3_access(vp, mode, 0, cr, NULL));
1532 1532  }
1533 1533  
/*
 * VOP_ACCESS for NFSv3: check whether "cr" may access "vp" with the
 * requested "mode" bits.  Consults the per-rnode access cache first;
 * on a miss it issues an over-the-wire ACCESS3 request (asking for
 * all rights relevant to the vnode type so the answer can be cached).
 *
 * If access is denied with the original credentials, the check is
 * retried once with "network-adjusted" credentials from crnetadjust()
 * — the cred/ncr/ncrfree dance below tracks which cred is in use and
 * whether the adjusted cred must still be freed on exit.
 */
/* ARGSUSED */
static int
nfs3_access(vnode_t *vp, int mode, int flags, cred_t *cr, caller_context_t *ct)
{
	int error;
	ACCESS3args args;
	ACCESS3res res;
	int douprintf;
	uint32 acc;
	rnode_t *rp;
	cred_t *cred, *ncr, *ncrfree = NULL;
	failinfo_t fi;
	nfs_access_type_t cacc;
	hrtime_t t;

	acc = 0;
	if (nfs_zone() != VTOMI(vp)->mi_zone)
		return (EIO);
	/* Translate the VREAD/VWRITE/VEXEC request into ACCESS3 bits. */
	if (mode & VREAD)
		acc |= ACCESS3_READ;
	if (mode & VWRITE) {
		if (vn_is_readonly(vp) && !IS_DEVVP(vp))
			return (EROFS);
		if (vp->v_type == VDIR)
			acc |= ACCESS3_DELETE;
		acc |= ACCESS3_MODIFY | ACCESS3_EXTEND;
	}
	if (mode & VEXEC) {
		if (vp->v_type == VDIR)
			acc |= ACCESS3_LOOKUP;
		else
			acc |= ACCESS3_EXECUTE;
	}

	rp = VTOR(vp);
	/*
	 * Ask the server about every right relevant to this vnode type,
	 * not just the ones requested, so the reply can populate the
	 * access cache for future checks.
	 */
	args.object = *VTOFH3(vp);
	if (vp->v_type == VDIR) {
		args.access = ACCESS3_READ | ACCESS3_DELETE | ACCESS3_MODIFY |
		    ACCESS3_EXTEND | ACCESS3_LOOKUP;
	} else {
		args.access = ACCESS3_READ | ACCESS3_MODIFY | ACCESS3_EXTEND |
		    ACCESS3_EXECUTE;
	}
	fi.vp = vp;
	fi.fhp = (caddr_t)&args.object;
	fi.copyproc = nfs3copyfh;
	fi.lookupproc = nfs3lookup;
	fi.xattrdirproc = acl_getxattrdir3;

	cred = cr;
	/*
	 * ncr and ncrfree both initially
	 * point to the memory area returned
	 * by crnetadjust();
	 * ncrfree not NULL when exiting means
	 * that we need to release it
	 */
	ncr = crnetadjust(cred);
	ncrfree = ncr;
tryagain:
	/* Fast path: answer from the cached access results if possible. */
	if (rp->r_acache != NULL) {
		cacc = nfs_access_check(rp, acc, cred);
		if (cacc == NFS_ACCESS_ALLOWED) {
			if (ncrfree != NULL)
				crfree(ncrfree);
			return (0);
		}
		if (cacc == NFS_ACCESS_DENIED) {
			/*
			 * If the cred can be adjusted, try again
			 * with the new cred.
			 */
			if (ncr != NULL) {
				cred = ncr;
				ncr = NULL;
				goto tryagain;
			}
			if (ncrfree != NULL)
				crfree(ncrfree);
			return (EACCES);
		}
	}

	douprintf = 1;

	t = gethrtime();

	error = rfs3call(VTOMI(vp), NFSPROC3_ACCESS,
	    xdr_ACCESS3args, (caddr_t)&args,
	    xdr_ACCESS3res, (caddr_t)&res, cred,
	    &douprintf, &res.status, 0, &fi);

	if (error) {
		if (ncrfree != NULL)
			crfree(ncrfree);
		return (error);
	}

	error = geterrno3(res.status);
	if (!error) {
		nfs3_cache_post_op_attr(vp, &res.resok.obj_attributes, t, cr);
		nfs_access_cache(rp, args.access, res.resok.access, cred);
		/*
		 * we just cached results with cred; if cred is the
		 * adjusted credentials from crnetadjust, we do not want
		 * to release them before exiting: hence setting ncrfree
		 * to NULL
		 */
		if (cred != cr)
			ncrfree = NULL;
		/* Denied if the server granted fewer rights than needed. */
		if ((acc & res.resok.access) != acc) {
			/*
			 * If the cred can be adjusted, try again
			 * with the new cred.
			 */
			if (ncr != NULL) {
				cred = ncr;
				ncr = NULL;
				goto tryagain;
			}
			error = EACCES;
		}
	} else {
		nfs3_cache_post_op_attr(vp, &res.resfail.obj_attributes, t, cr);
		PURGE_STALE_FH(error, vp, cr);
	}

	if (ncrfree != NULL)
		crfree(ncrfree);

	return (error);
}
1666 1666  
/* Tunable: when nonzero, cache symlink contents on the rnode. */
static int nfs3_do_symlink_cache = 1;
1668 1668  
/*
 * VOP_READLINK for NFSv3: copy the target of symlink "vp" into the
 * caller's uio.  Serves the request from the rnode's cached symlink
 * contents when the cache is valid; otherwise issues an over-the-wire
 * READLINK3 and, if enabled, installs the result in the cache
 * (transferring ownership of the MAXPATHLEN buffer to the rnode).
 */
/* ARGSUSED */
static int
nfs3_readlink(vnode_t *vp, struct uio *uiop, cred_t *cr, caller_context_t *ct)
{
	int error;
	READLINK3args args;
	READLINK3res res;
	nfspath3 resdata_backup;
	rnode_t *rp;
	int douprintf;
	int len;
	failinfo_t fi;
	hrtime_t t;

	/*
	 * Can't readlink anything other than a symbolic link.
	 */
	if (vp->v_type != VLNK)
		return (EINVAL);
	if (nfs_zone() != VTOMI(vp)->mi_zone)
		return (EIO);

	rp = VTOR(vp);
	if (nfs3_do_symlink_cache && rp->r_symlink.contents != NULL) {
		error = nfs3_validate_caches(vp, cr);
		if (error)
			return (error);
		/* Re-check under the lock: validation may have purged it. */
		mutex_enter(&rp->r_statelock);
		if (rp->r_symlink.contents != NULL) {
			error = uiomove(rp->r_symlink.contents,
			    rp->r_symlink.len, UIO_READ, uiop);
			mutex_exit(&rp->r_statelock);
			return (error);
		}
		mutex_exit(&rp->r_statelock);
	}

	args.symlink = *VTOFH3(vp);
	fi.vp = vp;
	fi.fhp = (caddr_t)&args.symlink;
	fi.copyproc = nfs3copyfh;
	fi.lookupproc = nfs3lookup;
	fi.xattrdirproc = acl_getxattrdir3;

	res.resok.data = kmem_alloc(MAXPATHLEN, KM_SLEEP);

	/*
	 * Remember the buffer we allocated: the decode path may replace
	 * res.resok.data, and on failure we must free our allocation,
	 * not whatever the pointer was changed to.
	 */
	resdata_backup = res.resok.data;

	douprintf = 1;

	t = gethrtime();

	error = rfs3call(VTOMI(vp), NFSPROC3_READLINK,
	    xdr_READLINK3args, (caddr_t)&args,
	    xdr_READLINK3res, (caddr_t)&res, cr,
	    &douprintf, &res.status, 0, &fi);

	/*
	 * NOTE(review): presumably the decoder substitutes the
	 * nfs3nametoolong sentinel for over-long paths — confirm in
	 * the XDR code.
	 */
	if (res.resok.data == nfs3nametoolong)
		error = EINVAL;

	if (error) {
		kmem_free(resdata_backup, MAXPATHLEN);
		return (error);
	}

	error = geterrno3(res.status);
	if (!error) {
		nfs3_cache_post_op_attr(vp, &res.resok.symlink_attributes, t,
		    cr);
		len = strlen(res.resok.data);
		error = uiomove(res.resok.data, len, UIO_READ, uiop);
		if (nfs3_do_symlink_cache && rp->r_symlink.contents == NULL) {
			mutex_enter(&rp->r_statelock);
			if (rp->r_symlink.contents == NULL) {
				/* Cache takes ownership of the buffer. */
				rp->r_symlink.contents = res.resok.data;
				rp->r_symlink.len = len;
				rp->r_symlink.size = MAXPATHLEN;
				mutex_exit(&rp->r_statelock);
			} else {
				/* Lost the race to populate the cache. */
				mutex_exit(&rp->r_statelock);

				kmem_free((void *)res.resok.data, MAXPATHLEN);
			}
		} else {
			kmem_free((void *)res.resok.data, MAXPATHLEN);
		}
	} else {
		nfs3_cache_post_op_attr(vp,
		    &res.resfail.symlink_attributes, t, cr);
		PURGE_STALE_FH(error, vp, cr);

		kmem_free((void *)res.resok.data, MAXPATHLEN);

	}

	/*
	 * The over the wire error for attempting to readlink something
	 * other than a symbolic link is ENXIO.  However, we need to
	 * return EINVAL instead of ENXIO, so we map it here.
	 */
	return (error == ENXIO ? EINVAL : error);
}
1771 1771  
1772 1772  /*
1773 1773   * Flush local dirty pages to stable storage on the server.
1774 1774   *
1775 1775   * If FNODSYNC is specified, then there is nothing to do because
1776 1776   * metadata changes are not cached on the client before being
1777 1777   * sent to the server.
1778 1778   */
1779 1779  /* ARGSUSED */
1780 1780  static int
1781 1781  nfs3_fsync(vnode_t *vp, int syncflag, cred_t *cr, caller_context_t *ct)
1782 1782  {
1783 1783          int error;
1784 1784  
1785 1785          if ((syncflag & FNODSYNC) || IS_SWAPVP(vp))
1786 1786                  return (0);
1787 1787          if (nfs_zone() != VTOMI(vp)->mi_zone)
1788 1788                  return (EIO);
1789 1789  
1790 1790          error = nfs3_putpage_commit(vp, (offset_t)0, 0, cr);
1791 1791          if (!error)
1792 1792                  error = VTOR(vp)->r_error;
1793 1793          return (error);
1794 1794  }
1795 1795  
/*
 * Weirdness: if the file was removed or the target of a rename
 * operation while it was open, it got renamed instead.  Here we
 * remove the renamed file.
 */
/* ARGSUSED */
static void
nfs3_inactive(vnode_t *vp, cred_t *cr, caller_context_t *ct)
{
	rnode_t *rp;

	ASSERT(vp != DNLC_NO_VNODE);

	/*
	 * If this is coming from the wrong zone, we let someone in the right
	 * zone take care of it asynchronously.  We can get here due to
	 * VN_RELE() being called from pageout() or fsflush().  This call may
	 * potentially turn into an expensive no-op if, for instance, v_count
	 * gets incremented in the meantime, but it's still correct.
	 */
	if (nfs_zone() != VTOMI(vp)->mi_zone) {
		nfs_async_inactive(vp, cr, nfs3_inactive);
		return;
	}

	rp = VTOR(vp);
redo:
	/* Unlocked peek first; the locked re-check below is authoritative. */
	if (rp->r_unldvp != NULL) {
		/*
		 * Save the vnode pointer for the directory where the
		 * unlinked-open file got renamed, then set it to NULL
		 * to prevent another thread from getting here before
		 * we're done with the remove.  While we have the
		 * statelock, make local copies of the pertinent rnode
		 * fields.  If we weren't to do this in an atomic way, the
		 * the unl* fields could become inconsistent with respect
		 * to each other due to a race condition between this
		 * code and nfs_remove().  See bug report 1034328.
		 */
		mutex_enter(&rp->r_statelock);
		if (rp->r_unldvp != NULL) {
			vnode_t *unldvp;
			char *unlname;
			cred_t *unlcred;
			REMOVE3args args;
			REMOVE3res res;
			int douprintf;
			int error;
			hrtime_t t;

			/* Claim the unl* triple atomically under the lock. */
			unldvp = rp->r_unldvp;
			rp->r_unldvp = NULL;
			unlname = rp->r_unlname;
			rp->r_unlname = NULL;
			unlcred = rp->r_unlcred;
			rp->r_unlcred = NULL;
			mutex_exit(&rp->r_statelock);

			/*
			 * If there are any dirty pages left, then flush
			 * them.  This is unfortunate because they just
			 * may get thrown away during the remove operation,
			 * but we have to do this for correctness.
			 */
			if (vn_has_cached_data(vp) &&
			    ((rp->r_flags & RDIRTY) || rp->r_count > 0)) {
				ASSERT(vp->v_type != VCHR);
				error = nfs3_putpage(vp, (offset_t)0, 0, 0,
				    cr, ct);
				if (error) {
					mutex_enter(&rp->r_statelock);
					if (!rp->r_error)
						rp->r_error = error;
					mutex_exit(&rp->r_statelock);
				}
			}

			/*
			 * Do the remove operation on the renamed file
			 */
			setdiropargs3(&args.object, unlname, unldvp);

			douprintf = 1;

			t = gethrtime();

			error = rfs3call(VTOMI(unldvp), NFSPROC3_REMOVE,
			    xdr_diropargs3, (caddr_t)&args,
			    xdr_REMOVE3res, (caddr_t)&res, unlcred,
			    &douprintf, &res.status, 0, NULL);

			if (error) {
				PURGE_ATTRCACHE(unldvp);
			} else {
				error = geterrno3(res.status);
				if (!error) {
					nfs3_cache_wcc_data(unldvp,
					    &res.resok.dir_wcc, t, cr);
					if (HAVE_RDDIR_CACHE(VTOR(unldvp)))
						nfs_purge_rddir_cache(unldvp);
				} else {
					nfs3_cache_wcc_data(unldvp,
					    &res.resfail.dir_wcc, t, cr);
					PURGE_STALE_FH(error, unldvp, cr);
				}
			}

			/*
			 * Release stuff held for the remove
			 */
			VN_RELE(unldvp);
			kmem_free(unlname, MAXNAMELEN);
			crfree(unlcred);
			/* A new unlinked rename may have raced in; recheck. */
			goto redo;
		}
		mutex_exit(&rp->r_statelock);
	}

	rp_addfree(rp, cr);
}
1916 1916  
1917 1917  /*
1918 1918   * Remote file system operations having to do with directory manipulation.
1919 1919   */
1920 1920  
/*
 * VOP_LOOKUP for NFSv3: resolve name "nm" within directory "dvp" and
 * return the result in *vpp.  Handles extended-attribute lookups
 * (LOOKUP_XATTR) by first resolving the hidden xattr directory, and
 * wraps device nodes in a spec vnode before returning.
 */
/* ARGSUSED */
static int
nfs3_lookup(vnode_t *dvp, char *nm, vnode_t **vpp, struct pathname *pnp,
	int flags, vnode_t *rdir, cred_t *cr, caller_context_t *ct,
	int *direntflags, pathname_t *realpnp)
{
	int error;
	vnode_t *vp;
	vnode_t *avp = NULL;
	rnode_t *drp;

	/* Cross-zone NFS operations are not permitted. */
	if (nfs_zone() != VTOMI(dvp)->mi_zone)
		return (EPERM);

	drp = VTOR(dvp);

	/*
	 * Are we looking up extended attributes?  If so, "dvp" is
	 * the file or directory for which we want attributes, and
	 * we need a lookup of the hidden attribute directory
	 * before we lookup the rest of the path.
	 */
	if (flags & LOOKUP_XATTR) {
		bool_t cflag = ((flags & CREATE_XATTR_DIR) != 0);
		mntinfo_t *mi;

		mi = VTOMI(dvp);
		if (!(mi->mi_flags & MI_EXTATTR))
			return (EINVAL);

		if (nfs_rw_enter_sig(&drp->r_rwlock, RW_READER, INTR(dvp)))
			return (EINTR);

		/* Try the DNLC first; fall back to an over-the-wire call. */
		(void) nfs3lookup_dnlc(dvp, XATTR_DIR_NAME, &avp, cr);
		if (avp == NULL)
			error = acl_getxattrdir3(dvp, &avp, cflag, cr, 0);
		else
			error = 0;

		nfs_rw_exit(&drp->r_rwlock);

		if (error) {
			/*
			 * If MI_EXTATTR has been cleared in the meantime
			 * (server no longer supports xattrs), report
			 * EINVAL rather than the raw lookup error.
			 */
			if (mi->mi_flags & MI_EXTATTR)
				return (error);
			return (EINVAL);
		}
		/* Continue the lookup relative to the xattr directory. */
		dvp = avp;
		drp = VTOR(dvp);
	}

	if (nfs_rw_enter_sig(&drp->r_rwlock, RW_READER, INTR(dvp))) {
		error = EINTR;
		goto out;
	}

	error = nfs3lookup(dvp, nm, vpp, pnp, flags, rdir, cr, 0);

	nfs_rw_exit(&drp->r_rwlock);

	/*
	 * If vnode is a device, create special vnode.
	 */
	if (!error && IS_DEVVP(*vpp)) {
		vp = *vpp;
		*vpp = specvp(vp, vp->v_rdev, vp->v_type, cr);
		VN_RELE(vp);
	}

out:
	/* Drop the hold taken on the xattr directory, if any. */
	if (avp != NULL)
		VN_RELE(avp);

	return (error);
}
1995 1995  
/* If non-zero, cache negative (ENOENT) lookup results in the DNLC. */
static int nfs3_lookup_neg_cache = 1;

#ifdef DEBUG
/* DNLC lookup statistics, observable with a kernel debugger. */
static int nfs3_lookup_dnlc_hits = 0;
static int nfs3_lookup_dnlc_misses = 0;
static int nfs3_lookup_dnlc_neg_hits = 0;
static int nfs3_lookup_dnlc_disappears = 0;
static int nfs3_lookup_dnlc_lookups = 0;
#endif
2005 2005  
2006 2006  /* ARGSUSED */
2007 2007  int
2008 2008  nfs3lookup(vnode_t *dvp, char *nm, vnode_t **vpp, struct pathname *pnp,
2009 2009          int flags, vnode_t *rdir, cred_t *cr, int rfscall_flags)
2010 2010  {
2011 2011          int error;
2012 2012          rnode_t *drp;
2013 2013  
2014 2014          ASSERT(nfs_zone() == VTOMI(dvp)->mi_zone);
2015 2015          /*
2016 2016           * If lookup is for "", just return dvp.  Don't need
2017 2017           * to send it over the wire, look it up in the dnlc,
2018 2018           * or perform any access checks.
2019 2019           */
2020 2020          if (*nm == '\0') {
2021 2021                  VN_HOLD(dvp);
2022 2022                  *vpp = dvp;
2023 2023                  return (0);
2024 2024          }
2025 2025  
2026 2026          /*
2027 2027           * Can't do lookups in non-directories.
2028 2028           */
2029 2029          if (dvp->v_type != VDIR)
2030 2030                  return (ENOTDIR);
2031 2031  
2032 2032          /*
2033 2033           * If we're called with RFSCALL_SOFT, it's important that
2034 2034           * the only rfscall is one we make directly; if we permit
2035 2035           * an access call because we're looking up "." or validating
2036 2036           * a dnlc hit, we'll deadlock because that rfscall will not
2037 2037           * have the RFSCALL_SOFT set.
2038 2038           */
2039 2039          if (rfscall_flags & RFSCALL_SOFT)
2040 2040                  goto callit;
2041 2041  
2042 2042          /*
2043 2043           * If lookup is for ".", just return dvp.  Don't need
2044 2044           * to send it over the wire or look it up in the dnlc,
2045 2045           * just need to check access.
2046 2046           */
2047 2047          if (strcmp(nm, ".") == 0) {
2048 2048                  error = nfs3_access(dvp, VEXEC, 0, cr, NULL);
2049 2049                  if (error)
2050 2050                          return (error);
2051 2051                  VN_HOLD(dvp);
2052 2052                  *vpp = dvp;
2053 2053                  return (0);
2054 2054          }
2055 2055  
2056 2056          drp = VTOR(dvp);
2057 2057          if (!(drp->r_flags & RLOOKUP)) {
2058 2058                  mutex_enter(&drp->r_statelock);
2059 2059                  drp->r_flags |= RLOOKUP;
2060 2060                  mutex_exit(&drp->r_statelock);
2061 2061          }
2062 2062  
2063 2063          /*
2064 2064           * Lookup this name in the DNLC.  If there was a valid entry,
2065 2065           * then return the results of the lookup.
2066 2066           */
2067 2067          error = nfs3lookup_dnlc(dvp, nm, vpp, cr);
2068 2068          if (error || *vpp != NULL)
2069 2069                  return (error);
2070 2070  
2071 2071  callit:
2072 2072          error = nfs3lookup_otw(dvp, nm, vpp, cr, rfscall_flags);
2073 2073  
2074 2074          return (error);
2075 2075  }
2076 2076  
/*
 * Attempt to satisfy a lookup of "nm" in "dvp" from the DNLC.
 * Returns 0 with *vpp == NULL when the name is simply not cached
 * (not an error); returns ENOENT on a cached negative entry.
 */
static int
nfs3lookup_dnlc(vnode_t *dvp, char *nm, vnode_t **vpp, cred_t *cr)
{
	int error;
	vnode_t *vp;

	ASSERT(*nm != '\0');
	ASSERT(nfs_zone() == VTOMI(dvp)->mi_zone);
	/*
	 * Lookup this name in the DNLC.  If successful, then validate
	 * the caches and then recheck the DNLC.  The DNLC is rechecked
	 * just in case this entry got invalidated during the call
	 * to nfs3_validate_caches.
	 *
	 * An assumption is being made that it is safe to say that a
	 * file exists which may not on the server.  Any operations to
	 * the server will fail with ESTALE.
	 */
#ifdef DEBUG
	nfs3_lookup_dnlc_lookups++;
#endif
	vp = dnlc_lookup(dvp, nm);
	if (vp != NULL) {
		/* Drop the hold from dnlc_lookup(); we re-lookup below. */
		VN_RELE(vp);
		if (vp == DNLC_NO_VNODE && !vn_is_readonly(dvp)) {
			/*
			 * A negative entry on a writable filesystem may
			 * be stale; force fresh attributes so the
			 * validation below goes to the server.
			 */
			PURGE_ATTRCACHE(dvp);
		}
		error = nfs3_validate_caches(dvp, cr);
		if (error)
			return (error);
		vp = dnlc_lookup(dvp, nm);
		if (vp != NULL) {
			error = nfs3_access(dvp, VEXEC, 0, cr, NULL);
			if (error) {
				VN_RELE(vp);
				return (error);
			}
			if (vp == DNLC_NO_VNODE) {
				/* Cached negative entry: name is absent. */
				VN_RELE(vp);
#ifdef DEBUG
				nfs3_lookup_dnlc_neg_hits++;
#endif
				return (ENOENT);
			}
			*vpp = vp;
#ifdef DEBUG
			nfs3_lookup_dnlc_hits++;
#endif
			return (0);
		}
		/* Entry vanished during cache validation. */
#ifdef DEBUG
		nfs3_lookup_dnlc_disappears++;
#endif
	}
#ifdef DEBUG
	else
		nfs3_lookup_dnlc_misses++;
#endif

	/* Not in the DNLC; caller must go over the wire. */
	*vpp = NULL;

	return (0);
}
2140 2140  
/*
 * Perform an over-the-wire NFSv3 LOOKUP of "nm" in directory "dvp",
 * returning the resulting vnode in *vpp.  Caches returned directory
 * attributes, enters negative DNLC entries on ENOENT (if enabled),
 * and updates the DNLC with the result unless RFSCALL_SOFT is set.
 */
static int
nfs3lookup_otw(vnode_t *dvp, char *nm, vnode_t **vpp, cred_t *cr,
	int rfscall_flags)
{
	int error;
	LOOKUP3args args;
	LOOKUP3vres res;
	int douprintf;
	struct vattr vattr;
	struct vattr dvattr;
	vnode_t *vp;
	failinfo_t fi;
	hrtime_t t;

	ASSERT(*nm != '\0');
	ASSERT(dvp->v_type == VDIR);
	ASSERT(nfs_zone() == VTOMI(dvp)->mi_zone);

	setdiropargs3(&args.what, nm, dvp);

	/* Failover info: how to recover the filehandle if it goes stale. */
	fi.vp = dvp;
	fi.fhp = (caddr_t)&args.what.dir;
	fi.copyproc = nfs3copyfh;
	fi.lookupproc = nfs3lookup;
	fi.xattrdirproc = acl_getxattrdir3;
	/* Decode post-op attributes for both the object and the directory. */
	res.obj_attributes.fres.vp = dvp;
	res.obj_attributes.fres.vap = &vattr;
	res.dir_attributes.fres.vp = dvp;
	res.dir_attributes.fres.vap = &dvattr;

	douprintf = 1;

	/* Timestamp the call for attribute-cache consistency checks. */
	t = gethrtime();

	error = rfs3call(VTOMI(dvp), NFSPROC3_LOOKUP,
	    xdr_diropargs3, (caddr_t)&args,
	    xdr_LOOKUP3vres, (caddr_t)&res, cr,
	    &douprintf, &res.status, rfscall_flags, &fi);

	if (error)
		return (error);

	nfs3_cache_post_op_vattr(dvp, &res.dir_attributes, t, cr);

	error = geterrno3(res.status);
	if (error) {
		PURGE_STALE_FH(error, dvp, cr);
		/* Cache the nonexistence of the name, if enabled. */
		if (error == ENOENT && nfs3_lookup_neg_cache)
			dnlc_enter(dvp, nm, DNLC_NO_VNODE);
		return (error);
	}

	if (res.obj_attributes.attributes) {
		vp = makenfs3node_va(&res.object, res.obj_attributes.fres.vap,
		    dvp->v_vfsp, t, cr, VTOR(dvp)->r_path, nm);
	} else {
		/*
		 * No post-op attributes; make the node anyway and fetch
		 * the type with an explicit GETATTR if it is unknown.
		 */
		vp = makenfs3node_va(&res.object, NULL,
		    dvp->v_vfsp, t, cr, VTOR(dvp)->r_path, nm);
		if (vp->v_type == VNON) {
			vattr.va_mask = AT_TYPE;
			error = nfs3getattr(vp, &vattr, cr);
			if (error) {
				VN_RELE(vp);
				return (error);
			}
			vp->v_type = vattr.va_type;
		}
	}

	if (!(rfscall_flags & RFSCALL_SOFT))
		dnlc_update(dvp, nm, vp);

	*vpp = vp;

	return (error);
}
2217 2217  
#ifdef DEBUG
/* Count of GUARDED-create EEXIST retries taken in nfs3_create(). */
static int nfs3_create_misses = 0;
#endif
2221 2221  
2222 2222  /* ARGSUSED */
2223 2223  static int
2224 2224  nfs3_create(vnode_t *dvp, char *nm, struct vattr *va, enum vcexcl exclusive,
2225 2225          int mode, vnode_t **vpp, cred_t *cr, int lfaware, caller_context_t *ct,
2226 2226          vsecattr_t *vsecp)
2227 2227  {
2228 2228          int error;
2229 2229          vnode_t *vp;
2230 2230          rnode_t *rp;
2231 2231          struct vattr vattr;
2232 2232          rnode_t *drp;
2233 2233          vnode_t *tempvp;
2234 2234  
2235 2235          drp = VTOR(dvp);
2236 2236          if (nfs_zone() != VTOMI(dvp)->mi_zone)
2237 2237                  return (EPERM);
2238 2238          if (nfs_rw_enter_sig(&drp->r_rwlock, RW_WRITER, INTR(dvp)))
2239 2239                  return (EINTR);
2240 2240  
2241 2241  top:
2242 2242          /*
2243 2243           * We make a copy of the attributes because the caller does not
2244 2244           * expect us to change what va points to.
2245 2245           */
2246 2246          vattr = *va;
2247 2247  
2248 2248          /*
2249 2249           * If the pathname is "", just use dvp.  Don't need
2250 2250           * to send it over the wire, look it up in the dnlc,
2251 2251           * or perform any access checks.
2252 2252           */
2253 2253          if (*nm == '\0') {
2254 2254                  error = 0;
2255 2255                  VN_HOLD(dvp);
2256 2256                  vp = dvp;
2257 2257          /*
2258 2258           * If the pathname is ".", just use dvp.  Don't need
2259 2259           * to send it over the wire or look it up in the dnlc,
2260 2260           * just need to check access.
2261 2261           */
2262 2262          } else if (strcmp(nm, ".") == 0) {
2263 2263                  error = nfs3_access(dvp, VEXEC, 0, cr, ct);
2264 2264                  if (error) {
2265 2265                          nfs_rw_exit(&drp->r_rwlock);
2266 2266                          return (error);
2267 2267                  }
2268 2268                  VN_HOLD(dvp);
2269 2269                  vp = dvp;
2270 2270          /*
2271 2271           * We need to go over the wire, just to be sure whether the
2272 2272           * file exists or not.  Using the DNLC can be dangerous in
2273 2273           * this case when making a decision regarding existence.
2274 2274           */
2275 2275          } else {
2276 2276                  error = nfs3lookup_otw(dvp, nm, &vp, cr, 0);
2277 2277          }
2278 2278          if (!error) {
2279 2279                  if (exclusive == EXCL)
2280 2280                          error = EEXIST;
2281 2281                  else if (vp->v_type == VDIR && (mode & VWRITE))
2282 2282                          error = EISDIR;
2283 2283                  else {
2284 2284                          /*
2285 2285                           * If vnode is a device, create special vnode.
2286 2286                           */
2287 2287                          if (IS_DEVVP(vp)) {
2288 2288                                  tempvp = vp;
2289 2289                                  vp = specvp(vp, vp->v_rdev, vp->v_type, cr);
2290 2290                                  VN_RELE(tempvp);
2291 2291                          }
2292 2292                          if (!(error = VOP_ACCESS(vp, mode, 0, cr, ct))) {
2293 2293                                  if ((vattr.va_mask & AT_SIZE) &&
2294 2294                                      vp->v_type == VREG) {
2295 2295                                          rp = VTOR(vp);
2296 2296                                          /*
2297 2297                                           * Check here for large file handled
2298 2298                                           * by LF-unaware process (as
2299 2299                                           * ufs_create() does)
2300 2300                                           */
2301 2301                                          if (!(lfaware & FOFFMAX)) {
2302 2302                                                  mutex_enter(&rp->r_statelock);
2303 2303                                                  if (rp->r_size > MAXOFF32_T)
2304 2304                                                          error = EOVERFLOW;
2305 2305                                                  mutex_exit(&rp->r_statelock);
2306 2306                                          }
2307 2307                                          if (!error) {
2308 2308                                                  vattr.va_mask = AT_SIZE;
2309 2309                                                  error = nfs3setattr(vp,
2310 2310                                                      &vattr, 0, cr);
2311 2311  
2312 2312                                                  /*
2313 2313                                                   * Existing file was truncated;
2314 2314                                                   * emit a create event.
2315 2315                                                   */
2316 2316                                                  vnevent_create(vp, ct);
2317 2317                                          }
2318 2318                                  }
2319 2319                          }
2320 2320                  }
2321 2321                  nfs_rw_exit(&drp->r_rwlock);
2322 2322                  if (error) {
2323 2323                          VN_RELE(vp);
2324 2324                  } else {
2325 2325                          *vpp = vp;
2326 2326                  }
2327 2327  
2328 2328                  return (error);
2329 2329          }
2330 2330  
2331 2331          dnlc_remove(dvp, nm);
2332 2332  
2333 2333          /*
2334 2334           * Decide what the group-id of the created file should be.
2335 2335           * Set it in attribute list as advisory...
2336 2336           */
2337 2337          error = setdirgid(dvp, &vattr.va_gid, cr);
2338 2338          if (error) {
2339 2339                  nfs_rw_exit(&drp->r_rwlock);
2340 2340                  return (error);
2341 2341          }
2342 2342          vattr.va_mask |= AT_GID;
2343 2343  
2344 2344          ASSERT(vattr.va_mask & AT_TYPE);
2345 2345          if (vattr.va_type == VREG) {
2346 2346                  ASSERT(vattr.va_mask & AT_MODE);
2347 2347                  if (MANDMODE(vattr.va_mode)) {
2348 2348                          nfs_rw_exit(&drp->r_rwlock);
2349 2349                          return (EACCES);
2350 2350                  }
2351 2351                  error = nfs3create(dvp, nm, &vattr, exclusive, mode, vpp, cr,
2352 2352                      lfaware);
2353 2353                  /*
2354 2354                   * If this is not an exclusive create, then the CREATE
2355 2355                   * request will be made with the GUARDED mode set.  This
2356 2356                   * means that the server will return EEXIST if the file
2357 2357                   * exists.  The file could exist because of a retransmitted
2358 2358                   * request.  In this case, we recover by starting over and
2359 2359                   * checking to see whether the file exists.  This second
2360 2360                   * time through it should and a CREATE request will not be
2361 2361                   * sent.
2362 2362                   *
2363 2363                   * This handles the problem of a dangling CREATE request
2364 2364                   * which contains attributes which indicate that the file
2365 2365                   * should be truncated.  This retransmitted request could
2366 2366                   * possibly truncate valid data in the file if not caught
2367 2367                   * by the duplicate request mechanism on the server or if
2368 2368                   * not caught by other means.  The scenario is:
2369 2369                   *
2370 2370                   * Client transmits CREATE request with size = 0
2371 2371                   * Client times out, retransmits request.
2372 2372                   * Response to the first request arrives from the server
2373 2373                   *  and the client proceeds on.
2374 2374                   * Client writes data to the file.
2375 2375                   * The server now processes retransmitted CREATE request
2376 2376                   *  and truncates file.
2377 2377                   *
2378 2378                   * The use of the GUARDED CREATE request prevents this from
2379 2379                   * happening because the retransmitted CREATE would fail
2380 2380                   * with EEXIST and would not truncate the file.
2381 2381                   */
2382 2382                  if (error == EEXIST && exclusive == NONEXCL) {
2383 2383  #ifdef DEBUG
2384 2384                          nfs3_create_misses++;
2385 2385  #endif
2386 2386                          goto top;
2387 2387                  }
2388 2388                  nfs_rw_exit(&drp->r_rwlock);
2389 2389                  return (error);
2390 2390          }
2391 2391          error = nfs3mknod(dvp, nm, &vattr, exclusive, mode, vpp, cr);
2392 2392          nfs_rw_exit(&drp->r_rwlock);
2393 2393          return (error);
2394 2394  }
2395 2395  
2396 2396  /* ARGSUSED */
2397 2397  static int
2398 2398  nfs3create(vnode_t *dvp, char *nm, struct vattr *va, enum vcexcl exclusive,
2399 2399          int mode, vnode_t **vpp, cred_t *cr, int lfaware)
2400 2400  {
2401 2401          int error;
2402 2402          CREATE3args args;
2403 2403          CREATE3res res;
2404 2404          int douprintf;
2405 2405          vnode_t *vp;
2406 2406          struct vattr vattr;
2407 2407          nfstime3 *verfp;
2408 2408          rnode_t *rp;
2409 2409          timestruc_t now;
2410 2410          hrtime_t t;
2411 2411  
2412 2412          ASSERT(nfs_zone() == VTOMI(dvp)->mi_zone);
2413 2413          setdiropargs3(&args.where, nm, dvp);
2414 2414          if (exclusive == EXCL) {
2415 2415                  args.how.mode = EXCLUSIVE;
2416 2416                  /*
2417 2417                   * Construct the create verifier.  This verifier needs
2418 2418                   * to be unique between different clients.  It also needs
2419 2419                   * to vary for each exclusive create request generated
2420 2420                   * from the client to the server.
2421 2421                   *
2422 2422                   * The first attempt is made to use the hostid and a
2423 2423                   * unique number on the client.  If the hostid has not
2424 2424                   * been set, the high resolution time that the exclusive
2425 2425                   * create request is being made is used.  This will work
2426 2426                   * unless two different clients, both with the hostid
2427 2427                   * not set, attempt an exclusive create request on the
2428 2428                   * same file, at exactly the same clock time.  The
2429 2429                   * chances of this happening seem small enough to be
2430 2430                   * reasonable.
2431 2431                   */
2432 2432                  verfp = (nfstime3 *)&args.how.createhow3_u.verf;
2433 2433                  verfp->seconds = zone_get_hostid(NULL);
2434 2434                  if (verfp->seconds != 0)
2435 2435                          verfp->nseconds = newnum();
2436 2436                  else {
2437 2437                          gethrestime(&now);
2438 2438                          verfp->seconds = now.tv_sec;
2439 2439                          verfp->nseconds = now.tv_nsec;
2440 2440                  }
2441 2441                  /*
2442 2442                   * Since the server will use this value for the mtime,
2443 2443                   * make sure that it can't overflow. Zero out the MSB.
2444 2444                   * The actual value does not matter here, only its uniqeness.
2445 2445                   */
2446 2446                  verfp->seconds %= INT32_MAX;
2447 2447          } else {
2448 2448                  /*
2449 2449                   * Issue the non-exclusive create in guarded mode.  This
2450 2450                   * may result in some false EEXIST responses for
2451 2451                   * retransmitted requests, but these will be handled at
2452 2452                   * a higher level.  By using GUARDED, duplicate requests
2453 2453                   * to do file truncation and possible access problems
2454 2454                   * can be avoided.
2455 2455                   */
2456 2456                  args.how.mode = GUARDED;
2457 2457                  error = vattr_to_sattr3(va,
2458 2458                      &args.how.createhow3_u.obj_attributes);
2459 2459                  if (error) {
2460 2460                          /* req time field(s) overflow - return immediately */
2461 2461                          return (error);
2462 2462                  }
2463 2463          }
2464 2464  
2465 2465          douprintf = 1;
2466 2466  
2467 2467          t = gethrtime();
2468 2468  
2469 2469          error = rfs3call(VTOMI(dvp), NFSPROC3_CREATE,
2470 2470              xdr_CREATE3args, (caddr_t)&args,
2471 2471              xdr_CREATE3res, (caddr_t)&res, cr,
2472 2472              &douprintf, &res.status, 0, NULL);
2473 2473  
2474 2474          if (error) {
2475 2475                  PURGE_ATTRCACHE(dvp);
2476 2476                  return (error);
2477 2477          }
2478 2478  
2479 2479          error = geterrno3(res.status);
2480 2480          if (!error) {
2481 2481                  nfs3_cache_wcc_data(dvp, &res.resok.dir_wcc, t, cr);
2482 2482                  if (HAVE_RDDIR_CACHE(VTOR(dvp)))
2483 2483                          nfs_purge_rddir_cache(dvp);
2484 2484  
2485 2485                  /*
2486 2486                   * On exclusive create the times need to be explicitly
2487 2487                   * set to clear any potential verifier that may be stored
2488 2488                   * in one of these fields (see comment below).  This
2489 2489                   * is done here to cover the case where no post op attrs
2490 2490                   * were returned or a 'invalid' time was returned in
2491 2491                   * the attributes.
2492 2492                   */
2493 2493                  if (exclusive == EXCL)
2494 2494                          va->va_mask |= (AT_MTIME | AT_ATIME);
2495 2495  
2496 2496                  if (!res.resok.obj.handle_follows) {
2497 2497                          error = nfs3lookup(dvp, nm, &vp, NULL, 0, NULL, cr, 0);
2498 2498                          if (error)
2499 2499                                  return (error);
2500 2500                  } else {
2501 2501                          if (res.resok.obj_attributes.attributes) {
2502 2502                                  vp = makenfs3node(&res.resok.obj.handle,
2503 2503                                      &res.resok.obj_attributes.attr,
2504 2504                                      dvp->v_vfsp, t, cr, NULL, NULL);
2505 2505                          } else {
2506 2506                                  vp = makenfs3node(&res.resok.obj.handle, NULL,
2507 2507                                      dvp->v_vfsp, t, cr, NULL, NULL);
2508 2508  
2509 2509                                  /*
2510 2510                                   * On an exclusive create, it is possible
2511 2511                                   * that attributes were returned but those
2512 2512                                   * postop attributes failed to decode
2513 2513                                   * properly.  If this is the case,
2514 2514                                   * then most likely the atime or mtime
2515 2515                                   * were invalid for our client; this
2516 2516                                   * is caused by the server storing the
2517 2517                                   * create verifier in one of the time
2518 2518                                   * fields(most likely mtime).
2519 2519                                   * So... we are going to setattr just the
2520 2520                                   * atime/mtime to clear things up.
2521 2521                                   */
2522 2522                                  if (exclusive == EXCL) {
2523 2523                                          if (error =
2524 2524                                              nfs3excl_create_settimes(vp,
2525 2525                                              va, cr)) {
2526 2526                                                  /*
2527 2527                                                   * Setting the times failed.
2528 2528                                                   * Remove the file and return
2529 2529                                                   * the error.
2530 2530                                                   */
2531 2531                                                  VN_RELE(vp);
2532 2532                                                  (void) nfs3_remove(dvp,
2533 2533                                                      nm, cr, NULL, 0);
2534 2534                                                  return (error);
2535 2535                                          }
2536 2536                                  }
2537 2537  
2538 2538                                  /*
2539 2539                                   * This handles the non-exclusive case
2540 2540                                   * and the exclusive case where no post op
2541 2541                                   * attrs were returned.
2542 2542                                   */
2543 2543                                  if (vp->v_type == VNON) {
2544 2544                                          vattr.va_mask = AT_TYPE;
2545 2545                                          error = nfs3getattr(vp, &vattr, cr);
2546 2546                                          if (error) {
2547 2547                                                  VN_RELE(vp);
2548 2548                                                  return (error);
2549 2549                                          }
2550 2550                                          vp->v_type = vattr.va_type;
2551 2551                                  }
2552 2552                          }
2553 2553                          dnlc_update(dvp, nm, vp);
2554 2554                  }
2555 2555  
2556 2556                  rp = VTOR(vp);
2557 2557  
2558 2558                  /*
2559 2559                   * Check here for large file handled by
2560 2560                   * LF-unaware process (as ufs_create() does)
2561 2561                   */
2562 2562                  if ((va->va_mask & AT_SIZE) && vp->v_type == VREG &&
2563 2563                      !(lfaware & FOFFMAX)) {
2564 2564                          mutex_enter(&rp->r_statelock);
2565 2565                          if (rp->r_size > MAXOFF32_T) {
2566 2566                                  mutex_exit(&rp->r_statelock);
2567 2567                                  VN_RELE(vp);
2568 2568                                  return (EOVERFLOW);
2569 2569                          }
2570 2570                          mutex_exit(&rp->r_statelock);
2571 2571                  }
2572 2572  
2573 2573                  if (exclusive == EXCL &&
2574 2574                      (va->va_mask & ~(AT_GID | AT_SIZE))) {
2575 2575                          /*
2576 2576                           * If doing an exclusive create, then generate
2577 2577                           * a SETATTR to set the initial attributes.
2578 2578                           * Try to set the mtime and the atime to the
2579 2579                           * server's current time.  It is somewhat
2580 2580                           * expected that these fields will be used to
2581 2581                           * store the exclusive create cookie.  If not,
2582 2582                           * server implementors will need to know that
2583 2583                           * a SETATTR will follow an exclusive create
2584 2584                           * and the cookie should be destroyed if
2585 2585                           * appropriate. This work may have been done
2586 2586                           * earlier in this function if post op attrs
2587 2587                           * were not available.
2588 2588                           *
2589 2589                           * The AT_GID and AT_SIZE bits are turned off
2590 2590                           * so that the SETATTR request will not attempt
2591 2591                           * to process these.  The gid will be set
2592 2592                           * separately if appropriate.  The size is turned
2593 2593                           * off because it is assumed that a new file will
2594 2594                           * be created empty and if the file wasn't empty,
2595 2595                           * then the exclusive create will have failed
2596 2596                           * because the file must have existed already.
2597 2597                           * Therefore, no truncate operation is needed.
2598 2598                           */
2599 2599                          va->va_mask &= ~(AT_GID | AT_SIZE);
2600 2600                          error = nfs3setattr(vp, va, 0, cr);
2601 2601                          if (error) {
2602 2602                                  /*
2603 2603                                   * Couldn't correct the attributes of
2604 2604                                   * the newly created file and the
2605 2605                                   * attributes are wrong.  Remove the
2606 2606                                   * file and return an error to the
2607 2607                                   * application.
2608 2608                                   */
2609 2609                                  VN_RELE(vp);
2610 2610                                  (void) nfs3_remove(dvp, nm, cr, NULL, 0);
2611 2611                                  return (error);
2612 2612                          }
2613 2613                  }
2614 2614  
2615 2615                  if (va->va_gid != rp->r_attr.va_gid) {
2616 2616                          /*
2617 2617                           * If the gid on the file isn't right, then
2618 2618                           * generate a SETATTR to attempt to change
2619 2619                           * it.  This may or may not work, depending
2620 2620                           * upon the server's semantics for allowing
2621 2621                           * file ownership changes.
2622 2622                           */
2623 2623                          va->va_mask = AT_GID;
2624 2624                          (void) nfs3setattr(vp, va, 0, cr);
2625 2625                  }
2626 2626  
2627 2627                  /*
2628 2628                   * If vnode is a device create special vnode
2629 2629                   */
2630 2630                  if (IS_DEVVP(vp)) {
2631 2631                          *vpp = specvp(vp, vp->v_rdev, vp->v_type, cr);
2632 2632                          VN_RELE(vp);
2633 2633                  } else
2634 2634                          *vpp = vp;
2635 2635          } else {
2636 2636                  nfs3_cache_wcc_data(dvp, &res.resfail.dir_wcc, t, cr);
2637 2637                  PURGE_STALE_FH(error, dvp, cr);
2638 2638          }
2639 2639  
2640 2640          return (error);
2641 2641  }
2642 2642  
/*
 * Special setattr function to take care of the rest of the atime/mtime
 * handling after a successful exclusive create.  This function exists to
 * avoid trusting attributes from the server; after an exclusive create the
 * atime/mtime fields may be 'invalid' in the client's view (they were used
 * to carry the create verifier) and therefore can not be trusted.
 *
 * On success, AT_ATIME/AT_MTIME are cleared from vap->va_mask so the
 * caller does not redo the time updates; otherwise the caller's mask is
 * restored unchanged.  Returns 0 or an errno value.
 */
static int
nfs3excl_create_settimes(vnode_t *vp, struct vattr *vap, cred_t *cr)
{
        int error;
        uint_t mask;            /* caller's vap->va_mask, restored on return */
        SETATTR3args args;
        SETATTR3res res;
        int douprintf;
        rnode_t *rp;
        hrtime_t t;             /* client time just before the OTW call */

        ASSERT(nfs_zone() == VTOMI(vp)->mi_zone);
        /* save the caller's mask so that it can be reset later */
        mask = vap->va_mask;

        rp = VTOR(vp);

        args.object = *RTOFH3(rp);
        args.guard.check = FALSE;

        /* Use the mask to initialize the arguments */
        vap->va_mask = 0;
        /*
         * With va_mask cleared there should be no time fields to convert,
         * so this call is not expected to fail; its return value is
         * intentionally overwritten by the rfs3call() below.
         */
        error = vattr_to_sattr3(vap, &args.new_attributes);

        /* We want to set just atime/mtime on this request */
        args.new_attributes.atime.set_it = SET_TO_SERVER_TIME;
        args.new_attributes.mtime.set_it = SET_TO_SERVER_TIME;

        douprintf = 1;

        t = gethrtime();

        error = rfs3call(VTOMI(vp), NFSPROC3_SETATTR,
            xdr_SETATTR3args, (caddr_t)&args,
            xdr_SETATTR3res, (caddr_t)&res, cr,
            &douprintf, &res.status, 0, NULL);

        if (error) {
                /* RPC-level failure: restore the caller's mask and bail. */
                vap->va_mask = mask;
                return (error);
        }

        error = geterrno3(res.status);
        if (!error) {
                /*
                 * It is important to pick up the attributes.
                 * Since this is the exclusive create path, the
                 * attributes on the initial create were ignored
                 * and we need these to have the correct info.
                 */
                nfs3_cache_wcc_data(vp, &res.resok.obj_wcc, t, cr);
                /*
                 * No need to do the atime/mtime work again so clear
                 * the bits.
                 */
                mask &= ~(AT_ATIME | AT_MTIME);
        } else {
                nfs3_cache_wcc_data(vp, &res.resfail.obj_wcc, t, cr);
        }

        vap->va_mask = mask;

        return (error);
}
2713 2713  
/*
 * Create a special file over the wire with the NFSv3 MKNOD procedure and
 * return the new object's vnode in *vpp.  Only device (VCHR/VBLK), FIFO
 * and socket nodes are supported; any other va_type gets EINVAL.
 * Returns 0 or an errno value.
 */
/* ARGSUSED */
static int
nfs3mknod(vnode_t *dvp, char *nm, struct vattr *va, enum vcexcl exclusive,
        int mode, vnode_t **vpp, cred_t *cr)
{
        int error;
        MKNOD3args args;
        MKNOD3res res;
        int douprintf;
        vnode_t *vp;
        struct vattr vattr;
        hrtime_t t;             /* client time just before the OTW call */

        ASSERT(nfs_zone() == VTOMI(dvp)->mi_zone);
        /* Build the type-specific portion of the MKNOD arguments. */
        switch (va->va_type) {
        case VCHR:
        case VBLK:
                setdiropargs3(&args.where, nm, dvp);
                args.what.type = (va->va_type == VCHR) ? NF3CHR : NF3BLK;
                error = vattr_to_sattr3(va,
                    &args.what.mknoddata3_u.device.dev_attributes);
                if (error) {
                        /* req time field(s) overflow - return immediately */
                        return (error);
                }
                /* The protocol carries the dev_t as a major/minor pair. */
                args.what.mknoddata3_u.device.spec.specdata1 =
                    getmajor(va->va_rdev);
                args.what.mknoddata3_u.device.spec.specdata2 =
                    getminor(va->va_rdev);
                break;

        case VFIFO:
        case VSOCK:
                setdiropargs3(&args.where, nm, dvp);
                args.what.type = (va->va_type == VFIFO) ? NF3FIFO : NF3SOCK;
                error = vattr_to_sattr3(va,
                    &args.what.mknoddata3_u.pipe_attributes);
                if (error) {
                        /* req time field(s) overflow - return immediately */
                        return (error);
                }
                break;

        default:
                return (EINVAL);
        }

        douprintf = 1;

        t = gethrtime();

        error = rfs3call(VTOMI(dvp), NFSPROC3_MKNOD,
            xdr_MKNOD3args, (caddr_t)&args,
            xdr_MKNOD3res, (caddr_t)&res, cr,
            &douprintf, &res.status, 0, NULL);

        if (error) {
                /* RPC failure: the directory's cached attrs are suspect. */
                PURGE_ATTRCACHE(dvp);
                return (error);
        }

        error = geterrno3(res.status);
        if (!error) {
                nfs3_cache_wcc_data(dvp, &res.resok.dir_wcc, t, cr);
                /* The directory changed; cached readdir results are stale. */
                if (HAVE_RDDIR_CACHE(VTOR(dvp)))
                        nfs_purge_rddir_cache(dvp);

                if (!res.resok.obj.handle_follows) {
                        /*
                         * No file handle came back with the reply, so
                         * look the new object up by name instead.
                         */
                        error = nfs3lookup(dvp, nm, &vp, NULL, 0, NULL, cr, 0);
                        if (error)
                                return (error);
                } else {
                        if (res.resok.obj_attributes.attributes) {
                                vp = makenfs3node(&res.resok.obj.handle,
                                    &res.resok.obj_attributes.attr,
                                    dvp->v_vfsp, t, cr, NULL, NULL);
                        } else {
                                /* Handle but no attrs: build the node bare. */
                                vp = makenfs3node(&res.resok.obj.handle, NULL,
                                    dvp->v_vfsp, t, cr, NULL, NULL);
                                if (vp->v_type == VNON) {
                                        /*
                                         * Fetch attributes so the vnode
                                         * gets its real type.
                                         */
                                        vattr.va_mask = AT_TYPE;
                                        error = nfs3getattr(vp, &vattr, cr);
                                        if (error) {
                                                VN_RELE(vp);
                                                return (error);
                                        }
                                        vp->v_type = vattr.va_type;
                                }

                        }
                        dnlc_update(dvp, nm, vp);
                }

                if (va->va_gid != VTOR(vp)->r_attr.va_gid) {
                        /*
                         * Try to set the gid; the server may or may not
                         * allow ownership changes, so the result is ignored.
                         */
                        va->va_mask = AT_GID;
                        (void) nfs3setattr(vp, va, 0, cr);
                }

                /*
                 * If vnode is a device create special vnode
                 */
                if (IS_DEVVP(vp)) {
                        *vpp = specvp(vp, vp->v_rdev, vp->v_type, cr);
                        VN_RELE(vp);
                } else
                        *vpp = vp;
        } else {
                nfs3_cache_wcc_data(dvp, &res.resfail.dir_wcc, t, cr);
                PURGE_STALE_FH(error, dvp, cr);
        }
        return (error);
}
2826 2826  
/*
 * Weirdness: if the vnode to be removed is open
 * we rename it instead of removing it and nfs_inactive
 * will remove the new name.
 *
 * Returns 0 or an errno value; on success a remove vnode event is fired
 * so file-event watchers see the removal.
 */
/* ARGSUSED */
static int
nfs3_remove(vnode_t *dvp, char *nm, cred_t *cr, caller_context_t *ct, int flags)
{
        int error;
        REMOVE3args args;
        REMOVE3res res;
        vnode_t *vp;
        char *tmpname;          /* generated name used for "silly rename" */
        int douprintf;
        rnode_t *rp;
        rnode_t *drp;
        hrtime_t t;             /* client time just before the OTW call */

        if (nfs_zone() != VTOMI(dvp)->mi_zone)
                return (EPERM);
        drp = VTOR(dvp);
        /* Serialize modifications to the parent directory. */
        if (nfs_rw_enter_sig(&drp->r_rwlock, RW_WRITER, INTR(dvp)))
                return (EINTR);

        error = nfs3lookup(dvp, nm, &vp, NULL, 0, NULL, cr, 0);
        if (error) {
                nfs_rw_exit(&drp->r_rwlock);
                return (error);
        }

        if (vp->v_type == VDIR && secpolicy_fs_linkdir(cr, dvp->v_vfsp)) {
                /* Unlinking a directory requires privilege. */
                VN_RELE(vp);
                nfs_rw_exit(&drp->r_rwlock);
                return (EPERM);
        }

        /*
         * First just remove the entry from the name cache, as it
         * is most likely the only entry for this vp.
         */
        dnlc_remove(dvp, nm);

        /*
         * If the file has a v_count > 1 then there may be more than one
         * entry in the name cache due multiple links or an open file,
         * but we don't have the real reference count so flush all
         * possible entries.
         */
        if (vp->v_count > 1)
                dnlc_purge_vp(vp);

        /*
         * Now we have the real reference count on the vnode
         */
        rp = VTOR(vp);
        mutex_enter(&rp->r_statelock);
        if (vp->v_count > 1 &&
            (rp->r_unldvp == NULL || strcmp(nm, rp->r_unlname) == 0)) {
                mutex_exit(&rp->r_statelock);
                /*
                 * The file is apparently still in use, so rename it to a
                 * temporary name ("silly rename"); nfs_inactive will remove
                 * the renamed file when the last reference goes away.
                 */
                tmpname = newname();
                error = nfs3rename(dvp, nm, dvp, tmpname, cr, ct);
                if (error)
                        kmem_free(tmpname, MAXNAMELEN);
                else {
                        mutex_enter(&rp->r_statelock);
                        if (rp->r_unldvp == NULL) {
                                /* Record where/as what the file is hidden. */
                                VN_HOLD(dvp);
                                rp->r_unldvp = dvp;
                                if (rp->r_unlcred != NULL)
                                        crfree(rp->r_unlcred);
                                crhold(cr);
                                rp->r_unlcred = cr;
                                rp->r_unlname = tmpname;
                        } else {
                                /* Already hidden; just track the new name. */
                                kmem_free(rp->r_unlname, MAXNAMELEN);
                                rp->r_unlname = tmpname;
                        }
                        mutex_exit(&rp->r_statelock);
                }
        } else {
                mutex_exit(&rp->r_statelock);
                /*
                 * We need to flush any dirty pages which happen to
                 * be hanging around before removing the file.  This
                 * shouldn't happen very often and mostly on file
                 * systems mounted "nocto".
                 */
                if (vn_has_cached_data(vp) &&
                    ((rp->r_flags & RDIRTY) || rp->r_count > 0)) {
                        error = nfs3_putpage(vp, (offset_t)0, 0, 0, cr, ct);
                        if (error && (error == ENOSPC || error == EDQUOT)) {
                                /* Latch the first hard error on the rnode. */
                                mutex_enter(&rp->r_statelock);
                                if (!rp->r_error)
                                        rp->r_error = error;
                                mutex_exit(&rp->r_statelock);
                        }
                }

                setdiropargs3(&args.object, nm, dvp);

                douprintf = 1;

                t = gethrtime();

                error = rfs3call(VTOMI(dvp), NFSPROC3_REMOVE,
                    xdr_diropargs3, (caddr_t)&args,
                    xdr_REMOVE3res, (caddr_t)&res, cr,
                    &douprintf, &res.status, 0, NULL);

                /*
                 * The xattr dir may be gone after last attr is removed,
                 * so flush it from dnlc.
                 */
                if (dvp->v_flag & V_XATTRDIR)
                        dnlc_purge_vp(dvp);

                PURGE_ATTRCACHE(vp);

                if (error) {
                        PURGE_ATTRCACHE(dvp);
                } else {
                        error = geterrno3(res.status);
                        if (!error) {
                                nfs3_cache_wcc_data(dvp, &res.resok.dir_wcc, t,
                                    cr);
                                if (HAVE_RDDIR_CACHE(drp))
                                        nfs_purge_rddir_cache(dvp);
                        } else {
                                nfs3_cache_wcc_data(dvp, &res.resfail.dir_wcc,
                                    t, cr);
                                PURGE_STALE_FH(error, dvp, cr);
                        }
                }
        }

        if (error == 0) {
                /* Fire the remove vnode event for file-event watchers. */
                vnevent_remove(vp, dvp, nm, ct);
        }
        VN_RELE(vp);

        nfs_rw_exit(&drp->r_rwlock);

        return (error);
}
2972 2972  
/*
 * Create a hard link named tnm in directory tdvp to the file svp using
 * the NFSv3 LINK procedure.  If the server reports EOPNOTSUPP, the
 * mount's MI_LINK flag is cleared so later attempts fail fast without
 * going over the wire.  Returns 0 or an errno value.
 */
/* ARGSUSED */
static int
nfs3_link(vnode_t *tdvp, vnode_t *svp, char *tnm, cred_t *cr,
        caller_context_t *ct, int flags)
{
        int error;
        LINK3args args;
        LINK3res res;
        vnode_t *realvp;
        int douprintf;
        mntinfo_t *mi;
        rnode_t *tdrp;
        hrtime_t t;             /* client time just before the OTW call */

        if (nfs_zone() != VTOMI(tdvp)->mi_zone)
                return (EPERM);
        /* Operate on the underlying vnode if svp is a layered vnode. */
        if (VOP_REALVP(svp, &realvp, ct) == 0)
                svp = realvp;

        mi = VTOMI(svp);

        /* Fail fast if this server is already known not to support LINK. */
        if (!(mi->mi_flags & MI_LINK))
                return (EOPNOTSUPP);

        args.file = *VTOFH3(svp);
        setdiropargs3(&args.link, tnm, tdvp);

        tdrp = VTOR(tdvp);
        /* Serialize modifications to the target directory. */
        if (nfs_rw_enter_sig(&tdrp->r_rwlock, RW_WRITER, INTR(tdvp)))
                return (EINTR);

        dnlc_remove(tdvp, tnm);

        douprintf = 1;

        t = gethrtime();

        error = rfs3call(mi, NFSPROC3_LINK,
            xdr_LINK3args, (caddr_t)&args,
            xdr_LINK3res, (caddr_t)&res, cr,
            &douprintf, &res.status, 0, NULL);

        if (error) {
                /* RPC failure: neither side's cached attrs can be trusted. */
                PURGE_ATTRCACHE(tdvp);
                PURGE_ATTRCACHE(svp);
                nfs_rw_exit(&tdrp->r_rwlock);
                return (error);
        }

        error = geterrno3(res.status);

        if (!error) {
                nfs3_cache_post_op_attr(svp, &res.resok.file_attributes, t, cr);
                nfs3_cache_wcc_data(tdvp, &res.resok.linkdir_wcc, t, cr);
                /* Directory contents changed; drop any cached readdir data. */
                if (HAVE_RDDIR_CACHE(tdrp))
                        nfs_purge_rddir_cache(tdvp);
                dnlc_update(tdvp, tnm, svp);
        } else {
                nfs3_cache_post_op_attr(svp, &res.resfail.file_attributes, t,
                    cr);
                nfs3_cache_wcc_data(tdvp, &res.resfail.linkdir_wcc, t, cr);
                if (error == EOPNOTSUPP) {
                        /* Remember that this server can't do LINK. */
                        mutex_enter(&mi->mi_lock);
                        mi->mi_flags &= ~MI_LINK;
                        mutex_exit(&mi->mi_lock);
                }
        }

        nfs_rw_exit(&tdrp->r_rwlock);

        if (!error) {
                /*
                 * Notify the source file of this link operation.
                 */
                vnevent_link(svp, ct);
        }
        return (error);
}
3051 3051  
3052 3052  /* ARGSUSED */
3053 3053  static int
3054 3054  nfs3_rename(vnode_t *odvp, char *onm, vnode_t *ndvp, char *nnm, cred_t *cr,
3055 3055          caller_context_t *ct, int flags)
3056 3056  {
3057 3057          vnode_t *realvp;
3058 3058  
3059 3059          if (nfs_zone() != VTOMI(odvp)->mi_zone)
3060 3060                  return (EPERM);
3061 3061          if (VOP_REALVP(ndvp, &realvp, ct) == 0)
3062 3062                  ndvp = realvp;
3063 3063  
3064 3064          return (nfs3rename(odvp, onm, ndvp, nnm, cr, ct));
3065 3065  }
3066 3066  
3067 3067  /*
3068 3068   * nfs3rename does the real work of renaming in NFS Version 3.
3069 3069   */
3070 3070  static int
3071 3071  nfs3rename(vnode_t *odvp, char *onm, vnode_t *ndvp, char *nnm, cred_t *cr,
3072 3072      caller_context_t *ct)
3073 3073  {
3074 3074          int error;
3075 3075          RENAME3args args;
3076 3076          RENAME3res res;
3077 3077          int douprintf;
3078 3078          vnode_t *nvp = NULL;
3079 3079          vnode_t *ovp = NULL;
3080 3080          char *tmpname;
3081 3081          rnode_t *rp;
3082 3082          rnode_t *odrp;
3083 3083          rnode_t *ndrp;
3084 3084          hrtime_t t;
3085 3085  
3086 3086          ASSERT(nfs_zone() == VTOMI(odvp)->mi_zone);
3087 3087  
3088 3088          if (strcmp(onm, ".") == 0 || strcmp(onm, "..") == 0 ||
3089 3089              strcmp(nnm, ".") == 0 || strcmp(nnm, "..") == 0)
3090 3090                  return (EINVAL);
3091 3091  
3092 3092          odrp = VTOR(odvp);
3093 3093          ndrp = VTOR(ndvp);
3094 3094          if ((intptr_t)odrp < (intptr_t)ndrp) {
3095 3095                  if (nfs_rw_enter_sig(&odrp->r_rwlock, RW_WRITER, INTR(odvp)))
3096 3096                          return (EINTR);
3097 3097                  if (nfs_rw_enter_sig(&ndrp->r_rwlock, RW_WRITER, INTR(ndvp))) {
3098 3098                          nfs_rw_exit(&odrp->r_rwlock);
3099 3099                          return (EINTR);
3100 3100                  }
3101 3101          } else {
3102 3102                  if (nfs_rw_enter_sig(&ndrp->r_rwlock, RW_WRITER, INTR(ndvp)))
3103 3103                          return (EINTR);
3104 3104                  if (nfs_rw_enter_sig(&odrp->r_rwlock, RW_WRITER, INTR(odvp))) {
3105 3105                          nfs_rw_exit(&ndrp->r_rwlock);
3106 3106                          return (EINTR);
3107 3107                  }
3108 3108          }
3109 3109  
3110 3110          /*
3111 3111           * Lookup the target file.  If it exists, it needs to be
3112 3112           * checked to see whether it is a mount point and whether
3113 3113           * it is active (open).
3114 3114           */
3115 3115          error = nfs3lookup(ndvp, nnm, &nvp, NULL, 0, NULL, cr, 0);
3116 3116          if (!error) {
3117 3117                  /*
3118 3118                   * If this file has been mounted on, then just
3119 3119                   * return busy because renaming to it would remove
3120 3120                   * the mounted file system from the name space.
3121 3121                   */
3122 3122                  if (vn_mountedvfs(nvp) != NULL) {
3123 3123                          VN_RELE(nvp);
3124 3124                          nfs_rw_exit(&odrp->r_rwlock);
3125 3125                          nfs_rw_exit(&ndrp->r_rwlock);
3126 3126                          return (EBUSY);
3127 3127                  }
3128 3128  
3129 3129                  /*
3130 3130                   * Purge the name cache of all references to this vnode
3131 3131                   * so that we can check the reference count to infer
3132 3132                   * whether it is active or not.
3133 3133                   */
3134 3134                  /*
3135 3135                   * First just remove the entry from the name cache, as it
3136 3136                   * is most likely the only entry for this vp.
3137 3137                   */
3138 3138                  dnlc_remove(ndvp, nnm);
3139 3139                  /*
3140 3140                   * If the file has a v_count > 1 then there may be more
3141 3141                   * than one entry in the name cache due multiple links
3142 3142                   * or an open file, but we don't have the real reference
3143 3143                   * count so flush all possible entries.
3144 3144                   */
3145 3145                  if (nvp->v_count > 1)
3146 3146                          dnlc_purge_vp(nvp);
3147 3147  
3148 3148                  /*
3149 3149                   * If the vnode is active and is not a directory,
3150 3150                   * arrange to rename it to a
3151 3151                   * temporary file so that it will continue to be
3152 3152                   * accessible.  This implements the "unlink-open-file"
3153 3153                   * semantics for the target of a rename operation.
3154 3154                   * Before doing this though, make sure that the
3155 3155                   * source and target files are not already the same.
3156 3156                   */
3157 3157                  if (nvp->v_count > 1 && nvp->v_type != VDIR) {
3158 3158                          /*
3159 3159                           * Lookup the source name.
3160 3160                           */
3161 3161                          error = nfs3lookup(odvp, onm, &ovp, NULL, 0, NULL,
3162 3162                              cr, 0);
3163 3163  
3164 3164                          /*
3165 3165                           * The source name *should* already exist.
3166 3166                           */
3167 3167                          if (error) {
3168 3168                                  VN_RELE(nvp);
3169 3169                                  nfs_rw_exit(&odrp->r_rwlock);
3170 3170                                  nfs_rw_exit(&ndrp->r_rwlock);
3171 3171                                  return (error);
3172 3172                          }
3173 3173  
3174 3174                          /*
3175 3175                           * Compare the two vnodes.  If they are the same,
3176 3176                           * just release all held vnodes and return success.
3177 3177                           */
3178 3178                          if (ovp == nvp) {
3179 3179                                  VN_RELE(ovp);
3180 3180                                  VN_RELE(nvp);
3181 3181                                  nfs_rw_exit(&odrp->r_rwlock);
3182 3182                                  nfs_rw_exit(&ndrp->r_rwlock);
3183 3183                                  return (0);
3184 3184                          }
3185 3185  
3186 3186                          /*
3187 3187                           * Can't mix and match directories and non-
3188 3188                           * directories in rename operations.  We already
3189 3189                           * know that the target is not a directory.  If
3190 3190                           * the source is a directory, return an error.
3191 3191                           */
3192 3192                          if (ovp->v_type == VDIR) {
3193 3193                                  VN_RELE(ovp);
3194 3194                                  VN_RELE(nvp);
3195 3195                                  nfs_rw_exit(&odrp->r_rwlock);
3196 3196                                  nfs_rw_exit(&ndrp->r_rwlock);
3197 3197                                  return (ENOTDIR);
3198 3198                          }
3199 3199  
3200 3200                          /*
3201 3201                           * The target file exists, is not the same as
3202 3202                           * the source file, and is active.  Link it
3203 3203                           * to a temporary filename to avoid having
3204 3204                           * the server removing the file completely.
3205 3205                           */
3206 3206                          tmpname = newname();
3207 3207                          error = nfs3_link(ndvp, nvp, tmpname, cr, NULL, 0);
3208 3208                          if (error == EOPNOTSUPP) {
3209 3209                                  error = nfs3_rename(ndvp, nnm, ndvp, tmpname,
3210 3210                                      cr, NULL, 0);
3211 3211                          }
3212 3212                          if (error) {
3213 3213                                  kmem_free(tmpname, MAXNAMELEN);
3214 3214                                  VN_RELE(ovp);
3215 3215                                  VN_RELE(nvp);
3216 3216                                  nfs_rw_exit(&odrp->r_rwlock);
3217 3217                                  nfs_rw_exit(&ndrp->r_rwlock);
3218 3218                                  return (error);
3219 3219                          }
3220 3220                          rp = VTOR(nvp);
3221 3221                          mutex_enter(&rp->r_statelock);
3222 3222                          if (rp->r_unldvp == NULL) {
3223 3223                                  VN_HOLD(ndvp);
3224 3224                                  rp->r_unldvp = ndvp;
3225 3225                                  if (rp->r_unlcred != NULL)
3226 3226                                          crfree(rp->r_unlcred);
3227 3227                                  crhold(cr);
3228 3228                                  rp->r_unlcred = cr;
3229 3229                                  rp->r_unlname = tmpname;
3230 3230                          } else {
3231 3231                                  kmem_free(rp->r_unlname, MAXNAMELEN);
3232 3232                                  rp->r_unlname = tmpname;
3233 3233                          }
3234 3234                          mutex_exit(&rp->r_statelock);
3235 3235                  }
3236 3236          }
3237 3237  
3238 3238          if (ovp == NULL) {
3239 3239                  /*
3240 3240                   * When renaming directories to be a subdirectory of a
3241 3241                   * different parent, the dnlc entry for ".." will no
3242 3242                   * longer be valid, so it must be removed.
3243 3243                   *
3244 3244                   * We do a lookup here to determine whether we are renaming
3245 3245                   * a directory and we need to check if we are renaming
3246 3246                   * an unlinked file.  This might have already been done
3247 3247                   * in previous code, so we check ovp == NULL to avoid
3248 3248                   * doing it twice.
3249 3249                   */
3250 3250  
3251 3251                  error = nfs3lookup(odvp, onm, &ovp, NULL, 0, NULL, cr, 0);
3252 3252                  /*
3253 3253                   * The source name *should* already exist.
3254 3254                   */
3255 3255                  if (error) {
3256 3256                          nfs_rw_exit(&odrp->r_rwlock);
3257 3257                          nfs_rw_exit(&ndrp->r_rwlock);
3258 3258                          if (nvp) {
3259 3259                                  VN_RELE(nvp);
3260 3260                          }
3261 3261                          return (error);
3262 3262                  }
3263 3263                  ASSERT(ovp != NULL);
3264 3264          }
3265 3265  
3266 3266          dnlc_remove(odvp, onm);
3267 3267          dnlc_remove(ndvp, nnm);
3268 3268  
3269 3269          setdiropargs3(&args.from, onm, odvp);
3270 3270          setdiropargs3(&args.to, nnm, ndvp);
3271 3271  
3272 3272          douprintf = 1;
3273 3273  
3274 3274          t = gethrtime();
3275 3275  
3276 3276          error = rfs3call(VTOMI(odvp), NFSPROC3_RENAME,
3277 3277              xdr_RENAME3args, (caddr_t)&args,
3278 3278              xdr_RENAME3res, (caddr_t)&res, cr,
3279 3279              &douprintf, &res.status, 0, NULL);
3280 3280  
3281 3281          if (error) {
3282 3282                  PURGE_ATTRCACHE(odvp);
3283 3283                  PURGE_ATTRCACHE(ndvp);
3284 3284                  VN_RELE(ovp);
3285 3285                  nfs_rw_exit(&odrp->r_rwlock);
3286 3286                  nfs_rw_exit(&ndrp->r_rwlock);
3287 3287                  if (nvp) {
3288 3288                          VN_RELE(nvp);
3289 3289                  }
3290 3290                  return (error);
3291 3291          }
3292 3292  
3293 3293          error = geterrno3(res.status);
3294 3294  
3295 3295          if (!error) {
3296 3296                  nfs3_cache_wcc_data(odvp, &res.resok.fromdir_wcc, t, cr);
3297 3297                  if (HAVE_RDDIR_CACHE(odrp))
3298 3298                          nfs_purge_rddir_cache(odvp);
3299 3299                  if (ndvp != odvp) {
3300 3300                          nfs3_cache_wcc_data(ndvp, &res.resok.todir_wcc, t, cr);
3301 3301                          if (HAVE_RDDIR_CACHE(ndrp))
3302 3302                                  nfs_purge_rddir_cache(ndvp);
3303 3303                  }
3304 3304                  /*
3305 3305                   * when renaming directories to be a subdirectory of a
3306 3306                   * different parent, the dnlc entry for ".." will no
3307 3307                   * longer be valid, so it must be removed
3308 3308                   */
3309 3309                  rp = VTOR(ovp);
3310 3310                  if (ndvp != odvp) {
3311 3311                          if (ovp->v_type == VDIR) {
3312 3312                                  dnlc_remove(ovp, "..");
3313 3313                                  if (HAVE_RDDIR_CACHE(rp))
3314 3314                                          nfs_purge_rddir_cache(ovp);
3315 3315                          }
3316 3316                  }
3317 3317  
3318 3318                  /*
3319 3319                   * If we are renaming the unlinked file, update the
3320 3320                   * r_unldvp and r_unlname as needed.
3321 3321                   */
3322 3322                  mutex_enter(&rp->r_statelock);
3323 3323                  if (rp->r_unldvp != NULL) {
3324 3324                          if (strcmp(rp->r_unlname, onm) == 0) {
3325 3325                                  (void) strncpy(rp->r_unlname, nnm, MAXNAMELEN);
3326 3326                                  rp->r_unlname[MAXNAMELEN - 1] = '\0';
3327 3327  
3328 3328                                  if (ndvp != rp->r_unldvp) {
3329 3329                                          VN_RELE(rp->r_unldvp);
3330 3330                                          rp->r_unldvp = ndvp;
3331 3331                                          VN_HOLD(ndvp);
3332 3332                                  }
3333 3333                          }
3334 3334                  }
3335 3335                  mutex_exit(&rp->r_statelock);
3336 3336          } else {
3337 3337                  nfs3_cache_wcc_data(odvp, &res.resfail.fromdir_wcc, t, cr);
3338 3338                  if (ndvp != odvp) {
3339 3339                          nfs3_cache_wcc_data(ndvp, &res.resfail.todir_wcc, t,
3340 3340                              cr);
3341 3341                  }
3342 3342                  /*
3343 3343                   * System V defines rename to return EEXIST, not
3344 3344                   * ENOTEMPTY if the target directory is not empty.
3345 3345                   * Over the wire, the error is NFSERR_ENOTEMPTY
  
    | 
      ↓ open down ↓ | 
    3303 lines elided | 
    
      ↑ open up ↑ | 
  
3346 3346                   * which geterrno maps to ENOTEMPTY.
3347 3347                   */
3348 3348                  if (error == ENOTEMPTY)
3349 3349                          error = EEXIST;
3350 3350          }
3351 3351  
3352 3352          if (error == 0) {
3353 3353                  if (nvp)
3354 3354                          vnevent_rename_dest(nvp, ndvp, nnm, ct);
3355 3355  
3356      -                if (odvp != ndvp)
3357      -                        vnevent_rename_dest_dir(ndvp, ct);
3358 3356                  ASSERT(ovp != NULL);
3359 3357                  vnevent_rename_src(ovp, odvp, onm, ct);
     3358 +                vnevent_rename_dest_dir(ndvp, ovp, nnm, ct);
3360 3359          }
3361 3360  
3362 3361          if (nvp) {
3363 3362                  VN_RELE(nvp);
3364 3363          }
3365 3364          VN_RELE(ovp);
3366 3365  
3367 3366          nfs_rw_exit(&odrp->r_rwlock);
3368 3367          nfs_rw_exit(&ndrp->r_rwlock);
3369 3368  
3370 3369          return (error);
3371 3370  }
3372 3371  
3373 3372  /* ARGSUSED */
           /*
            * nfs3_mkdir: VOP_MKDIR for NFSv3.  Create directory "nm" under
            * dvp with attributes from "va" via an over-the-wire MKDIR RPC
            * and return the new directory's vnode in *vpp.  Returns 0 or an
            * errno.  The parent rnode's r_rwlock is held as writer across
            * the RPC to serialize with other directory operations.
            */
3374 3373  static int
3375 3374  nfs3_mkdir(vnode_t *dvp, char *nm, struct vattr *va, vnode_t **vpp, cred_t *cr,
3376 3375          caller_context_t *ct, int flags, vsecattr_t *vsecp)
3377 3376  {
3378 3377          int error;
3379 3378          MKDIR3args args;
3380 3379          MKDIR3res res;
3381 3380          int douprintf;
3382 3381          struct vattr vattr;
3383 3382          vnode_t *vp;
3384 3383          rnode_t *drp;
3385 3384          hrtime_t t;
3386 3385  
           /* Cross-zone access to an NFS mount is not permitted. */
3387 3386          if (nfs_zone() != VTOMI(dvp)->mi_zone)
3388 3387                  return (EPERM);
3389 3388          setdiropargs3(&args.where, nm, dvp);
3390 3389  
3391 3390          /*
3392 3391           * Decide what the group-id and set-gid bit of the created directory
3393 3392           * should be.  May have to do a setattr to get the gid right.
3394 3393           */
3395 3394          error = setdirgid(dvp, &va->va_gid, cr);
3396 3395          if (error)
3397 3396                  return (error);
3398 3397          error = setdirmode(dvp, &va->va_mode, cr);
3399 3398          if (error)
3400 3399                  return (error);
3401 3400          va->va_mask |= AT_MODE|AT_GID;
3402 3401  
3403 3402          error = vattr_to_sattr3(va, &args.attributes);
3404 3403          if (error) {
3405 3404                  /* req time field(s) overflow - return immediately */
3406 3405                  return (error);
3407 3406          }
3408 3407  
3409 3408          drp = VTOR(dvp);
3410 3409          if (nfs_rw_enter_sig(&drp->r_rwlock, RW_WRITER, INTR(dvp)))
3411 3410                  return (EINTR);
3412 3411  
3413 3412          dnlc_remove(dvp, nm);
3414 3413  
3415 3414          douprintf = 1;
3416 3415  
3417 3416          t = gethrtime();
3418 3417  
           /* Issue the MKDIR call over the wire. */
3419 3418          error = rfs3call(VTOMI(dvp), NFSPROC3_MKDIR,
3420 3419              xdr_MKDIR3args, (caddr_t)&args,
3421 3420              xdr_MKDIR3res, (caddr_t)&res, cr,
3422 3421              &douprintf, &res.status, 0, NULL);
3423 3422  
3424 3423          if (error) {
3425 3424                  PURGE_ATTRCACHE(dvp);
3426 3425                  nfs_rw_exit(&drp->r_rwlock);
3427 3426                  return (error);
3428 3427          }
3429 3428  
3430 3429          error = geterrno3(res.status);
3431 3430          if (!error) {
3432 3431                  nfs3_cache_wcc_data(dvp, &res.resok.dir_wcc, t, cr);
3433 3432                  if (HAVE_RDDIR_CACHE(drp))
3434 3433                          nfs_purge_rddir_cache(dvp);
3435 3434  
           /* No file handle in the reply; look the new directory up by name. */
3436 3435                  if (!res.resok.obj.handle_follows) {
3437 3436                          error = nfs3lookup(dvp, nm, &vp, NULL, 0, NULL, cr, 0);
3438 3437                          if (error) {
3439 3438                                  nfs_rw_exit(&drp->r_rwlock);
3440 3439                                  return (error);
3441 3440                          }
3442 3441                  } else {
3443 3442                          if (res.resok.obj_attributes.attributes) {
3444 3443                                  vp = makenfs3node(&res.resok.obj.handle,
3445 3444                                      &res.resok.obj_attributes.attr,
3446 3445                                      dvp->v_vfsp, t, cr, NULL, NULL);
3447 3446                          } else {
           /* Handle but no attributes: node type must be fetched explicitly. */
3448 3447                                  vp = makenfs3node(&res.resok.obj.handle, NULL,
3449 3448                                      dvp->v_vfsp, t, cr, NULL, NULL);
3450 3449                                  if (vp->v_type == VNON) {
3451 3450                                          vattr.va_mask = AT_TYPE;
3452 3451                                          error = nfs3getattr(vp, &vattr, cr);
3453 3452                                          if (error) {
3454 3453                                                  VN_RELE(vp);
3455 3454                                                  nfs_rw_exit(&drp->r_rwlock);
3456 3455                                                  return (error);
3457 3456                                          }
3458 3457                                          vp->v_type = vattr.va_type;
3459 3458                                  }
3460 3459                          }
3461 3460                          dnlc_update(dvp, nm, vp);
3462 3461                  }
           /* Server may not have honored the requested gid; fix with setattr. */
3463 3462                  if (va->va_gid != VTOR(vp)->r_attr.va_gid) {
3464 3463                          va->va_mask = AT_GID;
3465 3464                          (void) nfs3setattr(vp, va, 0, cr);
3466 3465                  }
3467 3466                  *vpp = vp;
3468 3467          } else {
3469 3468                  nfs3_cache_wcc_data(dvp, &res.resfail.dir_wcc, t, cr);
3470 3469                  PURGE_STALE_FH(error, dvp, cr);
3471 3470          }
3472 3471  
3473 3472          nfs_rw_exit(&drp->r_rwlock);
3474 3473  
3475 3474          return (error);
3476 3475  }
3477 3476  
3478 3477  /* ARGSUSED */
           /*
            * nfs3_rmdir: VOP_RMDIR for NFSv3.  Remove directory "nm" from dvp
            * via an over-the-wire RMDIR RPC.  "cdir" is the caller's current
            * directory, used to reject an rmdir of ".".  On success a
            * vnevent_rmdir event is generated for the removed directory.
            * Returns 0 or an errno; ENOTEMPTY is mapped to EEXIST for
            * System V compatibility.
            */
3479 3478  static int
3480 3479  nfs3_rmdir(vnode_t *dvp, char *nm, vnode_t *cdir, cred_t *cr,
3481 3480          caller_context_t *ct, int flags)
3482 3481  {
3483 3482          int error;
3484 3483          RMDIR3args args;
3485 3484          RMDIR3res res;
3486 3485          vnode_t *vp;
3487 3486          int douprintf;
3488 3487          rnode_t *drp;
3489 3488          hrtime_t t;
3490 3489  
           /* Cross-zone access to an NFS mount is not permitted. */
3491 3490          if (nfs_zone() != VTOMI(dvp)->mi_zone)
3492 3491                  return (EPERM);
3493 3492          drp = VTOR(dvp);
3494 3493          if (nfs_rw_enter_sig(&drp->r_rwlock, RW_WRITER, INTR(dvp)))
3495 3494                  return (EINTR);
3496 3495  
3497 3496          /*
3498 3497           * Attempt to prevent a rmdir(".") from succeeding.
3499 3498           */
3500 3499          error = nfs3lookup(dvp, nm, &vp, NULL, 0, NULL, cr, 0);
3501 3500          if (error) {
3502 3501                  nfs_rw_exit(&drp->r_rwlock);
3503 3502                  return (error);
3504 3503          }
3505 3504  
3506 3505          if (vp == cdir) {
3507 3506                  VN_RELE(vp);
3508 3507                  nfs_rw_exit(&drp->r_rwlock);
3509 3508                  return (EINVAL);
3510 3509          }
3511 3510  
3512 3511          setdiropargs3(&args.object, nm, dvp);
3513 3512  
3514 3513          /*
3515 3514           * First just remove the entry from the name cache, as it
3516 3515           * is most likely an entry for this vp.
3517 3516           */
3518 3517          dnlc_remove(dvp, nm);
3519 3518  
3520 3519          /*
3521 3520           * If there vnode reference count is greater than one, then
3522 3521           * there may be additional references in the DNLC which will
3523 3522           * need to be purged.  First, trying removing the entry for
3524 3523           * the parent directory and see if that removes the additional
3525 3524           * reference(s).  If that doesn't do it, then use dnlc_purge_vp
3526 3525           * to completely remove any references to the directory which
3527 3526           * might still exist in the DNLC.
3528 3527           */
3529 3528          if (vp->v_count > 1) {
3530 3529                  dnlc_remove(vp, "..");
3531 3530                  if (vp->v_count > 1)
3532 3531                          dnlc_purge_vp(vp);
3533 3532          }
3534 3533  
3535 3534          douprintf = 1;
3536 3535  
3537 3536          t = gethrtime();
3538 3537  
           /* Issue the RMDIR call over the wire. */
3539 3538          error = rfs3call(VTOMI(dvp), NFSPROC3_RMDIR,
3540 3539              xdr_diropargs3, (caddr_t)&args,
3541 3540              xdr_RMDIR3res, (caddr_t)&res, cr,
3542 3541              &douprintf, &res.status, 0, NULL);
3543 3542  
           /* The target's cached attributes are stale regardless of outcome. */
3544 3543          PURGE_ATTRCACHE(vp);
3545 3544  
3546 3545          if (error) {
3547 3546                  PURGE_ATTRCACHE(dvp);
3548 3547                  VN_RELE(vp);
3549 3548                  nfs_rw_exit(&drp->r_rwlock);
3550 3549                  return (error);
3551 3550          }
3552 3551  
3553 3552          error = geterrno3(res.status);
3554 3553          if (!error) {
3555 3554                  nfs3_cache_wcc_data(dvp, &res.resok.dir_wcc, t, cr);
3556 3555                  if (HAVE_RDDIR_CACHE(drp))
3557 3556                          nfs_purge_rddir_cache(dvp);
3558 3557                  if (HAVE_RDDIR_CACHE(VTOR(vp)))
3559 3558                          nfs_purge_rddir_cache(vp);
3560 3559          } else {
3561 3560                  nfs3_cache_wcc_data(dvp, &res.resfail.dir_wcc, t, cr);
3562 3561                  PURGE_STALE_FH(error, dvp, cr);
3563 3562                  /*
3564 3563                   * System V defines rmdir to return EEXIST, not
3565 3564                   * ENOTEMPTY if the directory is not empty.  Over
3566 3565                   * the wire, the error is NFSERR_ENOTEMPTY which
3567 3566                   * geterrno maps to ENOTEMPTY.
3568 3567                   */
3569 3568                  if (error == ENOTEMPTY)
3570 3569                          error = EEXIST;
3571 3570          }
3572 3571  
3573 3572          if (error == 0) {
3574 3573                  vnevent_rmdir(vp, dvp, nm, ct);
3575 3574          }
3576 3575          VN_RELE(vp);
3577 3576  
3578 3577          nfs_rw_exit(&drp->r_rwlock);
3579 3578  
3580 3579          return (error);
3581 3580  }
3582 3581  
3583 3582  /* ARGSUSED */
           /*
            * nfs3_symlink: VOP_SYMLINK for NFSv3.  Create symlink "lnm" in
            * dvp whose contents are "tnm", via an over-the-wire SYMLINK RPC.
            * If the server answers EOPNOTSUPP, MI_SYMLINK is cleared so
            * later attempts fail fast without going over the wire.
            * Returns 0 or an errno.
            */
3584 3583  static int
3585 3584  nfs3_symlink(vnode_t *dvp, char *lnm, struct vattr *tva, char *tnm, cred_t *cr,
3586 3585          caller_context_t *ct, int flags)
3587 3586  {
3588 3587          int error;
3589 3588          SYMLINK3args args;
3590 3589          SYMLINK3res res;
3591 3590          int douprintf;
3592 3591          mntinfo_t *mi;
3593 3592          vnode_t *vp;
3594 3593          rnode_t *rp;
3595 3594          char *contents;
3596 3595          rnode_t *drp;
3597 3596          hrtime_t t;
3598 3597  
3599 3598          mi = VTOMI(dvp);
3600 3599  
           /* Cross-zone access to an NFS mount is not permitted. */
3601 3600          if (nfs_zone() != mi->mi_zone)
3602 3601                  return (EPERM);
           /* MI_SYMLINK is cleared below once a server reports EOPNOTSUPP. */
3603 3602          if (!(mi->mi_flags & MI_SYMLINK))
3604 3603                  return (EOPNOTSUPP);
3605 3604  
3606 3605          setdiropargs3(&args.where, lnm, dvp);
3607 3606          error = vattr_to_sattr3(tva, &args.symlink.symlink_attributes);
3608 3607          if (error) {
3609 3608                  /* req time field(s) overflow - return immediately */
3610 3609                  return (error);
3611 3610          }
3612 3611          args.symlink.symlink_data = tnm;
3613 3612  
3614 3613          drp = VTOR(dvp);
3615 3614          if (nfs_rw_enter_sig(&drp->r_rwlock, RW_WRITER, INTR(dvp)))
3616 3615                  return (EINTR);
3617 3616  
3618 3617          dnlc_remove(dvp, lnm);
3619 3618  
3620 3619          douprintf = 1;
3621 3620  
3622 3621          t = gethrtime();
3623 3622  
           /* Issue the SYMLINK call over the wire. */
3624 3623          error = rfs3call(mi, NFSPROC3_SYMLINK,
3625 3624              xdr_SYMLINK3args, (caddr_t)&args,
3626 3625              xdr_SYMLINK3res, (caddr_t)&res, cr,
3627 3626              &douprintf, &res.status, 0, NULL);
3628 3627  
3629 3628          if (error) {
3630 3629                  PURGE_ATTRCACHE(dvp);
3631 3630                  nfs_rw_exit(&drp->r_rwlock);
3632 3631                  return (error);
3633 3632          }
3634 3633  
3635 3634          error = geterrno3(res.status);
3636 3635          if (!error) {
3637 3636                  nfs3_cache_wcc_data(dvp, &res.resok.dir_wcc, t, cr);
3638 3637                  if (HAVE_RDDIR_CACHE(drp))
3639 3638                          nfs_purge_rddir_cache(dvp);
3640 3639  
3641 3640                  if (res.resok.obj.handle_follows) {
3642 3641                          if (res.resok.obj_attributes.attributes) {
3643 3642                                  vp = makenfs3node(&res.resok.obj.handle,
3644 3643                                      &res.resok.obj_attributes.attr,
3645 3644                                      dvp->v_vfsp, t, cr, NULL, NULL);
3646 3645                          } else {
3647 3646                                  vp = makenfs3node(&res.resok.obj.handle, NULL,
3648 3647                                      dvp->v_vfsp, t, cr, NULL, NULL);
3649 3648                                  vp->v_type = VLNK;
3650 3649                                  vp->v_rdev = 0;
3651 3650                          }
3652 3651                          dnlc_update(dvp, lnm, vp);
3653 3652                          rp = VTOR(vp);
           /*
            * Opportunistically prime the symlink contents cache.  The
            * allocation is KM_NOSLEEP: on failure the cache is simply not
            * populated.  r_statelock guards against a racing filler; the
            * loser frees its buffer.
            */
3654 3653                          if (nfs3_do_symlink_cache &&
3655 3654                              rp->r_symlink.contents == NULL) {
3656 3655  
3657 3656                                  contents = kmem_alloc(MAXPATHLEN,
3658 3657                                      KM_NOSLEEP);
3659 3658  
3660 3659                                  if (contents != NULL) {
3661 3660                                          mutex_enter(&rp->r_statelock);
3662 3661                                          if (rp->r_symlink.contents == NULL) {
3663 3662                                                  rp->r_symlink.len = strlen(tnm);
3664 3663                                                  bcopy(tnm, contents,
3665 3664                                                      rp->r_symlink.len);
3666 3665                                                  rp->r_symlink.contents =
3667 3666                                                      contents;
3668 3667                                                  rp->r_symlink.size = MAXPATHLEN;
3669 3668                                                  mutex_exit(&rp->r_statelock);
3670 3669                                          } else {
3671 3670                                                  mutex_exit(&rp->r_statelock);
3672 3671                                                  kmem_free((void *)contents,
3673 3672                                                      MAXPATHLEN);
3674 3673                                          }
3675 3674                                  }
3676 3675                          }
3677 3676                          VN_RELE(vp);
3678 3677                  }
3679 3678          } else {
3680 3679                  nfs3_cache_wcc_data(dvp, &res.resfail.dir_wcc, t, cr);
3681 3680                  PURGE_STALE_FH(error, dvp, cr);
3682 3681                  if (error == EOPNOTSUPP) {
3683 3682                          mutex_enter(&mi->mi_lock);
3684 3683                          mi->mi_flags &= ~MI_SYMLINK;
3685 3684                          mutex_exit(&mi->mi_lock);
3686 3685                  }
3687 3686          }
3688 3687  
3689 3688          nfs_rw_exit(&drp->r_rwlock);
3690 3689  
3691 3690          return (error);
3692 3691  }
3693 3692  
           /* DEBUG-only counters tracking readdir cache effectiveness. */
3694 3693  #ifdef DEBUG
3695 3694  static int nfs3_readdir_cache_hits = 0;
3696 3695  static int nfs3_readdir_cache_shorts = 0;
3697 3696  static int nfs3_readdir_cache_waits = 0;
3698 3697  static int nfs3_readdir_cache_misses = 0;
3699 3698  static int nfs3_readdir_readahead = 0;
3700 3699  #endif
3701 3700  
           /*
            * Interoperability workaround tunable: when nonzero, readdir
            * request sizes are limited to 1024 bytes for servers that
            * mishandle large READDIR/READDIRPLUS requests (see the size
            * clamp in nfs3_readdir).
            */
3702 3701  static int nfs3_shrinkreaddir = 0;
3703 3702  
3704 3703  /*
3705 3704   * Read directory entries.
3706 3705   * There are some weird things to look out for here.  The uio_loffset
3707 3706   * field is either 0 or it is the offset returned from a previous
3708 3707   * readdir.  It is an opaque value used by the server to find the
3709 3708   * correct directory block to read. The count field is the number
3710 3709   * of blocks to read on the server.  This is advisory only, the server
3711 3710   * may return only one block's worth of entries.  Entries may be compressed
3712 3711   * on the server.
3713 3712   */
3714 3713  /* ARGSUSED */
3715 3714  static int
3716 3715  nfs3_readdir(vnode_t *vp, struct uio *uiop, cred_t *cr, int *eofp,
3717 3716          caller_context_t *ct, int flags)
3718 3717  {
3719 3718          int error;
3720 3719          size_t count;
3721 3720          rnode_t *rp;
3722 3721          rddir_cache *rdc;
3723 3722          rddir_cache *nrdc;
3724 3723          rddir_cache *rrdc;
3725 3724  #ifdef DEBUG
3726 3725          int missed;
3727 3726  #endif
3728 3727          int doreadahead;
3729 3728          rddir_cache srdc;
3730 3729          avl_index_t where;
3731 3730  
3732 3731          if (nfs_zone() != VTOMI(vp)->mi_zone)
3733 3732                  return (EIO);
3734 3733          rp = VTOR(vp);
3735 3734  
3736 3735          ASSERT(nfs_rw_lock_held(&rp->r_rwlock, RW_READER));
3737 3736  
3738 3737          /*
3739 3738           * Make sure that the directory cache is valid.
3740 3739           */
3741 3740          if (HAVE_RDDIR_CACHE(rp)) {
3742 3741                  if (nfs_disable_rddir_cache) {
3743 3742                          /*
3744 3743                           * Setting nfs_disable_rddir_cache in /etc/system
3745 3744                           * allows interoperability with servers that do not
3746 3745                           * properly update the attributes of directories.
3747 3746                           * Any cached information gets purged before an
3748 3747                           * access is made to it.
3749 3748                           */
3750 3749                          nfs_purge_rddir_cache(vp);
3751 3750                  } else {
3752 3751                          error = nfs3_validate_caches(vp, cr);
3753 3752                          if (error)
3754 3753                                  return (error);
3755 3754                  }
3756 3755          }
3757 3756  
3758 3757          /*
3759 3758           * It is possible that some servers may not be able to correctly
3760 3759           * handle a large READDIR or READDIRPLUS request due to bugs in
3761 3760           * their implementation.  In order to continue to interoperate
3762 3761           * with them, this workaround is provided to limit the maximum
3763 3762           * size of a READDIRPLUS request to 1024.  In any case, the request
3764 3763           * size is limited to MAXBSIZE.
3765 3764           */
3766 3765          count = MIN(uiop->uio_iov->iov_len,
3767 3766              nfs3_shrinkreaddir ? 1024 : MAXBSIZE);
3768 3767  
3769 3768          nrdc = NULL;
3770 3769  #ifdef DEBUG
3771 3770          missed = 0;
3772 3771  #endif
3773 3772  top:
3774 3773          /*
3775 3774           * Short circuit last readdir which always returns 0 bytes.
3776 3775           * This can be done after the directory has been read through
3777 3776           * completely at least once.  This will set r_direof which
3778 3777           * can be used to find the value of the last cookie.
3779 3778           */
3780 3779          mutex_enter(&rp->r_statelock);
3781 3780          if (rp->r_direof != NULL &&
3782 3781              uiop->uio_loffset == rp->r_direof->nfs3_ncookie) {
3783 3782                  mutex_exit(&rp->r_statelock);
3784 3783  #ifdef DEBUG
3785 3784                  nfs3_readdir_cache_shorts++;
3786 3785  #endif
3787 3786                  if (eofp)
3788 3787                          *eofp = 1;
3789 3788                  if (nrdc != NULL)
3790 3789                          rddir_cache_rele(nrdc);
3791 3790                  return (0);
3792 3791          }
3793 3792          /*
3794 3793           * Look for a cache entry.  Cache entries are identified
3795 3794           * by the NFS cookie value and the byte count requested.
3796 3795           */
3797 3796          srdc.nfs3_cookie = uiop->uio_loffset;
3798 3797          srdc.buflen = count;
3799 3798          rdc = avl_find(&rp->r_dir, &srdc, &where);
3800 3799          if (rdc != NULL) {
3801 3800                  rddir_cache_hold(rdc);
3802 3801                  /*
3803 3802                   * If the cache entry is in the process of being
3804 3803                   * filled in, wait until this completes.  The
3805 3804                   * RDDIRWAIT bit is set to indicate that someone
3806 3805                   * is waiting and then the thread currently
3807 3806                   * filling the entry is done, it should do a
3808 3807                   * cv_broadcast to wakeup all of the threads
3809 3808                   * waiting for it to finish.
3810 3809                   */
3811 3810                  if (rdc->flags & RDDIR) {
3812 3811                          nfs_rw_exit(&rp->r_rwlock);
3813 3812                          rdc->flags |= RDDIRWAIT;
3814 3813  #ifdef DEBUG
3815 3814                          nfs3_readdir_cache_waits++;
3816 3815  #endif
3817 3816                          if (!cv_wait_sig(&rdc->cv, &rp->r_statelock)) {
3818 3817                                  /*
3819 3818                                   * We got interrupted, probably
3820 3819                                   * the user typed ^C or an alarm
3821 3820                                   * fired.  We free the new entry
3822 3821                                   * if we allocated one.
3823 3822                                   */
3824 3823                                  mutex_exit(&rp->r_statelock);
3825 3824                                  (void) nfs_rw_enter_sig(&rp->r_rwlock,
3826 3825                                      RW_READER, FALSE);
3827 3826                                  rddir_cache_rele(rdc);
3828 3827                                  if (nrdc != NULL)
3829 3828                                          rddir_cache_rele(nrdc);
3830 3829                                  return (EINTR);
3831 3830                          }
3832 3831                          mutex_exit(&rp->r_statelock);
3833 3832                          (void) nfs_rw_enter_sig(&rp->r_rwlock,
3834 3833                              RW_READER, FALSE);
3835 3834                          rddir_cache_rele(rdc);
3836 3835                          goto top;
3837 3836                  }
3838 3837                  /*
3839 3838                   * Check to see if a readdir is required to
3840 3839                   * fill the entry.  If so, mark this entry
3841 3840                   * as being filled, remove our reference,
3842 3841                   * and branch to the code to fill the entry.
3843 3842                   */
3844 3843                  if (rdc->flags & RDDIRREQ) {
3845 3844                          rdc->flags &= ~RDDIRREQ;
3846 3845                          rdc->flags |= RDDIR;
3847 3846                          if (nrdc != NULL)
3848 3847                                  rddir_cache_rele(nrdc);
3849 3848                          nrdc = rdc;
3850 3849                          mutex_exit(&rp->r_statelock);
3851 3850                          goto bottom;
3852 3851                  }
3853 3852  #ifdef DEBUG
3854 3853                  if (!missed)
3855 3854                          nfs3_readdir_cache_hits++;
3856 3855  #endif
3857 3856                  /*
3858 3857                   * If an error occurred while attempting
3859 3858                   * to fill the cache entry, just return it.
3860 3859                   */
3861 3860                  if (rdc->error) {
3862 3861                          error = rdc->error;
3863 3862                          mutex_exit(&rp->r_statelock);
3864 3863                          rddir_cache_rele(rdc);
3865 3864                          if (nrdc != NULL)
3866 3865                                  rddir_cache_rele(nrdc);
3867 3866                          return (error);
3868 3867                  }
3869 3868  
3870 3869                  /*
3871 3870                   * The cache entry is complete and good,
3872 3871                   * copyout the dirent structs to the calling
3873 3872                   * thread.
3874 3873                   */
3875 3874                  error = uiomove(rdc->entries, rdc->entlen, UIO_READ, uiop);
3876 3875  
3877 3876                  /*
3878 3877                   * If no error occurred during the copyout,
3879 3878                   * update the offset in the uio struct to
3880 3879                   * contain the value of the next cookie
3881 3880                   * and set the eof value appropriately.
3882 3881                   */
3883 3882                  if (!error) {
3884 3883                          uiop->uio_loffset = rdc->nfs3_ncookie;
3885 3884                          if (eofp)
3886 3885                                  *eofp = rdc->eof;
3887 3886                  }
3888 3887  
3889 3888                  /*
3890 3889                   * Decide whether to do readahead.
3891 3890                   *
3892 3891                   * Don't if have already read to the end of
3893 3892                   * directory.  There is nothing more to read.
3894 3893                   *
3895 3894                   * Don't if the application is not doing
3896 3895                   * lookups in the directory.  The readahead
3897 3896                   * is only effective if the application can
3898 3897                   * be doing work while an async thread is
3899 3898                   * handling the over the wire request.
3900 3899                   */
3901 3900                  if (rdc->eof) {
3902 3901                          rp->r_direof = rdc;
3903 3902                          doreadahead = FALSE;
3904 3903                  } else if (!(rp->r_flags & RLOOKUP))
3905 3904                          doreadahead = FALSE;
3906 3905                  else
3907 3906                          doreadahead = TRUE;
3908 3907  
3909 3908                  if (!doreadahead) {
3910 3909                          mutex_exit(&rp->r_statelock);
3911 3910                          rddir_cache_rele(rdc);
3912 3911                          if (nrdc != NULL)
3913 3912                                  rddir_cache_rele(nrdc);
3914 3913                          return (error);
3915 3914                  }
3916 3915  
3917 3916                  /*
3918 3917                   * Check to see whether we found an entry
3919 3918                   * for the readahead.  If so, we don't need
3920 3919                   * to do anything further, so free the new
3921 3920                   * entry if one was allocated.  Otherwise,
3922 3921                   * allocate a new entry, add it to the cache,
3923 3922                   * and then initiate an asynchronous readdir
3924 3923                   * operation to fill it.
3925 3924                   */
3926 3925                  srdc.nfs3_cookie = rdc->nfs3_ncookie;
3927 3926                  srdc.buflen = count;
3928 3927                  rrdc = avl_find(&rp->r_dir, &srdc, &where);
3929 3928                  if (rrdc != NULL) {
3930 3929                          if (nrdc != NULL)
3931 3930                                  rddir_cache_rele(nrdc);
3932 3931                  } else {
3933 3932                          if (nrdc != NULL)
3934 3933                                  rrdc = nrdc;
3935 3934                          else {
3936 3935                                  rrdc = rddir_cache_alloc(KM_NOSLEEP);
3937 3936                          }
3938 3937                          if (rrdc != NULL) {
3939 3938                                  rrdc->nfs3_cookie = rdc->nfs3_ncookie;
3940 3939                                  rrdc->buflen = count;
3941 3940                                  avl_insert(&rp->r_dir, rrdc, where);
3942 3941                                  rddir_cache_hold(rrdc);
3943 3942                                  mutex_exit(&rp->r_statelock);
3944 3943                                  rddir_cache_rele(rdc);
3945 3944  #ifdef DEBUG
3946 3945                                  nfs3_readdir_readahead++;
3947 3946  #endif
3948 3947                                  nfs_async_readdir(vp, rrdc, cr, do_nfs3readdir);
3949 3948                                  return (error);
3950 3949                          }
3951 3950                  }
3952 3951  
3953 3952                  mutex_exit(&rp->r_statelock);
3954 3953                  rddir_cache_rele(rdc);
3955 3954                  return (error);
3956 3955          }
3957 3956  
3958 3957          /*
3959 3958           * Didn't find an entry in the cache.  Construct a new empty
3960 3959           * entry and link it into the cache.  Other processes attempting
3961 3960           * to access this entry will need to wait until it is filled in.
3962 3961           *
3963 3962           * Since kmem_alloc may block, another pass through the cache
3964 3963           * will need to be taken to make sure that another process
3965 3964           * hasn't already added an entry to the cache for this request.
3966 3965           */
3967 3966          if (nrdc == NULL) {
3968 3967                  mutex_exit(&rp->r_statelock);
3969 3968                  nrdc = rddir_cache_alloc(KM_SLEEP);
3970 3969                  nrdc->nfs3_cookie = uiop->uio_loffset;
3971 3970                  nrdc->buflen = count;
3972 3971                  goto top;
3973 3972          }
3974 3973  
3975 3974          /*
3976 3975           * Add this entry to the cache.
3977 3976           */
3978 3977          avl_insert(&rp->r_dir, nrdc, where);
3979 3978          rddir_cache_hold(nrdc);
3980 3979          mutex_exit(&rp->r_statelock);
3981 3980  
3982 3981  bottom:
3983 3982  #ifdef DEBUG
3984 3983          missed = 1;
3985 3984          nfs3_readdir_cache_misses++;
3986 3985  #endif
3987 3986          /*
3988 3987           * Do the readdir.  This routine decides whether to use
3989 3988           * READDIR or READDIRPLUS.
3990 3989           */
3991 3990          error = do_nfs3readdir(vp, nrdc, cr);
3992 3991  
3993 3992          /*
3994 3993           * If this operation failed, just return the error which occurred.
3995 3994           */
3996 3995          if (error != 0)
3997 3996                  return (error);
3998 3997  
3999 3998          /*
4000 3999           * Since the RPC operation will have taken sometime and blocked
4001 4000           * this process, another pass through the cache will need to be
4002 4001           * taken to find the correct cache entry.  It is possible that
4003 4002           * the correct cache entry will not be there (although one was
4004 4003           * added) because the directory changed during the RPC operation
4005 4004           * and the readdir cache was flushed.  In this case, just start
4006 4005           * over.  It is hoped that this will not happen too often... :-)
4007 4006           */
4008 4007          nrdc = NULL;
4009 4008          goto top;
4010 4009          /* NOTREACHED */
4011 4010  }
4012 4011  
/*
 * Fill in the given readdir cache entry by issuing the appropriate
 * over-the-wire operation (READDIR or READDIRPLUS), then wake any
 * threads blocked on the entry and return its final error status.
 * The hold on rdc passed in by the caller is released here.
 */
static int
do_nfs3readdir(vnode_t *vp, rddir_cache *rdc, cred_t *cr)
{
	int error;
	rnode_t *rp;
	mntinfo_t *mi;

	rp = VTOR(vp);
	mi = VTOMI(vp);
	ASSERT(nfs_zone() == mi->mi_zone);
	/*
	 * Issue the proper request.
	 *
	 * If the server does not support READDIRPLUS, then use READDIR.
	 *
	 * Otherwise --
	 * Issue a READDIRPLUS if reading to fill an empty cache or if
	 * an application has performed a lookup in the directory which
	 * required an over the wire lookup.  The use of READDIRPLUS
	 * will help to (re)populate the DNLC.
	 */
	if (!(mi->mi_flags & MI_READDIRONLY) &&
	    (rp->r_flags & (RLOOKUP | RREADDIRPLUS))) {
		if (rp->r_flags & RREADDIRPLUS) {
			/* Consume the one-shot RREADDIRPLUS hint. */
			mutex_enter(&rp->r_statelock);
			rp->r_flags &= ~RREADDIRPLUS;
			mutex_exit(&rp->r_statelock);
		}
		nfs3readdirplus(vp, rdc, cr);
		/* Fall back to plain READDIR if the server lacks PLUS. */
		if (rdc->error == EOPNOTSUPP)
			nfs3readdir(vp, rdc, cr);
	} else
		nfs3readdir(vp, rdc, cr);

	/*
	 * The entry is no longer being filled; wake any waiters and,
	 * on failure, flag the entry so the request will be reissued.
	 */
	mutex_enter(&rp->r_statelock);
	rdc->flags &= ~RDDIR;
	if (rdc->flags & RDDIRWAIT) {
		rdc->flags &= ~RDDIRWAIT;
		cv_broadcast(&rdc->cv);
	}
	error = rdc->error;
	if (error)
		rdc->flags |= RDDIRREQ;
	mutex_exit(&rp->r_statelock);

	rddir_cache_rele(rdc);

	return (error);
}
4062 4061  
/*
 * Issue an over-the-wire NFSv3 READDIR to fill rdc.  On success the
 * decoded entries, next cookie, cookie verifier, and EOF indication
 * are recorded in rdc/rp; on failure the entry buffer is freed and
 * rdc->error holds the errno.
 */
static void
nfs3readdir(vnode_t *vp, rddir_cache *rdc, cred_t *cr)
{
	int error;
	READDIR3args args;
	READDIR3vres res;
	vattr_t dva;
	rnode_t *rp;
	int douprintf;
	failinfo_t fi, *fip = NULL;
	mntinfo_t *mi;
	hrtime_t t;

	rp = VTOR(vp);
	mi = VTOMI(vp);
	ASSERT(nfs_zone() == mi->mi_zone);

	args.dir = *RTOFH3(rp);
	args.cookie = (cookie3)rdc->nfs3_cookie;
	args.cookieverf = rp->r_cookieverf;
	args.count = rdc->buflen;

	/*
	 * NFS client failover support
	 * suppress failover unless we have a zero cookie
	 * (a non-zero cookie is only meaningful to the server
	 * that issued it).
	 */
	if (args.cookie == (cookie3) 0) {
		fi.vp = vp;
		fi.fhp = (caddr_t)&args.dir;
		fi.copyproc = nfs3copyfh;
		fi.lookupproc = nfs3lookup;
		fi.xattrdirproc = acl_getxattrdir3;
		fip = &fi;
	}

#ifdef DEBUG
	rdc->entries = rddir_cache_buf_alloc(rdc->buflen, KM_SLEEP);
#else
	rdc->entries = kmem_alloc(rdc->buflen, KM_SLEEP);
#endif

	/* The XDR decode routine fills entries directly into rdc's buffer. */
	res.entries = (dirent64_t *)rdc->entries;
	res.entries_size = rdc->buflen;
	res.dir_attributes.fres.vap = &dva;
	res.dir_attributes.fres.vp = vp;
	res.loff = rdc->nfs3_cookie;

	douprintf = 1;

	if (mi->mi_io_kstats) {
		mutex_enter(&mi->mi_lock);
		kstat_runq_enter(KSTAT_IO_PTR(mi->mi_io_kstats));
		mutex_exit(&mi->mi_lock);
	}

	t = gethrtime();

	error = rfs3call(VTOMI(vp), NFSPROC3_READDIR,
	    xdr_READDIR3args, (caddr_t)&args,
	    xdr_READDIR3vres, (caddr_t)&res, cr,
	    &douprintf, &res.status, 0, fip);

	if (mi->mi_io_kstats) {
		mutex_enter(&mi->mi_lock);
		kstat_runq_exit(KSTAT_IO_PTR(mi->mi_io_kstats));
		mutex_exit(&mi->mi_lock);
	}

	if (error)
		goto err;

	/* Cache the post-op directory attributes returned by the server. */
	nfs3_cache_post_op_vattr(vp, &res.dir_attributes, t, cr);

	error = geterrno3(res.status);
	if (error) {
		PURGE_STALE_FH(error, vp, cr);
		goto err;
	}

	if (mi->mi_io_kstats) {
		mutex_enter(&mi->mi_lock);
		KSTAT_IO_PTR(mi->mi_io_kstats)->reads++;
		KSTAT_IO_PTR(mi->mi_io_kstats)->nread += res.size;
		mutex_exit(&mi->mi_lock);
	}

	/* Record where the next readdir should pick up. */
	rdc->nfs3_ncookie = res.loff;
	rp->r_cookieverf = res.cookieverf;
	rdc->eof = res.eof ? 1 : 0;
	rdc->entlen = res.size;
	ASSERT(rdc->entlen <= rdc->buflen);
	rdc->error = 0;
	return;

err:
	kmem_free(rdc->entries, rdc->buflen);
	rdc->entries = NULL;
	rdc->error = error;
}
4162 4161  
/*
 * Read directory entries.
 * There are some weird things to look out for here.  The uio_loffset
 * field is either 0 or it is the offset returned from a previous
 * readdir.  It is an opaque value used by the server to find the
 * correct directory block to read. The count field is the number
 * of blocks to read on the server.  This is advisory only, the server
 * may return only one block's worth of entries.  Entries may be compressed
 * on the server.
 */
/*
 * Issue an over-the-wire NFSv3 READDIRPLUS to fill rdc.  Like
 * nfs3readdir() but also returns per-entry attributes and file
 * handles.  If the server does not support READDIRPLUS, the mount
 * is marked MI_READDIRONLY and rdc->error is set (EOPNOTSUPP) so
 * the caller can fall back to READDIR.
 */
static void
nfs3readdirplus(vnode_t *vp, rddir_cache *rdc, cred_t *cr)
{
	int error;
	READDIRPLUS3args args;
	READDIRPLUS3vres res;
	vattr_t dva;
	rnode_t *rp;
	mntinfo_t *mi;
	int douprintf;
	failinfo_t fi, *fip = NULL;

	rp = VTOR(vp);
	mi = VTOMI(vp);
	ASSERT(nfs_zone() == mi->mi_zone);

	args.dir = *RTOFH3(rp);
	args.cookie = (cookie3)rdc->nfs3_cookie;
	args.cookieverf = rp->r_cookieverf;
	args.dircount = rdc->buflen;
	/* Cap the attribute/handle payload at the transfer size. */
	args.maxcount = mi->mi_tsize;

	/*
	 * NFS client failover support
	 * suppress failover unless we have a zero cookie
	 */
	if (args.cookie == (cookie3)0) {
		fi.vp = vp;
		fi.fhp = (caddr_t)&args.dir;
		fi.copyproc = nfs3copyfh;
		fi.lookupproc = nfs3lookup;
		fi.xattrdirproc = acl_getxattrdir3;
		fip = &fi;
	}

#ifdef DEBUG
	rdc->entries = rddir_cache_buf_alloc(rdc->buflen, KM_SLEEP);
#else
	rdc->entries = kmem_alloc(rdc->buflen, KM_SLEEP);
#endif

	/* The XDR decode routine fills entries directly into rdc's buffer. */
	res.entries = (dirent64_t *)rdc->entries;
	res.entries_size = rdc->buflen;
	res.dir_attributes.fres.vap = &dva;
	res.dir_attributes.fres.vp = vp;
	res.loff = rdc->nfs3_cookie;
	res.credentials = cr;

	douprintf = 1;

	if (mi->mi_io_kstats) {
		mutex_enter(&mi->mi_lock);
		kstat_runq_enter(KSTAT_IO_PTR(mi->mi_io_kstats));
		mutex_exit(&mi->mi_lock);
	}

	res.time = gethrtime();

	error = rfs3call(mi, NFSPROC3_READDIRPLUS,
	    xdr_READDIRPLUS3args, (caddr_t)&args,
	    xdr_READDIRPLUS3vres, (caddr_t)&res, cr,
	    &douprintf, &res.status, 0, fip);

	if (mi->mi_io_kstats) {
		mutex_enter(&mi->mi_lock);
		kstat_runq_exit(KSTAT_IO_PTR(mi->mi_io_kstats));
		mutex_exit(&mi->mi_lock);
	}

	if (error) {
		goto err;
	}

	nfs3_cache_post_op_vattr(vp, &res.dir_attributes, res.time, cr);

	error = geterrno3(res.status);
	if (error) {
		PURGE_STALE_FH(error, vp, cr);
		if (error == EOPNOTSUPP) {
			/*
			 * Server doesn't do READDIRPLUS; remember that
			 * so future readdirs go straight to READDIR.
			 */
			mutex_enter(&mi->mi_lock);
			mi->mi_flags |= MI_READDIRONLY;
			mutex_exit(&mi->mi_lock);
		}
		goto err;
	}

	if (mi->mi_io_kstats) {
		mutex_enter(&mi->mi_lock);
		KSTAT_IO_PTR(mi->mi_io_kstats)->reads++;
		KSTAT_IO_PTR(mi->mi_io_kstats)->nread += res.size;
		mutex_exit(&mi->mi_lock);
	}

	/* Record where the next readdir should pick up. */
	rdc->nfs3_ncookie = res.loff;
	rp->r_cookieverf = res.cookieverf;
	rdc->eof = res.eof ? 1 : 0;
	rdc->entlen = res.size;
	ASSERT(rdc->entlen <= rdc->buflen);
	rdc->error = 0;

	return;

err:
	kmem_free(rdc->entries, rdc->buflen);
	rdc->entries = NULL;
	rdc->error = error;
}
4280 4279  
#ifdef DEBUG
/*
 * Debug tunable: when set, nfs3_bio() drops into the kernel debugger
 * after warning about a zero-length write.
 */
static int nfs3_bio_do_stop = 0;
#endif
4284 4283  
/*
 * Perform the I/O described by bp: a read is serviced via nfs3read(),
 * a write via nfs3write().  Cached rnode credentials are tried first;
 * on EACCES the operation is retried once with the caller's
 * credentials (cr), which then replace the cached ones.  Returns 0,
 * NFS_EOF (read entirely past EOF), or an errno; B_ERROR is set on
 * the buf for real errors.
 */
static int
nfs3_bio(struct buf *bp, stable_how *stab_comm, cred_t *cr)
{
	rnode_t *rp = VTOR(bp->b_vp);
	int count;
	int error;
	cred_t *cred;
	offset_t offset;

	ASSERT(nfs_zone() == VTOMI(bp->b_vp)->mi_zone);
	offset = ldbtob(bp->b_lblkno);

	DTRACE_IO1(start, struct buf *, bp);

	if (bp->b_flags & B_READ) {
		/*
		 * Use the credentials cached on the rnode if present;
		 * otherwise cache the caller's (one hold for the rnode,
		 * one for this I/O).
		 */
		mutex_enter(&rp->r_statelock);
		if (rp->r_cred != NULL) {
			cred = rp->r_cred;
			crhold(cred);
		} else {
			rp->r_cred = cr;
			crhold(cr);
			cred = cr;
			crhold(cred);
		}
		mutex_exit(&rp->r_statelock);
	read_again:
		error = bp->b_error = nfs3read(bp->b_vp, bp->b_un.b_addr,
		    offset, bp->b_bcount, &bp->b_resid, cred);
		crfree(cred);
		if (!error) {
			if (bp->b_resid) {
				/*
				 * Didn't get it all because we hit EOF,
				 * zero all the memory beyond the EOF.
				 */
				/* bzero(rdaddr + */
				bzero(bp->b_un.b_addr +
				    bp->b_bcount - bp->b_resid, bp->b_resid);
			}
			mutex_enter(&rp->r_statelock);
			if (bp->b_resid == bp->b_bcount &&
			    offset >= rp->r_size) {
				/*
				 * We didn't read anything at all as we are
				 * past EOF.  Return an error indicator back
				 * but don't destroy the pages (yet).
				 */
				error = NFS_EOF;
			}
			mutex_exit(&rp->r_statelock);
		} else if (error == EACCES) {
			/*
			 * Cached credentials were rejected; retry once
			 * with the caller's credentials.
			 */
			mutex_enter(&rp->r_statelock);
			if (cred != cr) {
				if (rp->r_cred != NULL)
					crfree(rp->r_cred);
				rp->r_cred = cr;
				crhold(cr);
				cred = cr;
				crhold(cred);
				mutex_exit(&rp->r_statelock);
				goto read_again;
			}
			mutex_exit(&rp->r_statelock);
		}
	} else {
		if (!(rp->r_flags & RSTALE)) {
			/* Same credential selection as the read path. */
			mutex_enter(&rp->r_statelock);
			if (rp->r_cred != NULL) {
				cred = rp->r_cred;
				crhold(cred);
			} else {
				rp->r_cred = cr;
				crhold(cr);
				cred = cr;
				crhold(cred);
			}
			mutex_exit(&rp->r_statelock);
		write_again:
			/*
			 * Clamp the write to the current file size; the
			 * file may have shrunk since the pages were
			 * dirtied.
			 */
			mutex_enter(&rp->r_statelock);
			count = MIN(bp->b_bcount, rp->r_size - offset);
			mutex_exit(&rp->r_statelock);
			if (count < 0)
				cmn_err(CE_PANIC, "nfs3_bio: write count < 0");
#ifdef DEBUG
			if (count == 0) {
				zcmn_err(getzoneid(), CE_WARN,
				    "nfs3_bio: zero length write at %lld",
				    offset);
				nfs_printfhandle(&rp->r_fh);
				if (nfs3_bio_do_stop)
					debug_enter("nfs3_bio");
			}
#endif
			error = nfs3write(bp->b_vp, bp->b_un.b_addr, offset,
			    count, cred, stab_comm);
			if (error == EACCES) {
				/*
				 * As above: retry once with the caller's
				 * credentials after replacing the cached
				 * ones.
				 */
				mutex_enter(&rp->r_statelock);
				if (cred != cr) {
					if (rp->r_cred != NULL)
						crfree(rp->r_cred);
					rp->r_cred = cr;
					crhold(cr);
					crfree(cred);
					cred = cr;
					crhold(cred);
					mutex_exit(&rp->r_statelock);
					goto write_again;
				}
				mutex_exit(&rp->r_statelock);
			}
			bp->b_error = error;
			if (error && error != EINTR) {
				/*
				 * Don't print EDQUOT errors on the console.
				 * Don't print asynchronous EACCES errors.
				 * Don't print EFBIG errors.
				 * Print all other write errors.
				 */
				if (error != EDQUOT && error != EFBIG &&
				    (error != EACCES ||
				    !(bp->b_flags & B_ASYNC)))
					nfs_write_error(bp->b_vp, error, cred);
				/*
				 * Update r_error and r_flags as appropriate.
				 * If the error was ESTALE, then mark the
				 * rnode as not being writeable and save
				 * the error status.  Otherwise, save any
				 * errors which occur from asynchronous
				 * page invalidations.  Any errors occurring
				 * from other operations should be saved
				 * by the caller.
				 */
				mutex_enter(&rp->r_statelock);
				if (error == ESTALE) {
					rp->r_flags |= RSTALE;
					if (!rp->r_error)
						rp->r_error = error;
				} else if (!rp->r_error &&
				    (bp->b_flags &
				    (B_INVAL|B_FORCE|B_ASYNC)) ==
				    (B_INVAL|B_FORCE|B_ASYNC)) {
					rp->r_error = error;
				}
				mutex_exit(&rp->r_statelock);
			}
			crfree(cred);
		} else {
			error = rp->r_error;
			/*
			 * A close may have cleared r_error, if so,
			 * propagate ESTALE error return properly
			 */
			if (error == 0)
				error = ESTALE;
		}
	}

	if (error != 0 && error != NFS_EOF)
		bp->b_flags |= B_ERROR;

	DTRACE_IO1(done, struct buf *, bp);

	return (error);
}
4450 4449  
4451 4450  /* ARGSUSED */
4452 4451  static int
4453 4452  nfs3_fid(vnode_t *vp, fid_t *fidp, caller_context_t *ct)
4454 4453  {
4455 4454          rnode_t *rp;
4456 4455  
4457 4456          if (nfs_zone() != VTOMI(vp)->mi_zone)
4458 4457                  return (EIO);
4459 4458          rp = VTOR(vp);
4460 4459  
4461 4460          if (fidp->fid_len < (ushort_t)rp->r_fh.fh_len) {
4462 4461                  fidp->fid_len = rp->r_fh.fh_len;
4463 4462                  return (ENOSPC);
4464 4463          }
4465 4464          fidp->fid_len = rp->r_fh.fh_len;
4466 4465          bcopy(rp->r_fh.fh_buf, fidp->fid_data, fidp->fid_len);
4467 4466          return (0);
4468 4467  }
4469 4468  
4470 4469  /* ARGSUSED2 */
4471 4470  static int
4472 4471  nfs3_rwlock(vnode_t *vp, int write_lock, caller_context_t *ctp)
4473 4472  {
4474 4473          rnode_t *rp = VTOR(vp);
4475 4474  
4476 4475          if (!write_lock) {
4477 4476                  (void) nfs_rw_enter_sig(&rp->r_rwlock, RW_READER, FALSE);
4478 4477                  return (V_WRITELOCK_FALSE);
4479 4478          }
4480 4479  
4481 4480          if ((rp->r_flags & RDIRECTIO) || (VTOMI(vp)->mi_flags & MI_DIRECTIO)) {
4482 4481                  (void) nfs_rw_enter_sig(&rp->r_rwlock, RW_READER, FALSE);
4483 4482                  if (rp->r_mapcnt == 0 && !vn_has_cached_data(vp))
4484 4483                          return (V_WRITELOCK_FALSE);
4485 4484                  nfs_rw_exit(&rp->r_rwlock);
4486 4485          }
4487 4486  
4488 4487          (void) nfs_rw_enter_sig(&rp->r_rwlock, RW_WRITER, FALSE);
4489 4488          return (V_WRITELOCK_TRUE);
4490 4489  }
4491 4490  
4492 4491  /* ARGSUSED */
4493 4492  static void
4494 4493  nfs3_rwunlock(vnode_t *vp, int write_lock, caller_context_t *ctp)
4495 4494  {
4496 4495          rnode_t *rp = VTOR(vp);
4497 4496  
4498 4497          nfs_rw_exit(&rp->r_rwlock);
4499 4498  }
4500 4499  
4501 4500  /* ARGSUSED */
4502 4501  static int
4503 4502  nfs3_seek(vnode_t *vp, offset_t ooff, offset_t *noffp, caller_context_t *ct)
4504 4503  {
4505 4504  
4506 4505          /*
4507 4506           * Because we stuff the readdir cookie into the offset field
4508 4507           * someone may attempt to do an lseek with the cookie which
4509 4508           * we want to succeed.
4510 4509           */
4511 4510          if (vp->v_type == VDIR)
4512 4511                  return (0);
4513 4512          if (*noffp < 0)
4514 4513                  return (EINVAL);
4515 4514          return (0);
4516 4515  }
4517 4516  
/*
 * number of nfs3_bsize blocks to read ahead.
 * Tunable; consulted by nfs3_getapage() when sizing the readahead
 * window.
 */
static int nfs3_nra = 4;

#ifdef DEBUG
static int nfs3_lostpage = 0;	/* number of times we lost original page */
#endif
4526 4525  
/*
 * Return all the pages from [off..off+len) in file
 */
/*
 * VOP_GETPAGE: validate the caches, then hand off to pvn_getpages()
 * with nfs3_getapage() as the per-page routine.  An NFS_EOF result
 * from the page routine means our cached file size was stale, so the
 * caches are purged and the whole operation retried.
 */
/* ARGSUSED */
static int
nfs3_getpage(vnode_t *vp, offset_t off, size_t len, uint_t *protp,
	page_t *pl[], size_t plsz, struct seg *seg, caddr_t addr,
	enum seg_rw rw, cred_t *cr, caller_context_t *ct)
{
	rnode_t *rp;
	int error;
	mntinfo_t *mi;

	if (vp->v_flag & VNOMAP)
		return (ENOSYS);

	if (nfs_zone() != VTOMI(vp)->mi_zone)
		return (EIO);
	if (protp != NULL)
		*protp = PROT_ALL;

	/*
	 * Now validate that the caches are up to date.
	 */
	error = nfs3_validate_caches(vp, cr);
	if (error)
		return (error);

	rp = VTOR(vp);
	mi = VTOMI(vp);
retry:
	mutex_enter(&rp->r_statelock);

	/*
	 * Don't create dirty pages faster than they
	 * can be cleaned so that the system doesn't
	 * get imbalanced.  If the async queue is
	 * maxed out, then wait for it to drain before
	 * creating more dirty pages.  Also, wait for
	 * any threads doing pagewalks in the vop_getattr
	 * entry points so that they don't block for
	 * long periods.
	 */
	if (rw == S_CREATE) {
		while ((mi->mi_max_threads != 0 &&
		    rp->r_awcount > 2 * mi->mi_max_threads) ||
		    rp->r_gcount > 0)
			cv_wait(&rp->r_cv, &rp->r_statelock);
	}

	/*
	 * If we are getting called as a side effect of an nfs_write()
	 * operation the local file size might not be extended yet.
	 * In this case we want to be able to return pages of zeroes.
	 */
	if (off + len > rp->r_size + PAGEOFFSET && seg != segkmap) {
		mutex_exit(&rp->r_statelock);
		return (EFAULT);		/* beyond EOF */
	}

	mutex_exit(&rp->r_statelock);

	error = pvn_getpages(nfs3_getapage, vp, off, len, protp,
	    pl, plsz, seg, addr, rw, cr);

	switch (error) {
	case NFS_EOF:
		/* Stale size: refetch attributes and start over. */
		nfs_purge_caches(vp, NFS_NOPURGE_DNLC, cr);
		goto retry;
	case ESTALE:
		PURGE_STALE_FH(error, vp, cr);
	}

	return (error);
}
4602 4601  
4603 4602  /*
4604 4603   * Called from pvn_getpages to get a particular page.
4605 4604   */
4606 4605  /* ARGSUSED */
4607 4606  static int
4608 4607  nfs3_getapage(vnode_t *vp, u_offset_t off, size_t len, uint_t *protp,
4609 4608          page_t *pl[], size_t plsz, struct seg *seg, caddr_t addr,
4610 4609          enum seg_rw rw, cred_t *cr)
4611 4610  {
4612 4611          rnode_t *rp;
4613 4612          uint_t bsize;
4614 4613          struct buf *bp;
4615 4614          page_t *pp;
4616 4615          u_offset_t lbn;
4617 4616          u_offset_t io_off;
4618 4617          u_offset_t blkoff;
4619 4618          u_offset_t rablkoff;
4620 4619          size_t io_len;
4621 4620          uint_t blksize;
4622 4621          int error;
4623 4622          int readahead;
4624 4623          int readahead_issued = 0;
4625 4624          int ra_window; /* readahead window */
4626 4625          page_t *pagefound;
4627 4626          page_t *savepp;
4628 4627  
4629 4628          if (nfs_zone() != VTOMI(vp)->mi_zone)
4630 4629                  return (EIO);
4631 4630          rp = VTOR(vp);
4632 4631          bsize = MAX(vp->v_vfsp->vfs_bsize, PAGESIZE);
4633 4632  
4634 4633  reread:
4635 4634          bp = NULL;
4636 4635          pp = NULL;
4637 4636          pagefound = NULL;
4638 4637  
4639 4638          if (pl != NULL)
4640 4639                  pl[0] = NULL;
4641 4640  
4642 4641          error = 0;
4643 4642          lbn = off / bsize;
4644 4643          blkoff = lbn * bsize;
4645 4644  
4646 4645          /*
4647 4646           * Queueing up the readahead before doing the synchronous read
4648 4647           * results in a significant increase in read throughput because
4649 4648           * of the increased parallelism between the async threads and
4650 4649           * the process context.
4651 4650           */
4652 4651          if ((off & ((vp->v_vfsp->vfs_bsize) - 1)) == 0 &&
4653 4652              rw != S_CREATE &&
4654 4653              !(vp->v_flag & VNOCACHE)) {
4655 4654                  mutex_enter(&rp->r_statelock);
4656 4655  
4657 4656                  /*
4658 4657                   * Calculate the number of readaheads to do.
4659 4658                   * a) No readaheads at offset = 0.
4660 4659                   * b) Do maximum(nfs3_nra) readaheads when the readahead
4661 4660                   *    window is closed.
4662 4661                   * c) Do readaheads between 1 to (nfs3_nra - 1) depending
4663 4662                   *    upon how far the readahead window is open or close.
4664 4663                   * d) No readaheads if rp->r_nextr is not within the scope
4665 4664                   *    of the readahead window (random i/o).
4666 4665                   */
4667 4666  
4668 4667                  if (off == 0)
4669 4668                          readahead = 0;
4670 4669                  else if (blkoff == rp->r_nextr)
4671 4670                          readahead = nfs3_nra;
4672 4671                  else if (rp->r_nextr > blkoff &&
4673 4672                      ((ra_window = (rp->r_nextr - blkoff) / bsize)
4674 4673                      <= (nfs3_nra - 1)))
4675 4674                          readahead = nfs3_nra - ra_window;
4676 4675                  else
4677 4676                          readahead = 0;
4678 4677  
4679 4678                  rablkoff = rp->r_nextr;
4680 4679                  while (readahead > 0 && rablkoff + bsize < rp->r_size) {
4681 4680                          mutex_exit(&rp->r_statelock);
4682 4681                          if (nfs_async_readahead(vp, rablkoff + bsize,
4683 4682                              addr + (rablkoff + bsize - off), seg, cr,
4684 4683                              nfs3_readahead) < 0) {
4685 4684                                  mutex_enter(&rp->r_statelock);
4686 4685                                  break;
4687 4686                          }
4688 4687                          readahead--;
4689 4688                          rablkoff += bsize;
4690 4689                          /*
4691 4690                           * Indicate that we did a readahead so
4692 4691                           * readahead offset is not updated
4693 4692                           * by the synchronous read below.
4694 4693                           */
4695 4694                          readahead_issued = 1;
4696 4695                          mutex_enter(&rp->r_statelock);
4697 4696                          /*
4698 4697                           * set readahead offset to
4699 4698                           * offset of last async readahead
4700 4699                           * request.
4701 4700                           */
4702 4701                          rp->r_nextr = rablkoff;
4703 4702                  }
4704 4703                  mutex_exit(&rp->r_statelock);
4705 4704          }
4706 4705  
4707 4706  again:
4708 4707          if ((pagefound = page_exists(vp, off)) == NULL) {
4709 4708                  if (pl == NULL) {
4710 4709                          (void) nfs_async_readahead(vp, blkoff, addr, seg, cr,
4711 4710                              nfs3_readahead);
4712 4711                  } else if (rw == S_CREATE) {
4713 4712                          /*
4714 4713                           * Block for this page is not allocated, or the offset
4715 4714                           * is beyond the current allocation size, or we're
4716 4715                           * allocating a swap slot and the page was not found,
4717 4716                           * so allocate it and return a zero page.
4718 4717                           */
4719 4718                          if ((pp = page_create_va(vp, off,
4720 4719                              PAGESIZE, PG_WAIT, seg, addr)) == NULL)
4721 4720                                  cmn_err(CE_PANIC, "nfs3_getapage: page_create");
4722 4721                          io_len = PAGESIZE;
4723 4722                          mutex_enter(&rp->r_statelock);
4724 4723                          rp->r_nextr = off + PAGESIZE;
4725 4724                          mutex_exit(&rp->r_statelock);
4726 4725                  } else {
4727 4726                          /*
4728 4727                           * Need to go to server to get a BLOCK, exception to
4729 4728                           * that being while reading at offset = 0 or doing
4730 4729                           * random i/o, in that case read only a PAGE.
4731 4730                           */
4732 4731                          mutex_enter(&rp->r_statelock);
4733 4732                          if (blkoff < rp->r_size &&
4734 4733                              blkoff + bsize >= rp->r_size) {
4735 4734                                  /*
4736 4735                                   * If only a block or less is left in
4737 4736                                   * the file, read all that is remaining.
4738 4737                                   */
4739 4738                                  if (rp->r_size <= off) {
4740 4739                                          /*
4741 4740                                           * Trying to access beyond EOF,
4742 4741                                           * set up to get at least one page.
4743 4742                                           */
4744 4743                                          blksize = off + PAGESIZE - blkoff;
4745 4744                                  } else
4746 4745                                          blksize = rp->r_size - blkoff;
4747 4746                          } else if ((off == 0) ||
4748 4747                              (off != rp->r_nextr && !readahead_issued)) {
4749 4748                                  blksize = PAGESIZE;
4750 4749                                  blkoff = off; /* block = page here */
4751 4750                          } else
4752 4751                                  blksize = bsize;
4753 4752                          mutex_exit(&rp->r_statelock);
4754 4753  
4755 4754                          pp = pvn_read_kluster(vp, off, seg, addr, &io_off,
4756 4755                              &io_len, blkoff, blksize, 0);
4757 4756  
4758 4757                          /*
4759 4758                           * Some other thread has entered the page,
4760 4759                           * so just use it.
4761 4760                           */
4762 4761                          if (pp == NULL)
4763 4762                                  goto again;
4764 4763  
4765 4764                          /*
4766 4765                           * Now round the request size up to page boundaries.
4767 4766                           * This ensures that the entire page will be
4768 4767                           * initialized to zeroes if EOF is encountered.
4769 4768                           */
4770 4769                          io_len = ptob(btopr(io_len));
4771 4770  
4772 4771                          bp = pageio_setup(pp, io_len, vp, B_READ);
4773 4772                          ASSERT(bp != NULL);
4774 4773  
4775 4774                          /*
4776 4775                           * pageio_setup should have set b_addr to 0.  This
4777 4776                           * is correct since we want to do I/O on a page
4778 4777                           * boundary.  bp_mapin will use this addr to calculate
4779 4778                           * an offset, and then set b_addr to the kernel virtual
4780 4779                           * address it allocated for us.
4781 4780                           */
4782 4781                          ASSERT(bp->b_un.b_addr == 0);
4783 4782  
4784 4783                          bp->b_edev = 0;
4785 4784                          bp->b_dev = 0;
4786 4785                          bp->b_lblkno = lbtodb(io_off);
4787 4786                          bp->b_file = vp;
4788 4787                          bp->b_offset = (offset_t)off;
4789 4788                          bp_mapin(bp);
4790 4789  
4791 4790                          /*
4792 4791                           * If doing a write beyond what we believe is EOF,
4793 4792                           * don't bother trying to read the pages from the
4794 4793                           * server, we'll just zero the pages here.  We
4795 4794                           * don't check that the rw flag is S_WRITE here
4796 4795                           * because some implementations may attempt a
4797 4796                           * read access to the buffer before copying data.
4798 4797                           */
4799 4798                          mutex_enter(&rp->r_statelock);
4800 4799                          if (io_off >= rp->r_size && seg == segkmap) {
4801 4800                                  mutex_exit(&rp->r_statelock);
4802 4801                                  bzero(bp->b_un.b_addr, io_len);
4803 4802                          } else {
4804 4803                                  mutex_exit(&rp->r_statelock);
4805 4804                                  error = nfs3_bio(bp, NULL, cr);
4806 4805                          }
4807 4806  
4808 4807                          /*
4809 4808                           * Unmap the buffer before freeing it.
4810 4809                           */
4811 4810                          bp_mapout(bp);
4812 4811                          pageio_done(bp);
4813 4812  
4814 4813                          savepp = pp;
4815 4814                          do {
4816 4815                                  pp->p_fsdata = C_NOCOMMIT;
4817 4816                          } while ((pp = pp->p_next) != savepp);
4818 4817  
4819 4818                          if (error == NFS_EOF) {
4820 4819                                  /*
4821 4820                                   * If doing a write system call just return
4822 4821                                   * zeroed pages, else user tried to get pages
4823 4822                                   * beyond EOF, return error.  We don't check
4824 4823                                   * that the rw flag is S_WRITE here because
4825 4824                                   * some implementations may attempt a read
4826 4825                                   * access to the buffer before copying data.
4827 4826                                   */
4828 4827                                  if (seg == segkmap)
4829 4828                                          error = 0;
4830 4829                                  else
4831 4830                                          error = EFAULT;
4832 4831                          }
4833 4832  
4834 4833                          if (!readahead_issued && !error) {
4835 4834                                  mutex_enter(&rp->r_statelock);
4836 4835                                  rp->r_nextr = io_off + io_len;
4837 4836                                  mutex_exit(&rp->r_statelock);
4838 4837                          }
4839 4838                  }
4840 4839          }
4841 4840  
4842 4841  out:
4843 4842          if (pl == NULL)
4844 4843                  return (error);
4845 4844  
4846 4845          if (error) {
4847 4846                  if (pp != NULL)
4848 4847                          pvn_read_done(pp, B_ERROR);
4849 4848                  return (error);
4850 4849          }
4851 4850  
4852 4851          if (pagefound) {
4853 4852                  se_t se = (rw == S_CREATE ? SE_EXCL : SE_SHARED);
4854 4853  
4855 4854                  /*
4856 4855                   * Page exists in the cache, acquire the appropriate lock.
4857 4856                   * If this fails, start all over again.
4858 4857                   */
4859 4858                  if ((pp = page_lookup(vp, off, se)) == NULL) {
4860 4859  #ifdef DEBUG
4861 4860                          nfs3_lostpage++;
4862 4861  #endif
4863 4862                          goto reread;
4864 4863                  }
4865 4864                  pl[0] = pp;
4866 4865                  pl[1] = NULL;
4867 4866                  return (0);
4868 4867          }
4869 4868  
4870 4869          if (pp != NULL)
4871 4870                  pvn_plist_init(pp, pl, plsz, off, io_len, rw);
4872 4871  
4873 4872          return (error);
4874 4873  }
4875 4874  
/*
 * Asynchronous readahead worker, dispatched via nfs_async_readahead()
 * (see nfs3_getapage above).  Reads the block starting at 'blkoff'
 * (shortened to the remaining file size if less than a block is left)
 * into freshly klustered pages and releases them with pvn_read_done().
 *
 * Runs in an async thread in the mount's zone; returns no status —
 * on any failure the readahead is simply abandoned.
 */
static void
nfs3_readahead(vnode_t *vp, u_offset_t blkoff, caddr_t addr, struct seg *seg,
        cred_t *cr)
{
        int error;
        page_t *pp;
        u_offset_t io_off;
        size_t io_len;
        struct buf *bp;
        uint_t bsize, blksize;
        rnode_t *rp = VTOR(vp);
        page_t *savepp;

        ASSERT(nfs_zone() == VTOMI(vp)->mi_zone);
        bsize = MAX(vp->v_vfsp->vfs_bsize, PAGESIZE);

        mutex_enter(&rp->r_statelock);
        if (blkoff < rp->r_size && blkoff + bsize > rp->r_size) {
                /*
                 * If less than a block left in file read less
                 * than a block.
                 */
                blksize = rp->r_size - blkoff;
        } else
                blksize = bsize;
        mutex_exit(&rp->r_statelock);

        pp = pvn_read_kluster(vp, blkoff, segkmap, addr,
            &io_off, &io_len, blkoff, blksize, 1);
        /*
         * The isra flag passed to the kluster function is 1, we may have
         * gotten a return value of NULL for a variety of reasons (# of free
         * pages < minfree, someone entered the page on the vnode etc). In all
         * cases, we want to punt on the readahead.
         */
        if (pp == NULL)
                return;

        /*
         * Now round the request size up to page boundaries.
         * This ensures that the entire page will be
         * initialized to zeroes if EOF is encountered.
         */
        io_len = ptob(btopr(io_len));

        bp = pageio_setup(pp, io_len, vp, B_READ);
        ASSERT(bp != NULL);

        /*
         * pageio_setup should have set b_addr to 0.  This is correct since
         * we want to do I/O on a page boundary. bp_mapin() will use this addr
         * to calculate an offset, and then set b_addr to the kernel virtual
         * address it allocated for us.
         */
        ASSERT(bp->b_un.b_addr == 0);

        bp->b_edev = 0;
        bp->b_dev = 0;
        bp->b_lblkno = lbtodb(io_off);
        bp->b_file = vp;
        bp->b_offset = (offset_t)blkoff;
        bp_mapin(bp);

        /*
         * If doing a write beyond what we believe is EOF, don't bother trying
         * to read the pages from the server, we'll just zero the pages here.
         * We don't check that the rw flag is S_WRITE here because some
         * implementations may attempt a read access to the buffer before
         * copying data.
         */
        mutex_enter(&rp->r_statelock);
        if (io_off >= rp->r_size && seg == segkmap) {
                mutex_exit(&rp->r_statelock);
                bzero(bp->b_un.b_addr, io_len);
                error = 0;
        } else {
                mutex_exit(&rp->r_statelock);
                error = nfs3_bio(bp, NULL, cr);
                if (error == NFS_EOF)
                        error = 0;
        }

        /*
         * Unmap the buffer before freeing it.
         */
        bp_mapout(bp);
        pageio_done(bp);

        /* Mark every page in the kluster as not to commit. */
        savepp = pp;
        do {
                pp->p_fsdata = C_NOCOMMIT;
        } while ((pp = pp->p_next) != savepp);

        pvn_read_done(pp, error ? B_READ | B_ERROR : B_READ);

        /*
         * In case of error set readahead offset
         * to the lowest offset.
         * pvn_read_done() calls VN_DISPOSE to destroy the pages
         */
        if (error && rp->r_nextr > io_off) {
                mutex_enter(&rp->r_statelock);
                /* Re-check under the lock: another thread may have won. */
                if (rp->r_nextr > io_off)
                        rp->r_nextr = io_off;
                mutex_exit(&rp->r_statelock);
        }
}
4983 4982  
4984 4983  /*
4985 4984   * Flags are composed of {B_INVAL, B_FREE, B_DONTNEED, B_FORCE}
4986 4985   * If len == 0, do from off to EOF.
4987 4986   *
4988 4987   * The normal cases should be len == 0 && off == 0 (entire vp list),
4989 4988   * len == MAXBSIZE (from segmap_release actions), and len == PAGESIZE
4990 4989   * (from pageout).
4991 4990   */
4992 4991  /* ARGSUSED */
4993 4992  static int
4994 4993  nfs3_putpage(vnode_t *vp, offset_t off, size_t len, int flags, cred_t *cr,
4995 4994          caller_context_t *ct)
4996 4995  {
4997 4996          int error;
4998 4997          rnode_t *rp;
4999 4998  
5000 4999          ASSERT(cr != NULL);
5001 5000  
5002 5001          /*
5003 5002           * XXX - Why should this check be made here?
5004 5003           */
5005 5004          if (vp->v_flag & VNOMAP)
5006 5005                  return (ENOSYS);
5007 5006          if (len == 0 && !(flags & B_INVAL) && vn_is_readonly(vp))
5008 5007                  return (0);
5009 5008          if (!(flags & B_ASYNC) && nfs_zone() != VTOMI(vp)->mi_zone)
5010 5009                  return (EIO);
5011 5010  
5012 5011          rp = VTOR(vp);
5013 5012          mutex_enter(&rp->r_statelock);
5014 5013          rp->r_count++;
5015 5014          mutex_exit(&rp->r_statelock);
5016 5015          error = nfs_putpages(vp, off, len, flags, cr);
5017 5016          mutex_enter(&rp->r_statelock);
5018 5017          rp->r_count--;
5019 5018          cv_broadcast(&rp->r_cv);
5020 5019          mutex_exit(&rp->r_statelock);
5021 5020  
5022 5021          return (error);
5023 5022  }
5024 5023  
5025 5024  /*
5026 5025   * Write out a single page, possibly klustering adjacent dirty pages.
5027 5026   */
5028 5027  int
5029 5028  nfs3_putapage(vnode_t *vp, page_t *pp, u_offset_t *offp, size_t *lenp,
5030 5029          int flags, cred_t *cr)
5031 5030  {
5032 5031          u_offset_t io_off;
5033 5032          u_offset_t lbn_off;
5034 5033          u_offset_t lbn;
5035 5034          size_t io_len;
5036 5035          uint_t bsize;
5037 5036          int error;
5038 5037          rnode_t *rp;
5039 5038  
5040 5039          ASSERT(!vn_is_readonly(vp));
5041 5040          ASSERT(pp != NULL);
5042 5041          ASSERT(cr != NULL);
5043 5042          ASSERT((flags & B_ASYNC) || nfs_zone() == VTOMI(vp)->mi_zone);
5044 5043  
5045 5044          rp = VTOR(vp);
5046 5045          ASSERT(rp->r_count > 0);
5047 5046  
5048 5047          bsize = MAX(vp->v_vfsp->vfs_bsize, PAGESIZE);
5049 5048          lbn = pp->p_offset / bsize;
5050 5049          lbn_off = lbn * bsize;
5051 5050  
5052 5051          /*
5053 5052           * Find a kluster that fits in one block, or in
5054 5053           * one page if pages are bigger than blocks.  If
5055 5054           * there is less file space allocated than a whole
5056 5055           * page, we'll shorten the i/o request below.
5057 5056           */
5058 5057          pp = pvn_write_kluster(vp, pp, &io_off, &io_len, lbn_off,
5059 5058              roundup(bsize, PAGESIZE), flags);
5060 5059  
5061 5060          /*
5062 5061           * pvn_write_kluster shouldn't have returned a page with offset
5063 5062           * behind the original page we were given.  Verify that.
5064 5063           */
5065 5064          ASSERT((pp->p_offset / bsize) >= lbn);
5066 5065  
5067 5066          /*
5068 5067           * Now pp will have the list of kept dirty pages marked for
5069 5068           * write back.  It will also handle invalidation and freeing
5070 5069           * of pages that are not dirty.  Check for page length rounding
5071 5070           * problems.
5072 5071           */
5073 5072          if (io_off + io_len > lbn_off + bsize) {
5074 5073                  ASSERT((io_off + io_len) - (lbn_off + bsize) < PAGESIZE);
5075 5074                  io_len = lbn_off + bsize - io_off;
5076 5075          }
5077 5076          /*
5078 5077           * The RMODINPROGRESS flag makes sure that nfs(3)_bio() sees a
5079 5078           * consistent value of r_size. RMODINPROGRESS is set in writerp().
5080 5079           * When RMODINPROGRESS is set it indicates that a uiomove() is in
5081 5080           * progress and the r_size has not been made consistent with the
5082 5081           * new size of the file. When the uiomove() completes the r_size is
5083 5082           * updated and the RMODINPROGRESS flag is cleared.
5084 5083           *
5085 5084           * The RMODINPROGRESS flag makes sure that nfs(3)_bio() sees a
5086 5085           * consistent value of r_size. Without this handshaking, it is
5087 5086           * possible that nfs(3)_bio() picks  up the old value of r_size
5088 5087           * before the uiomove() in writerp() completes. This will result
5089 5088           * in the write through nfs(3)_bio() being dropped.
5090 5089           *
5091 5090           * More precisely, there is a window between the time the uiomove()
5092 5091           * completes and the time the r_size is updated. If a VOP_PUTPAGE()
5093 5092           * operation intervenes in this window, the page will be picked up,
5094 5093           * because it is dirty (it will be unlocked, unless it was
5095 5094           * pagecreate'd). When the page is picked up as dirty, the dirty
5096 5095           * bit is reset (pvn_getdirty()). In nfs(3)write(), r_size is
5097 5096           * checked. This will still be the old size. Therefore the page will
5098 5097           * not be written out. When segmap_release() calls VOP_PUTPAGE(),
5099 5098           * the page will be found to be clean and the write will be dropped.
5100 5099           */
5101 5100          if (rp->r_flags & RMODINPROGRESS) {
5102 5101                  mutex_enter(&rp->r_statelock);
5103 5102                  if ((rp->r_flags & RMODINPROGRESS) &&
5104 5103                      rp->r_modaddr + MAXBSIZE > io_off &&
5105 5104                      rp->r_modaddr < io_off + io_len) {
5106 5105                          page_t *plist;
5107 5106                          /*
5108 5107                           * A write is in progress for this region of the file.
5109 5108                           * If we did not detect RMODINPROGRESS here then this
5110 5109                           * path through nfs_putapage() would eventually go to
5111 5110                           * nfs(3)_bio() and may not write out all of the data
5112 5111                           * in the pages. We end up losing data. So we decide
5113 5112                           * to set the modified bit on each page in the page
5114 5113                           * list and mark the rnode with RDIRTY. This write
5115 5114                           * will be restarted at some later time.
5116 5115                           */
5117 5116                          plist = pp;
5118 5117                          while (plist != NULL) {
5119 5118                                  pp = plist;
5120 5119                                  page_sub(&plist, pp);
5121 5120                                  hat_setmod(pp);
5122 5121                                  page_io_unlock(pp);
5123 5122                                  page_unlock(pp);
5124 5123                          }
5125 5124                          rp->r_flags |= RDIRTY;
5126 5125                          mutex_exit(&rp->r_statelock);
5127 5126                          if (offp)
5128 5127                                  *offp = io_off;
5129 5128                          if (lenp)
5130 5129                                  *lenp = io_len;
5131 5130                          return (0);
5132 5131                  }
5133 5132                  mutex_exit(&rp->r_statelock);
5134 5133          }
5135 5134  
5136 5135          if (flags & B_ASYNC) {
5137 5136                  error = nfs_async_putapage(vp, pp, io_off, io_len, flags, cr,
5138 5137                      nfs3_sync_putapage);
5139 5138          } else
5140 5139                  error = nfs3_sync_putapage(vp, pp, io_off, io_len, flags, cr);
5141 5140  
5142 5141          if (offp)
5143 5142                  *offp = io_off;
5144 5143          if (lenp)
5145 5144                  *lenp = io_len;
5146 5145          return (error);
5147 5146  }
5148 5147  
5149 5148  static int
5150 5149  nfs3_sync_putapage(vnode_t *vp, page_t *pp, u_offset_t io_off, size_t io_len,
5151 5150          int flags, cred_t *cr)
5152 5151  {
5153 5152          int error;
5154 5153          rnode_t *rp;
5155 5154  
5156 5155          ASSERT(nfs_zone() == VTOMI(vp)->mi_zone);
5157 5156  
5158 5157          flags |= B_WRITE;
5159 5158  
5160 5159          error = nfs3_rdwrlbn(vp, pp, io_off, io_len, flags, cr);
5161 5160  
5162 5161          rp = VTOR(vp);
5163 5162  
5164 5163          if ((error == ENOSPC || error == EDQUOT || error == EFBIG ||
5165 5164              error == EACCES) &&
5166 5165              (flags & (B_INVAL|B_FORCE)) != (B_INVAL|B_FORCE)) {
5167 5166                  if (!(rp->r_flags & ROUTOFSPACE)) {
5168 5167                          mutex_enter(&rp->r_statelock);
5169 5168                          rp->r_flags |= ROUTOFSPACE;
5170 5169                          mutex_exit(&rp->r_statelock);
5171 5170                  }
5172 5171                  flags |= B_ERROR;
5173 5172                  pvn_write_done(pp, flags);
5174 5173                  /*
5175 5174                   * If this was not an async thread, then try again to
5176 5175                   * write out the pages, but this time, also destroy
5177 5176                   * them whether or not the write is successful.  This
5178 5177                   * will prevent memory from filling up with these
5179 5178                   * pages and destroying them is the only alternative
5180 5179                   * if they can't be written out.
5181 5180                   *
5182 5181                   * Don't do this if this is an async thread because
5183 5182                   * when the pages are unlocked in pvn_write_done,
5184 5183                   * some other thread could have come along, locked
5185 5184                   * them, and queued for an async thread.  It would be
5186 5185                   * possible for all of the async threads to be tied
5187 5186                   * up waiting to lock the pages again and they would
5188 5187                   * all already be locked and waiting for an async
5189 5188                   * thread to handle them.  Deadlock.
5190 5189                   */
5191 5190                  if (!(flags & B_ASYNC)) {
5192 5191                          error = nfs3_putpage(vp, io_off, io_len,
5193 5192                              B_INVAL | B_FORCE, cr, NULL);
5194 5193                  }
5195 5194          } else {
5196 5195                  if (error)
5197 5196                          flags |= B_ERROR;
5198 5197                  else if (rp->r_flags & ROUTOFSPACE) {
5199 5198                          mutex_enter(&rp->r_statelock);
5200 5199                          rp->r_flags &= ~ROUTOFSPACE;
5201 5200                          mutex_exit(&rp->r_statelock);
5202 5201                  }
5203 5202                  pvn_write_done(pp, flags);
5204 5203                  if (freemem < desfree)
5205 5204                          (void) nfs3_commit_vp(vp, (u_offset_t)0, 0, cr);
5206 5205          }
5207 5206  
5208 5207          return (error);
5209 5208  }
5210 5209  
/*
 * VOP_MAP entry point: establish a memory mapping of 'vp' in address
 * space 'as' at *addrp (chosen via choose_addr under as_rangelock).
 * Refuses the mapping for non-regular files, non-cacheable vnodes
 * (portions locked via VOP_FRLOCK), and mandatory-locked files with
 * active/sleeping locks.  Returns 0 or an errno (EIO wrong zone,
 * ENOSYS VNOMAP, ENXIO bad offset, ENODEV not VREG, EINTR interrupted
 * lock wait, EAGAIN mapping refused, or an as_map()/getattr error).
 */
/* ARGSUSED */
static int
nfs3_map(vnode_t *vp, offset_t off, struct as *as, caddr_t *addrp,
        size_t len, uchar_t prot, uchar_t maxprot, uint_t flags,
        cred_t *cr, caller_context_t *ct)
{
        struct segvn_crargs vn_a;
        int error;
        rnode_t *rp;
        struct vattr va;

        if (nfs_zone() != VTOMI(vp)->mi_zone)
                return (EIO);

        if (vp->v_flag & VNOMAP)
                return (ENOSYS);

        /* Reject negative offsets and ranges that wrap past the end. */
        if (off < 0 || off + len < 0)
                return (ENXIO);

        if (vp->v_type != VREG)
                return (ENODEV);

        /*
         * If there is cached data and if close-to-open consistency
         * checking is not turned off and if the file system is not
         * mounted readonly, then force an over the wire getattr.
         * Otherwise, just invoke nfs3getattr to get a copy of the
         * attributes.  The attribute cache will be used unless it
         * is timed out and if it is, then an over the wire getattr
         * will be issued.
         */
        va.va_mask = AT_ALL;
        if (vn_has_cached_data(vp) &&
            !(VTOMI(vp)->mi_flags & MI_NOCTO) && !vn_is_readonly(vp))
                error = nfs3_getattr_otw(vp, &va, cr);
        else
                error = nfs3getattr(vp, &va, cr);
        if (error)
                return (error);

        /*
         * Check to see if the vnode is currently marked as not cachable.
         * This means portions of the file are locked (through VOP_FRLOCK).
         * In this case the map request must be refused.  We use
         * rp->r_lkserlock to avoid a race with concurrent lock requests.
         */
        rp = VTOR(vp);

        /*
         * Atomically increment r_inmap after acquiring r_rwlock. The
         * idea here is to acquire r_rwlock to block read/write and
         * not to protect r_inmap. r_inmap will inform nfs3_read/write()
         * that we are in nfs3_map(). Now, r_rwlock is acquired in order
         * and we can prevent the deadlock that would have occurred
         * when nfs3_addmap() would have acquired it out of order.
         *
         * Since we are not protecting r_inmap by any lock, we do not
         * hold any lock when we decrement it. We atomically decrement
         * r_inmap after we release r_lkserlock.
         */

        if (nfs_rw_enter_sig(&rp->r_rwlock, RW_WRITER, INTR(vp)))
                return (EINTR);
        atomic_inc_uint(&rp->r_inmap);
        nfs_rw_exit(&rp->r_rwlock);

        if (nfs_rw_enter_sig(&rp->r_lkserlock, RW_READER, INTR(vp))) {
                atomic_dec_uint(&rp->r_inmap);
                return (EINTR);
        }

        /* From here on, all failure paths must go through 'done'. */
        if (vp->v_flag & VNOCACHE) {
                error = EAGAIN;
                goto done;
        }

        /*
         * Don't allow concurrent locks and mapping if mandatory locking is
         * enabled.
         */
        if ((flk_has_remote_locks(vp) || lm_has_sleep(vp)) &&
            MANDLOCK(vp, va.va_mode)) {
                error = EAGAIN;
                goto done;
        }

        as_rangelock(as);
        error = choose_addr(as, addrp, len, off, ADDR_VACALIGN, flags);
        if (error != 0) {
                as_rangeunlock(as);
                goto done;
        }

        /* Build the segvn creation arguments for as_map(). */
        vn_a.vp = vp;
        vn_a.offset = off;
        vn_a.type = (flags & MAP_TYPE);
        vn_a.prot = (uchar_t)prot;
        vn_a.maxprot = (uchar_t)maxprot;
        vn_a.flags = (flags & ~MAP_TYPE);
        vn_a.cred = cr;
        vn_a.amp = NULL;
        vn_a.szc = 0;
        vn_a.lgrp_mem_policy_flags = 0;

        error = as_map(as, *addrp, len, segvn_create, &vn_a);
        as_rangeunlock(as);

done:
        nfs_rw_exit(&rp->r_lkserlock);
        atomic_dec_uint(&rp->r_inmap);
        return (error);
}
5324 5323  
5325 5324  /* ARGSUSED */
5326 5325  static int
5327 5326  nfs3_addmap(vnode_t *vp, offset_t off, struct as *as, caddr_t addr,
5328 5327          size_t len, uchar_t prot, uchar_t maxprot, uint_t flags,
5329 5328          cred_t *cr, caller_context_t *ct)
5330 5329  {
5331 5330          rnode_t *rp;
5332 5331  
5333 5332          if (vp->v_flag & VNOMAP)
5334 5333                  return (ENOSYS);
5335 5334          if (nfs_zone() != VTOMI(vp)->mi_zone)
5336 5335                  return (EIO);
5337 5336  
5338 5337          rp = VTOR(vp);
5339 5338          atomic_add_long((ulong_t *)&rp->r_mapcnt, btopr(len));
5340 5339  
5341 5340          return (0);
5342 5341  }
5343 5342  
/*
 * VOP_FRLOCK for NFSv3.  Local-locking mounts (MI_LLOCK) are serviced
 * by the local locking code via fs_frlock(); otherwise the request is
 * forwarded to the network lock manager via lm4_frlock().
 */
/* ARGSUSED */
static int
nfs3_frlock(vnode_t *vp, int cmd, struct flock64 *bfp, int flag,
	offset_t offset, struct flk_callback *flk_cbp, cred_t *cr,
	caller_context_t *ct)
{
	netobj lm_fh3;
	int rc;
	u_offset_t start, end;
	rnode_t *rp;
	int error = 0, intr = INTR(vp);

	if (nfs_zone() != VTOMI(vp)->mi_zone)
		return (EIO);
	/* check for valid cmd parameter */
	if (cmd != F_GETLK && cmd != F_SETLK && cmd != F_SETLKW)
		return (EINVAL);

	/* Verify l_type. */
	switch (bfp->l_type) {
	case F_RDLCK:
		if (cmd != F_GETLK && !(flag & FREAD))
			return (EBADF);
		break;
	case F_WRLCK:
		if (cmd != F_GETLK && !(flag & FWRITE))
			return (EBADF);
		break;
	case F_UNLCK:
		/*
		 * Don't allow an unlock to be interrupted; failing to
		 * send it could leave an orphaned lock on the server
		 * (see the flush comment below).
		 */
		intr = 0;
		break;

	default:
		return (EINVAL);
	}

	/* check the validity of the lock range */
	if (rc = flk_convert_lock_data(vp, bfp, &start, &end, offset))
		return (rc);
	if (rc = flk_check_lock_data(start, end, MAXEND))
		return (rc);

	/*
	 * If the filesystem is mounted using local locking, pass the
	 * request off to the local locking code.
	 */
	if (VTOMI(vp)->mi_flags & MI_LLOCK) {
		if (cmd == F_SETLK || cmd == F_SETLKW) {
			/*
			 * For complete safety, we should be holding
			 * r_lkserlock.  However, we can't call
			 * lm_safelock and then fs_frlock while
			 * holding r_lkserlock, so just invoke
			 * lm_safelock and expect that this will
			 * catch enough of the cases.
			 */
			if (!lm_safelock(vp, bfp, cr))
				return (EAGAIN);
		}
		return (fs_frlock(vp, cmd, bfp, flag, offset, flk_cbp, cr, ct));
	}

	rp = VTOR(vp);

	/*
	 * Check whether the given lock request can proceed, given the
	 * current file mappings.
	 */
	if (nfs_rw_enter_sig(&rp->r_lkserlock, RW_WRITER, intr))
		return (EINTR);
	if (cmd == F_SETLK || cmd == F_SETLKW) {
		if (!lm_safelock(vp, bfp, cr)) {
			rc = EAGAIN;
			goto done;
		}
	}

	/*
	 * Flush the cache after waiting for async I/O to finish.  For new
	 * locks, this is so that the process gets the latest bits from the
	 * server.  For unlocks, this is so that other clients see the
	 * latest bits once the file has been unlocked.  If currently dirty
	 * pages can't be flushed, then don't allow a lock to be set.  But
	 * allow unlocks to succeed, to avoid having orphan locks on the
	 * server.
	 */
	if (cmd != F_GETLK) {
		mutex_enter(&rp->r_statelock);
		/* Wait for in-flight I/O (r_count) to drain. */
		while (rp->r_count > 0) {
			if (intr) {
				klwp_t *lwp = ttolwp(curthread);

				/* lwp_nostop keeps /proc from stopping us mid-wait */
				if (lwp != NULL)
					lwp->lwp_nostop++;
				if (cv_wait_sig(&rp->r_cv,
				    &rp->r_statelock) == 0) {
					if (lwp != NULL)
						lwp->lwp_nostop--;
					rc = EINTR;
					break;
				}
				if (lwp != NULL)
					lwp->lwp_nostop--;
			} else
				cv_wait(&rp->r_cv, &rp->r_statelock);
		}
		mutex_exit(&rp->r_statelock);
		if (rc != 0)
			goto done;
		error = nfs3_putpage(vp, (offset_t)0, 0, B_INVAL, cr, ct);
		if (error) {
			if (error == ENOSPC || error == EDQUOT) {
				/* Record the first hard error on the rnode. */
				mutex_enter(&rp->r_statelock);
				if (!rp->r_error)
					rp->r_error = error;
				mutex_exit(&rp->r_statelock);
			}
			if (bfp->l_type != F_UNLCK) {
				rc = ENOLCK;
				goto done;
			}
		}
	}

	/* Build the file handle netobj the lock manager sends over the wire. */
	lm_fh3.n_len = VTOFH3(vp)->fh3_length;
	lm_fh3.n_bytes = (char *)&(VTOFH3(vp)->fh3_u.data);

	/*
	 * Call the lock manager to do the real work of contacting
	 * the server and obtaining the lock.
	 */
	rc = lm4_frlock(vp, cmd, bfp, flag, offset, cr, &lm_fh3, flk_cbp);

	if (rc == 0)
		nfs_lockcompletion(vp, cmd);

done:
	nfs_rw_exit(&rp->r_lkserlock);
	return (rc);
}
5484 5483  
/*
 * Free storage space associated with the specified vnode.  The portion
 * to be freed is specified by bfp->l_start and bfp->l_len (already
 * normalized to a "whence" of 0).
 *
 * This is an experimental facility whose continued existence is not
 * guaranteed.  Currently, we only support the special case
 * of l_len == 0, meaning free to end of file.
 */
/* ARGSUSED */
static int
nfs3_space(vnode_t *vp, int cmd, struct flock64 *bfp, int flag,
	offset_t offset, cred_t *cr, caller_context_t *ct)
{
	int error;

	ASSERT(vp->v_type == VREG);
	if (cmd != F_FREESP)
		return (EINVAL);
	if (nfs_zone() != VTOMI(vp)->mi_zone)
		return (EIO);

	/* Normalize l_start in bfp to be relative to the start of the file. */
	error = convoff(vp, bfp, 0, offset);
	if (!error) {
		ASSERT(bfp->l_start >= 0);
		if (bfp->l_len == 0) {
			struct vattr va;

			/*
			 * ftruncate should not change the ctime and
			 * mtime if we truncate the file to its
			 * previous size.
			 */
			va.va_mask = AT_SIZE;
			error = nfs3getattr(vp, &va, cr);
			if (error || va.va_size == bfp->l_start)
				return (error);
			va.va_mask = AT_SIZE;
			va.va_size = bfp->l_start;
			error = nfs3setattr(vp, &va, 0, cr);

			/*
			 * Truncation to zero is a "truncate" vnode
			 * event; changing the size to any other offset
			 * is a "resize".  Emit the matching event so
			 * file-event watchers see the right one
			 * (OS-5148).
			 */
			if (error == 0) {
				if (bfp->l_start == 0) {
					vnevent_truncate(vp, ct);
				} else {
					vnevent_resize(vp, ct);
				}
			}
		} else
			error = EINVAL;
	}

	return (error);
}
5534 5538  
5535 5539  /* ARGSUSED */
5536 5540  static int
5537 5541  nfs3_realvp(vnode_t *vp, vnode_t **vpp, caller_context_t *ct)
5538 5542  {
5539 5543  
5540 5544          return (EINVAL);
5541 5545  }
5542 5546  
/*
 * Setup and add an address space callback to do the work of the delmap call.
 * The callback will (and must be) deleted in the actual callback function.
 *
 * This is done in order to take care of the problem that we have with holding
 * the address space's a_lock for a long period of time (e.g. if the NFS server
 * is down).  Callbacks will be executed in the address space code while the
 * a_lock is not held.  Holding the address space's a_lock causes things such
 * as ps and fork to hang because they are trying to acquire this lock as well.
 */
/* ARGSUSED */
static int
nfs3_delmap(vnode_t *vp, offset_t off, struct as *as, caddr_t addr,
	size_t len, uint_t prot, uint_t maxprot, uint_t flags,
	cred_t *cr, caller_context_t *ct)
{
	int			caller_found;
	int			error;
	rnode_t			*rp;
	nfs_delmap_args_t	*dmapp;
	nfs_delmapcall_t	*delmap_call;

	if (vp->v_flag & VNOMAP)
		return (ENOSYS);
	/*
	 * A process may not change zones if it has NFS pages mmap'ed
	 * in, so we can't legitimately get here from the wrong zone.
	 */
	ASSERT(nfs_zone() == VTOMI(vp)->mi_zone);

	rp = VTOR(vp);

	/*
	 * The way that the address space of this process deletes its mapping
	 * of this file is via the following call chains:
	 * - as_free()->SEGOP_UNMAP()/segvn_unmap()->VOP_DELMAP()/nfs3_delmap()
	 * - as_unmap()->SEGOP_UNMAP()/segvn_unmap()->VOP_DELMAP()/nfs3_delmap()
	 *
	 * With the use of address space callbacks we are allowed to drop the
	 * address space lock, a_lock, while executing the NFS operations that
	 * need to go over the wire.  Returning EAGAIN to the caller of this
	 * function is what drives the execution of the callback that we add
	 * below.  The callback will be executed by the address space code
	 * after dropping the a_lock.  When the callback is finished, since
	 * we dropped the a_lock, it must be re-acquired and segvn_unmap()
	 * is called again on the same segment to finish the rest of the work
	 * that needs to happen during unmapping.
	 *
	 * This action of calling back into the segment driver causes
	 * nfs3_delmap() to get called again, but since the callback was
	 * already executed at this point, it already did the work and there
	 * is nothing left for us to do.
	 *
	 * To Summarize:
	 * - The first time nfs3_delmap is called by the current thread is when
	 * we add the caller associated with this delmap to the delmap caller
	 * list, add the callback, and return EAGAIN.
	 * - The second time in this call chain when nfs3_delmap is called we
	 * will find this caller in the delmap caller list and realize there
	 * is no more work to do thus removing this caller from the list and
	 * returning the error that was set in the callback execution.
	 */
	caller_found = nfs_find_and_delete_delmapcall(rp, &error);
	if (caller_found) {
		/*
		 * 'error' is from the actual delmap operations.  To avoid
		 * hangs, we need to handle the return of EAGAIN differently
		 * since this is what drives the callback execution.
		 * In this case, we don't want to return EAGAIN and do the
		 * callback execution because there are none to execute.
		 */
		if (error == EAGAIN)
			return (0);
		else
			return (error);
	}

	/* current caller was not in the list */
	delmap_call = nfs_init_delmapcall();

	/* Register this caller so the second pass above can find it. */
	mutex_enter(&rp->r_statelock);
	list_insert_tail(&rp->r_indelmap, delmap_call);
	mutex_exit(&rp->r_statelock);

	/* Package the arguments for the callback to use once a_lock drops. */
	dmapp = kmem_alloc(sizeof (nfs_delmap_args_t), KM_SLEEP);

	dmapp->vp = vp;
	dmapp->off = off;
	dmapp->addr = addr;
	dmapp->len = len;
	dmapp->prot = prot;
	dmapp->maxprot = maxprot;
	dmapp->flags = flags;
	dmapp->cr = cr;
	dmapp->caller = delmap_call;

	error = as_add_callback(as, nfs3_delmap_callback, dmapp,
	    AS_UNMAP_EVENT, addr, len, KM_SLEEP);

	/* EAGAIN is what drives the address space code to run the callback. */
	return (error ? error : EAGAIN);
}
5644 5648  
/*
 * Remove some pages from an mmap'd vnode.  Just update the
 * count of pages.  If doing close-to-open, then flush and
 * commit all of the pages associated with this file.
 * Otherwise, start an asynchronous page flush to write out
 * any dirty pages.  This will also associate a credential
 * with the rnode which can be used to write the pages.
 */
/* ARGSUSED */
static void
nfs3_delmap_callback(struct as *as, void *arg, uint_t event)
{
	int			error;
	rnode_t			*rp;
	mntinfo_t		*mi;
	nfs_delmap_args_t	*dmapp = (nfs_delmap_args_t *)arg;

	rp = VTOR(dmapp->vp);
	mi = VTOMI(dmapp->vp);

	/* Undo the page accounting done in nfs3_addmap(). */
	atomic_add_long((ulong_t *)&rp->r_mapcnt, -btopr(dmapp->len));
	ASSERT(rp->r_mapcnt >= 0);

	/*
	 * Initiate a page flush and potential commit if there are
	 * pages, the file system was not mounted readonly, the segment
	 * was mapped shared, and the pages themselves were writeable.
	 */
	if (vn_has_cached_data(dmapp->vp) && !vn_is_readonly(dmapp->vp) &&
	    dmapp->flags == MAP_SHARED && (dmapp->maxprot & PROT_WRITE)) {
		mutex_enter(&rp->r_statelock);
		rp->r_flags |= RDIRTY;
		mutex_exit(&rp->r_statelock);
		/*
		 * If this is a cross-zone access a sync putpage won't work, so
		 * the best we can do is try an async putpage.  That seems
		 * better than something more draconian such as discarding the
		 * dirty pages.
		 */
		if ((mi->mi_flags & MI_NOCTO) ||
		    nfs_zone() != mi->mi_zone)
			error = nfs3_putpage(dmapp->vp, dmapp->off, dmapp->len,
			    B_ASYNC, dmapp->cr, NULL);
		else
			error = nfs3_putpage_commit(dmapp->vp, dmapp->off,
			    dmapp->len, dmapp->cr);
		if (!error) {
			/* Pick up (and clear) any previously recorded error. */
			mutex_enter(&rp->r_statelock);
			error = rp->r_error;
			rp->r_error = 0;
			mutex_exit(&rp->r_statelock);
		}
	} else
		error = 0;

	if ((rp->r_flags & RDIRECTIO) || (mi->mi_flags & MI_DIRECTIO))
		(void) nfs3_putpage(dmapp->vp, dmapp->off, dmapp->len,
		    B_INVAL, dmapp->cr, NULL);

	/* Hand the result back to the second nfs3_delmap() pass. */
	dmapp->caller->error = error;
	(void) as_delete_callback(as, arg);
	kmem_free(dmapp, sizeof (nfs_delmap_args_t));
}
5708 5712  
/*
 * Debug tunable: when non-zero, pathconf results are not cached on the
 * rnode and any previously cached copy is discarded on the next query.
 */
static int nfs3_pathconf_disable_cache = 0;

#ifdef DEBUG
/* Counters tracking the effectiveness of the rnode pathconf cache. */
static int nfs3_pathconf_cache_hits = 0;
static int nfs3_pathconf_cache_misses = 0;
#endif
5715 5719  
5716 5720  /* ARGSUSED */
5717 5721  static int
5718 5722  nfs3_pathconf(vnode_t *vp, int cmd, ulong_t *valp, cred_t *cr,
5719 5723          caller_context_t *ct)
5720 5724  {
5721 5725          int error;
5722 5726          PATHCONF3args args;
5723 5727          PATHCONF3res res;
5724 5728          int douprintf;
5725 5729          failinfo_t fi;
5726 5730          rnode_t *rp;
5727 5731          hrtime_t t;
5728 5732  
5729 5733          if (nfs_zone() != VTOMI(vp)->mi_zone)
5730 5734                  return (EIO);
5731 5735          /*
5732 5736           * Large file spec - need to base answer on info stored
5733 5737           * on original FSINFO response.
5734 5738           */
5735 5739          if (cmd == _PC_FILESIZEBITS) {
5736 5740                  unsigned long long ll;
5737 5741                  long l = 1;
5738 5742  
5739 5743                  ll = VTOMI(vp)->mi_maxfilesize;
5740 5744  
5741 5745                  if (ll == 0) {
5742 5746                          *valp = 0;
5743 5747                          return (0);
5744 5748                  }
5745 5749  
5746 5750                  if (ll & 0xffffffff00000000) {
5747 5751                          l += 32; ll >>= 32;
5748 5752                  }
5749 5753                  if (ll & 0xffff0000) {
5750 5754                          l += 16; ll >>= 16;
5751 5755                  }
5752 5756                  if (ll & 0xff00) {
5753 5757                          l += 8; ll >>= 8;
5754 5758                  }
5755 5759                  if (ll & 0xf0) {
5756 5760                          l += 4; ll >>= 4;
5757 5761                  }
5758 5762                  if (ll & 0xc) {
5759 5763                          l += 2; ll >>= 2;
5760 5764                  }
5761 5765                  if (ll & 0x2)
5762 5766                          l += 2;
5763 5767                  else if (ll & 0x1)
5764 5768                          l += 1;
5765 5769                  *valp = l;
5766 5770                  return (0);
5767 5771          }
5768 5772  
5769 5773          if (cmd == _PC_ACL_ENABLED) {
5770 5774                  *valp = _ACL_ACLENT_ENABLED;
5771 5775                  return (0);
5772 5776          }
5773 5777  
5774 5778          if (cmd == _PC_XATTR_EXISTS) {
5775 5779                  error = 0;
5776 5780                  *valp = 0;
5777 5781                  if (vp->v_vfsp->vfs_flag & VFS_XATTR) {
5778 5782                          vnode_t *avp;
5779 5783                          rnode_t *rp;
5780 5784                          int error = 0;
5781 5785                          mntinfo_t *mi = VTOMI(vp);
5782 5786  
5783 5787                          if (!(mi->mi_flags & MI_EXTATTR))
5784 5788                                  return (0);
5785 5789  
5786 5790                          rp = VTOR(vp);
5787 5791                          if (nfs_rw_enter_sig(&rp->r_rwlock, RW_READER,
5788 5792                              INTR(vp)))
5789 5793                                  return (EINTR);
5790 5794  
5791 5795                          error = nfs3lookup_dnlc(vp, XATTR_DIR_NAME, &avp, cr);
5792 5796                          if (error || avp == NULL)
5793 5797                                  error = acl_getxattrdir3(vp, &avp, 0, cr, 0);
5794 5798  
5795 5799                          nfs_rw_exit(&rp->r_rwlock);
5796 5800  
5797 5801                          if (error == 0 && avp != NULL) {
5798 5802                                  error = do_xattr_exists_check(avp, valp, cr);
5799 5803                                  VN_RELE(avp);
5800 5804                          } else if (error == ENOENT) {
5801 5805                                  error = 0;
5802 5806                                  *valp = 0;
5803 5807                          }
5804 5808                  }
5805 5809                  return (error);
5806 5810          }
5807 5811  
5808 5812          rp = VTOR(vp);
5809 5813          if (rp->r_pathconf != NULL) {
5810 5814                  mutex_enter(&rp->r_statelock);
5811 5815                  if (rp->r_pathconf != NULL && nfs3_pathconf_disable_cache) {
5812 5816                          kmem_free(rp->r_pathconf, sizeof (*rp->r_pathconf));
5813 5817                          rp->r_pathconf = NULL;
5814 5818                  }
5815 5819                  if (rp->r_pathconf != NULL) {
5816 5820                          error = 0;
5817 5821                          switch (cmd) {
5818 5822                          case _PC_LINK_MAX:
5819 5823                                  *valp = rp->r_pathconf->link_max;
5820 5824                                  break;
5821 5825                          case _PC_NAME_MAX:
5822 5826                                  *valp = rp->r_pathconf->name_max;
5823 5827                                  break;
5824 5828                          case _PC_PATH_MAX:
5825 5829                          case _PC_SYMLINK_MAX:
5826 5830                                  *valp = MAXPATHLEN;
5827 5831                                  break;
5828 5832                          case _PC_CHOWN_RESTRICTED:
5829 5833                                  *valp = rp->r_pathconf->chown_restricted;
5830 5834                                  break;
5831 5835                          case _PC_NO_TRUNC:
5832 5836                                  *valp = rp->r_pathconf->no_trunc;
5833 5837                                  break;
5834 5838                          default:
5835 5839                                  error = EINVAL;
5836 5840                                  break;
5837 5841                          }
5838 5842                          mutex_exit(&rp->r_statelock);
5839 5843  #ifdef DEBUG
5840 5844                          nfs3_pathconf_cache_hits++;
5841 5845  #endif
5842 5846                          return (error);
5843 5847                  }
5844 5848                  mutex_exit(&rp->r_statelock);
5845 5849          }
5846 5850  #ifdef DEBUG
5847 5851          nfs3_pathconf_cache_misses++;
5848 5852  #endif
5849 5853  
5850 5854          args.object = *VTOFH3(vp);
5851 5855          fi.vp = vp;
5852 5856          fi.fhp = (caddr_t)&args.object;
5853 5857          fi.copyproc = nfs3copyfh;
5854 5858          fi.lookupproc = nfs3lookup;
5855 5859          fi.xattrdirproc = acl_getxattrdir3;
5856 5860  
5857 5861          douprintf = 1;
5858 5862  
5859 5863          t = gethrtime();
5860 5864  
5861 5865          error = rfs3call(VTOMI(vp), NFSPROC3_PATHCONF,
5862 5866              xdr_nfs_fh3, (caddr_t)&args,
5863 5867              xdr_PATHCONF3res, (caddr_t)&res, cr,
5864 5868              &douprintf, &res.status, 0, &fi);
5865 5869  
5866 5870          if (error)
5867 5871                  return (error);
5868 5872  
5869 5873          error = geterrno3(res.status);
5870 5874  
5871 5875          if (!error) {
5872 5876                  nfs3_cache_post_op_attr(vp, &res.resok.obj_attributes, t, cr);
5873 5877                  if (!nfs3_pathconf_disable_cache) {
5874 5878                          mutex_enter(&rp->r_statelock);
5875 5879                          if (rp->r_pathconf == NULL) {
5876 5880                                  rp->r_pathconf = kmem_alloc(
5877 5881                                      sizeof (*rp->r_pathconf), KM_NOSLEEP);
5878 5882                                  if (rp->r_pathconf != NULL)
5879 5883                                          *rp->r_pathconf = res.resok.info;
5880 5884                          }
5881 5885                          mutex_exit(&rp->r_statelock);
5882 5886                  }
5883 5887                  switch (cmd) {
5884 5888                  case _PC_LINK_MAX:
5885 5889                          *valp = res.resok.info.link_max;
5886 5890                          break;
5887 5891                  case _PC_NAME_MAX:
5888 5892                          *valp = res.resok.info.name_max;
5889 5893                          break;
5890 5894                  case _PC_PATH_MAX:
5891 5895                  case _PC_SYMLINK_MAX:
5892 5896                          *valp = MAXPATHLEN;
5893 5897                          break;
5894 5898                  case _PC_CHOWN_RESTRICTED:
5895 5899                          *valp = res.resok.info.chown_restricted;
5896 5900                          break;
5897 5901                  case _PC_NO_TRUNC:
5898 5902                          *valp = res.resok.info.no_trunc;
5899 5903                          break;
5900 5904                  default:
5901 5905                          return (EINVAL);
5902 5906                  }
5903 5907          } else {
5904 5908                  nfs3_cache_post_op_attr(vp, &res.resfail.obj_attributes, t, cr);
5905 5909                  PURGE_STALE_FH(error, vp, cr);
5906 5910          }
5907 5911  
5908 5912          return (error);
5909 5913  }
5910 5914  
5911 5915  /*
5912 5916   * Called by async thread to do synchronous pageio. Do the i/o, wait
5913 5917   * for it to complete, and cleanup the page list when done.
5914 5918   */
5915 5919  static int
5916 5920  nfs3_sync_pageio(vnode_t *vp, page_t *pp, u_offset_t io_off, size_t io_len,
5917 5921          int flags, cred_t *cr)
5918 5922  {
5919 5923          int error;
5920 5924  
5921 5925          ASSERT(nfs_zone() == VTOMI(vp)->mi_zone);
5922 5926          error = nfs3_rdwrlbn(vp, pp, io_off, io_len, flags, cr);
5923 5927          if (flags & B_READ)
5924 5928                  pvn_read_done(pp, (error ? B_ERROR : 0) | flags);
5925 5929          else
5926 5930                  pvn_write_done(pp, (error ? B_ERROR : 0) | flags);
5927 5931          return (error);
5928 5932  }
5929 5933  
5930 5934  /* ARGSUSED */
5931 5935  static int
5932 5936  nfs3_pageio(vnode_t *vp, page_t *pp, u_offset_t io_off, size_t io_len,
5933 5937          int flags, cred_t *cr, caller_context_t *ct)
5934 5938  {
5935 5939          int error;
5936 5940          rnode_t *rp;
5937 5941  
5938 5942          if (pp == NULL)
5939 5943                  return (EINVAL);
5940 5944          if (!(flags & B_ASYNC) && nfs_zone() != VTOMI(vp)->mi_zone)
5941 5945                  return (EIO);
5942 5946  
5943 5947          rp = VTOR(vp);
5944 5948          mutex_enter(&rp->r_statelock);
5945 5949          rp->r_count++;
5946 5950          mutex_exit(&rp->r_statelock);
5947 5951  
5948 5952          if (flags & B_ASYNC) {
5949 5953                  error = nfs_async_pageio(vp, pp, io_off, io_len, flags, cr,
5950 5954                      nfs3_sync_pageio);
5951 5955          } else
5952 5956                  error = nfs3_rdwrlbn(vp, pp, io_off, io_len, flags, cr);
5953 5957          mutex_enter(&rp->r_statelock);
5954 5958          rp->r_count--;
5955 5959          cv_broadcast(&rp->r_cv);
5956 5960          mutex_exit(&rp->r_statelock);
5957 5961          return (error);
5958 5962  }
5959 5963  
/*
 * Called by the VM system when a page belonging to this file is being
 * freed (fl == B_FREE) or destroyed (fl == B_INVAL).  Pages whose data
 * still needs to be committed on the server (p_fsdata != C_NOCOMMIT)
 * must be committed before they can be released; to amortize the cost,
 * this routine gathers as many committable pages as possible and
 * issues a single COMMIT3 for all of them.
 */
/* ARGSUSED */
static void
nfs3_dispose(vnode_t *vp, page_t *pp, int fl, int dn, cred_t *cr,
        caller_context_t *ct)
{
        int error;
        rnode_t *rp;
        page_t *plist;
        page_t *pptr;
        offset3 offset;
        count3 len;
        k_sigset_t smask;

        /*
         * We should get called with fl equal to either B_FREE or
         * B_INVAL.  Any other value is illegal.
         *
         * The page that we are either supposed to free or destroy
         * should be exclusive locked and its io lock should not
         * be held.
         */
        ASSERT(fl == B_FREE || fl == B_INVAL);
        ASSERT((PAGE_EXCL(pp) && !page_iolock_assert(pp)) || panicstr);
        rp = VTOR(vp);

        /*
         * If the page doesn't need to be committed or we shouldn't
         * even bother attempting to commit it, then just make sure
         * that the p_fsdata byte is clear and then either free or
         * destroy the page as appropriate.
         */
        if (pp->p_fsdata == C_NOCOMMIT || (rp->r_flags & RSTALE)) {
                pp->p_fsdata = C_NOCOMMIT;
                if (fl == B_FREE)
                        page_free(pp, dn);
                else
                        page_destroy(pp, dn);
                return;
        }

        /*
         * If there is a page invalidation operation going on, then
         * if this is one of the pages being destroyed, then just
         * clear the p_fsdata byte and then either free or destroy
         * the page as appropriate.
         */
        mutex_enter(&rp->r_statelock);
        if ((rp->r_flags & RTRUNCATE) && pp->p_offset >= rp->r_truncaddr) {
                mutex_exit(&rp->r_statelock);
                pp->p_fsdata = C_NOCOMMIT;
                if (fl == B_FREE)
                        page_free(pp, dn);
                else
                        page_destroy(pp, dn);
                return;
        }

        /*
         * If we are freeing this page and someone else is already
         * waiting to do a commit, then just unlock the page and
         * return.  That other thread will take care of committing
         * this page.  The page can be freed sometime after the
         * commit has finished.  Otherwise, if the page is marked
         * as delay commit, then we may be getting called from
         * pvn_write_done, one page at a time.   This could result
         * in one commit per page, so we end up doing lots of small
         * commits instead of fewer larger commits.  This is bad,
         * we want to do as few commits as possible.
         */
        if (fl == B_FREE) {
                if (rp->r_flags & RCOMMITWAIT) {
                        page_unlock(pp);
                        mutex_exit(&rp->r_statelock);
                        return;
                }
                if (pp->p_fsdata == C_DELAYCOMMIT) {
                        pp->p_fsdata = C_COMMIT;
                        page_unlock(pp);
                        mutex_exit(&rp->r_statelock);
                        return;
                }
        }

        /*
         * Check to see if there is a signal which would prevent an
         * attempt to commit the pages from being successful.  If so,
         * then don't bother with all of the work to gather pages and
         * generate the unsuccessful RPC.  Just return from here and
         * let the page be committed at some later time.
         */
        sigintr(&smask, VTOMI(vp)->mi_flags & MI_INT);
        if (ttolwp(curthread) != NULL && ISSIG(curthread, JUSTLOOKING)) {
                sigunintr(&smask);
                page_unlock(pp);
                mutex_exit(&rp->r_statelock);
                return;
        }
        sigunintr(&smask);

        /*
         * We are starting to need to commit pages, so let's try
         * to commit as many as possible at once to reduce the
         * overhead.
         *
         * Set the `commit inprogress' state bit.  We must
         * first wait until any current one finishes.  Then
         * we initialize the c_pages list with this page.
         */
        while (rp->r_flags & RCOMMIT) {
                rp->r_flags |= RCOMMITWAIT;
                cv_wait(&rp->r_commit.c_cv, &rp->r_statelock);
                rp->r_flags &= ~RCOMMITWAIT;
        }
        rp->r_flags |= RCOMMIT;
        mutex_exit(&rp->r_statelock);
        ASSERT(rp->r_commit.c_pages == NULL);
        rp->r_commit.c_pages = pp;
        rp->r_commit.c_commbase = (offset3)pp->p_offset;
        rp->r_commit.c_commlen = PAGESIZE;

        /*
         * Gather together all other pages which can be committed.
         * They will all be chained off r_commit.c_pages.
         */
        nfs3_get_commit(vp);

        /*
         * Clear the `commit inprogress' status and disconnect
         * the list of pages to be committed from the rnode.
         * At this same time, we also save the starting offset
         * and length of data to be committed on the server.
         */
        plist = rp->r_commit.c_pages;
        rp->r_commit.c_pages = NULL;
        offset = rp->r_commit.c_commbase;
        len = rp->r_commit.c_commlen;
        mutex_enter(&rp->r_statelock);
        rp->r_flags &= ~RCOMMIT;
        cv_broadcast(&rp->r_commit.c_cv);
        mutex_exit(&rp->r_statelock);

        /*
         * Pageout/fsflush threads (and callers in the wrong zone)
         * hand the commit off to an async worker rather than doing
         * the over-the-wire operation themselves.
         */
        if (curproc == proc_pageout || curproc == proc_fsflush ||
            nfs_zone() != VTOMI(vp)->mi_zone) {
                nfs_async_commit(vp, plist, offset, len, cr, nfs3_async_commit);
                return;
        }

        /*
         * Actually generate the COMMIT3 over the wire operation.
         */
        error = nfs3_commit(vp, offset, len, cr);

        /*
         * If we got an error during the commit, just unlock all
         * of the pages.  The pages will get retransmitted to the
         * server during a putpage operation.
         */
        if (error) {
                while (plist != NULL) {
                        pptr = plist;
                        page_sub(&plist, pptr);
                        page_unlock(pptr);
                }
                return;
        }

        /*
         * We've tried as hard as we can to commit the data to stable
         * storage on the server.  We release the rest of the pages
         * and clear the commit required state.  They will be put
         * onto the tail of the cachelist if they are no longer
         * mapped.
         */
        while (plist != pp) {
                pptr = plist;
                page_sub(&plist, pptr);
                pptr->p_fsdata = C_NOCOMMIT;
                (void) page_release(pptr, 1);
        }

        /*
         * It is possible that nfs3_commit didn't return error but
         * some other thread has modified the page we are going
         * to free/destroy.
         *    In this case we need to rewrite the page. Do an explicit check
         * before attempting to free/destroy the page. If modified, needs to
         * be rewritten so unlock the page and return.
         */
        if (hat_ismod(pp)) {
                pp->p_fsdata = C_NOCOMMIT;
                page_unlock(pp);
                return;
        }

        /*
         * Now, as appropriate, either free or destroy the page
         * that we were called with.
         */
        pp->p_fsdata = C_NOCOMMIT;
        if (fl == B_FREE)
                page_free(pp, dn);
        else
                page_destroy(pp, dn);
}
6164 6168  
/*
 * Issue a COMMIT3 RPC for the given byte range of the file.
 *
 * Returns zero when the commit succeeds and the server's write
 * verifier is unchanged.  If the verifier changed, every cached page
 * is re-marked dirty via nfs3_set_mod() (the data must be rewritten),
 * the new verifier is recorded, and NFS_VERF_MISMATCH is returned so
 * the caller can flush and retry.  Other errors are recorded in
 * r_error; ESTALE additionally marks the rnode RSTALE and purges the
 * attribute cache (see the comment below on why PURGE_STALE_FH cannot
 * be used here).
 */
static int
nfs3_commit(vnode_t *vp, offset3 offset, count3 count, cred_t *cr)
{
        int error;
        rnode_t *rp;
        COMMIT3args args;
        COMMIT3res res;
        int douprintf;
        cred_t *cred;

        rp = VTOR(vp);
        ASSERT(nfs_zone() == VTOMI(vp)->mi_zone);

        /*
         * Use the credentials cached in the rnode if present;
         * otherwise cache the caller's and use those.  In the cache
         * case one hold is taken for r_cred and one for our local
         * reference (dropped after the RPC).
         */
        mutex_enter(&rp->r_statelock);
        if (rp->r_cred != NULL) {
                cred = rp->r_cred;
                crhold(cred);
        } else {
                rp->r_cred = cr;
                crhold(cr);
                cred = cr;
                crhold(cred);
        }
        mutex_exit(&rp->r_statelock);

        args.file = *VTOFH3(vp);
        args.offset = offset;
        args.count = count;

doitagain:
        douprintf = 1;
        error = rfs3call(VTOMI(vp), NFSPROC3_COMMIT,
            xdr_COMMIT3args, (caddr_t)&args,
            xdr_COMMIT3res, (caddr_t)&res, cred,
            &douprintf, &res.status, 0, NULL);

        crfree(cred);

        if (error)
                return (error);

        error = geterrno3(res.status);
        if (!error) {
                ASSERT(rp->r_flags & RHAVEVERF);
                mutex_enter(&rp->r_statelock);
                if (rp->r_verf == res.resok.verf) {
                        mutex_exit(&rp->r_statelock);
                        return (0);
                }
                /*
                 * The server's write verifier changed: previously
                 * written data may have been lost, so mark all pages
                 * dirty again and report the mismatch.
                 */
                nfs3_set_mod(vp);
                rp->r_verf = res.resok.verf;
                mutex_exit(&rp->r_statelock);
                error = NFS_VERF_MISMATCH;
        } else {
                /*
                 * If the cached credentials were rejected, retry
                 * once with the caller's credentials and cache those
                 * instead.
                 */
                if (error == EACCES) {
                        mutex_enter(&rp->r_statelock);
                        if (cred != cr) {
                                if (rp->r_cred != NULL)
                                        crfree(rp->r_cred);
                                rp->r_cred = cr;
                                crhold(cr);
                                cred = cr;
                                crhold(cred);
                                mutex_exit(&rp->r_statelock);
                                goto doitagain;
                        }
                        mutex_exit(&rp->r_statelock);
                }
                /*
                 * Can't do a PURGE_STALE_FH here because this
                 * can cause a deadlock.  nfs3_commit can
                 * be called from nfs3_dispose which can be called
                 * indirectly via pvn_vplist_dirty.  PURGE_STALE_FH
                 * can call back to pvn_vplist_dirty.
                 */
                if (error == ESTALE) {
                        mutex_enter(&rp->r_statelock);
                        rp->r_flags |= RSTALE;
                        if (!rp->r_error)
                                rp->r_error = error;
                        mutex_exit(&rp->r_statelock);
                        PURGE_ATTRCACHE(vp);
                } else {
                        mutex_enter(&rp->r_statelock);
                        if (!rp->r_error)
                                rp->r_error = error;
                        mutex_exit(&rp->r_statelock);
                }
        }

        return (error);
}
6257 6261  
/*
 * Mark the vnode's cached pages dirty (via pvn_vplist_setdirty with
 * the nfs_setmod_check filter) so they will be rewritten to the
 * server.  Called from nfs3_commit() when the server's write verifier
 * changes, indicating previously written data may have been lost.
 */
static void
nfs3_set_mod(vnode_t *vp)
{
        ASSERT(nfs_zone() == VTOMI(vp)->mi_zone);

        pvn_vplist_setdirty(vp, nfs_setmod_check);
}
6265 6269  
6266 6270  /*
6267 6271   * This routine is used to gather together a page list of the pages
6268 6272   * which are to be committed on the server.  This routine must not
6269 6273   * be called if the calling thread holds any locked pages.
6270 6274   *
6271 6275   * The calling thread must have set RCOMMIT.  This bit is used to
6272 6276   * serialize access to the commit structure in the rnode.  As long
6273 6277   * as the thread has set RCOMMIT, then it can manipulate the commit
6274 6278   * structure without requiring any other locks.
6275 6279   */
6276 6280  static void
6277 6281  nfs3_get_commit(vnode_t *vp)
6278 6282  {
6279 6283          rnode_t *rp;
6280 6284          page_t *pp;
6281 6285          kmutex_t *vphm;
6282 6286  
6283 6287          rp = VTOR(vp);
6284 6288  
6285 6289          ASSERT(rp->r_flags & RCOMMIT);
6286 6290  
6287 6291          vphm = page_vnode_mutex(vp);
6288 6292          mutex_enter(vphm);
6289 6293  
6290 6294          /*
6291 6295           * If there are no pages associated with this vnode, then
6292 6296           * just return.
6293 6297           */
6294 6298          if ((pp = vp->v_pages) == NULL) {
6295 6299                  mutex_exit(vphm);
6296 6300                  return;
6297 6301          }
6298 6302  
6299 6303          /*
6300 6304           * Step through all of the pages associated with this vnode
6301 6305           * looking for pages which need to be committed.
6302 6306           */
6303 6307          do {
6304 6308                  /* Skip marker pages. */
6305 6309                  if (pp->p_hash == PVN_VPLIST_HASH_TAG)
6306 6310                          continue;
6307 6311  
6308 6312                  /*
6309 6313                   * If this page does not need to be committed or is
6310 6314                   * modified, then just skip it.
6311 6315                   */
6312 6316                  if (pp->p_fsdata == C_NOCOMMIT || hat_ismod(pp))
6313 6317                          continue;
6314 6318  
6315 6319                  /*
6316 6320                   * Attempt to lock the page.  If we can't, then
6317 6321                   * someone else is messing with it and we will
6318 6322                   * just skip it.
6319 6323                   */
6320 6324                  if (!page_trylock(pp, SE_EXCL))
6321 6325                          continue;
6322 6326  
6323 6327                  /*
6324 6328                   * If this page does not need to be committed or is
6325 6329                   * modified, then just skip it.  Recheck now that
6326 6330                   * the page is locked.
6327 6331                   */
6328 6332                  if (pp->p_fsdata == C_NOCOMMIT || hat_ismod(pp)) {
6329 6333                          page_unlock(pp);
6330 6334                          continue;
6331 6335                  }
6332 6336  
6333 6337                  if (PP_ISFREE(pp)) {
6334 6338                          cmn_err(CE_PANIC, "nfs3_get_commit: %p is free",
6335 6339                              (void *)pp);
6336 6340                  }
6337 6341  
6338 6342                  /*
6339 6343                   * The page needs to be committed and we locked it.
6340 6344                   * Update the base and length parameters and add it
6341 6345                   * to r_pages.
6342 6346                   */
6343 6347                  if (rp->r_commit.c_pages == NULL) {
6344 6348                          rp->r_commit.c_commbase = (offset3)pp->p_offset;
6345 6349                          rp->r_commit.c_commlen = PAGESIZE;
6346 6350                  } else if (pp->p_offset < rp->r_commit.c_commbase) {
6347 6351                          rp->r_commit.c_commlen = rp->r_commit.c_commbase -
6348 6352                              (offset3)pp->p_offset + rp->r_commit.c_commlen;
6349 6353                          rp->r_commit.c_commbase = (offset3)pp->p_offset;
6350 6354                  } else if ((rp->r_commit.c_commbase + rp->r_commit.c_commlen)
6351 6355                      <= pp->p_offset) {
6352 6356                          rp->r_commit.c_commlen = (offset3)pp->p_offset -
6353 6357                              rp->r_commit.c_commbase + PAGESIZE;
6354 6358                  }
6355 6359                  page_add(&rp->r_commit.c_pages, pp);
6356 6360          } while ((pp = pp->p_vpnext) != vp->v_pages);
6357 6361  
6358 6362          mutex_exit(vphm);
6359 6363  }
6360 6364  
6361 6365  /*
6362 6366   * This routine is used to gather together a page list of the pages
6363 6367   * which are to be committed on the server.  This routine must not
6364 6368   * be called if the calling thread holds any locked pages.
6365 6369   *
6366 6370   * The calling thread must have set RCOMMIT.  This bit is used to
6367 6371   * serialize access to the commit structure in the rnode.  As long
6368 6372   * as the thread has set RCOMMIT, then it can manipulate the commit
6369 6373   * structure without requiring any other locks.
6370 6374   */
6371 6375  static void
6372 6376  nfs3_get_commit_range(vnode_t *vp, u_offset_t soff, size_t len)
6373 6377  {
6374 6378  
6375 6379          rnode_t *rp;
6376 6380          page_t *pp;
6377 6381          u_offset_t end;
6378 6382          u_offset_t off;
6379 6383  
6380 6384          ASSERT(len != 0);
6381 6385  
6382 6386          rp = VTOR(vp);
6383 6387  
6384 6388          ASSERT(rp->r_flags & RCOMMIT);
6385 6389          ASSERT(nfs_zone() == VTOMI(vp)->mi_zone);
6386 6390  
6387 6391          /*
6388 6392           * If there are no pages associated with this vnode, then
6389 6393           * just return.
6390 6394           */
6391 6395          if ((pp = vp->v_pages) == NULL)
6392 6396                  return;
6393 6397  
6394 6398          /*
6395 6399           * Calculate the ending offset.
6396 6400           */
6397 6401          end = soff + len;
6398 6402  
6399 6403          for (off = soff; off < end; off += PAGESIZE) {
6400 6404                  /*
6401 6405                   * Lookup each page by vp, offset.
6402 6406                   */
6403 6407                  if ((pp = page_lookup_nowait(vp, off, SE_EXCL)) == NULL)
6404 6408                          continue;
6405 6409  
6406 6410                  /*
6407 6411                   * If this page does not need to be committed or is
6408 6412                   * modified, then just skip it.
6409 6413                   */
6410 6414                  if (pp->p_fsdata == C_NOCOMMIT || hat_ismod(pp)) {
6411 6415                          page_unlock(pp);
6412 6416                          continue;
6413 6417                  }
6414 6418  
6415 6419                  ASSERT(PP_ISFREE(pp) == 0);
6416 6420  
6417 6421                  /*
6418 6422                   * The page needs to be committed and we locked it.
6419 6423                   * Update the base and length parameters and add it
6420 6424                   * to r_pages.
6421 6425                   */
6422 6426                  if (rp->r_commit.c_pages == NULL) {
6423 6427                          rp->r_commit.c_commbase = (offset3)pp->p_offset;
6424 6428                          rp->r_commit.c_commlen = PAGESIZE;
6425 6429                  } else {
6426 6430                          rp->r_commit.c_commlen = (offset3)pp->p_offset -
6427 6431                              rp->r_commit.c_commbase + PAGESIZE;
6428 6432                  }
6429 6433                  page_add(&rp->r_commit.c_pages, pp);
6430 6434          }
6431 6435  }
6432 6436  
/*
 * Flush the given range of the file to the server and then commit
 * any data that still needs to be committed.  The whole flush+commit
 * sequence is restarted whenever the server's write verifier changes
 * (detected either directly or via NFS_VERF_MISMATCH from the commit),
 * since that indicates the server may have lost data written since
 * the verifier was sampled.
 */
static int
nfs3_putpage_commit(vnode_t *vp, offset_t poff, size_t plen, cred_t *cr)
{
        int error;
        writeverf3 write_verf;
        rnode_t *rp = VTOR(vp);

        ASSERT(nfs_zone() == VTOMI(vp)->mi_zone);
        /*
         * Flush the data portion of the file and then commit any
         * portions which need to be committed.  This may need to
         * be done twice if the server has changed state since
         * data was last written.  The data will need to be
         * rewritten to the server and then a new commit done.
         *
         * In fact, this may need to be done several times if the
         * server is having problems and crashing while we are
         * attempting to do this.
         */

top:
        /*
         * Do a flush based on the poff and plen arguments.  This
         * will asynchronously write out any modified pages in the
         * range specified by (poff, plen).  This starts all of the
         * i/o operations which will be waited for in the next
         * call to nfs3_putpage
         */

        /* Snapshot the verifier before flushing so a change is seen. */
        mutex_enter(&rp->r_statelock);
        write_verf = rp->r_verf;
        mutex_exit(&rp->r_statelock);

        error = nfs3_putpage(vp, poff, plen, B_ASYNC, cr, NULL);
        if (error == EAGAIN)
                error = 0;

        /*
         * Do a flush based on the poff and plen arguments.  This
         * will synchronously write out any modified pages in the
         * range specified by (poff, plen) and wait until all of
         * the asynchronous i/o's in that range are done as well.
         */
        if (!error)
                error = nfs3_putpage(vp, poff, plen, 0, cr, NULL);

        if (error)
                return (error);

        /*
         * If the verifier changed while we were flushing, the data
         * just written may not be safe; start over.
         */
        mutex_enter(&rp->r_statelock);
        if (rp->r_verf != write_verf) {
                mutex_exit(&rp->r_statelock);
                goto top;
        }
        mutex_exit(&rp->r_statelock);

        /*
         * Now commit any pages which might need to be committed.
         * If the error, NFS_VERF_MISMATCH, is returned, then
         * start over with the flush operation.
         */

        error = nfs3_commit_vp(vp, poff, plen, cr);

        if (error == NFS_VERF_MISMATCH)
                goto top;

        return (error);
}
6502 6506  
/*
 * Commit any pages of the vnode which still need to be committed to
 * stable storage on the server.  A plen of zero means "the whole
 * file" (nfs3_get_commit); otherwise only pages within [poff,
 * poff + plen) are gathered (nfs3_get_commit_range).  Returns zero,
 * an error from the commit, or NFS_VERF_MISMATCH if the server's
 * write verifier changed.
 */
static int
nfs3_commit_vp(vnode_t *vp, u_offset_t poff, size_t plen, cred_t *cr)
{
        rnode_t *rp;
        page_t *plist;
        offset3 offset;
        count3 len;


        rp = VTOR(vp);

        if (nfs_zone() != VTOMI(vp)->mi_zone)
                return (EIO);
        /*
         * Set the `commit inprogress' state bit.  We must
         * first wait until any current one finishes.
         */
        mutex_enter(&rp->r_statelock);
        while (rp->r_flags & RCOMMIT) {
                rp->r_flags |= RCOMMITWAIT;
                cv_wait(&rp->r_commit.c_cv, &rp->r_statelock);
                rp->r_flags &= ~RCOMMITWAIT;
        }
        rp->r_flags |= RCOMMIT;
        mutex_exit(&rp->r_statelock);

        /*
         * Gather together all of the pages which need to be
         * committed.
         */
        if (plen == 0)
                nfs3_get_commit(vp);
        else
                nfs3_get_commit_range(vp, poff, plen);

        /*
         * Clear the `commit inprogress' bit and disconnect the
         * page list which was gathered together in nfs3_get_commit.
         */
        plist = rp->r_commit.c_pages;
        rp->r_commit.c_pages = NULL;
        offset = rp->r_commit.c_commbase;
        len = rp->r_commit.c_commlen;
        mutex_enter(&rp->r_statelock);
        rp->r_flags &= ~RCOMMIT;
        cv_broadcast(&rp->r_commit.c_cv);
        mutex_exit(&rp->r_statelock);

        /*
         * If any pages need to be committed, commit them and
         * then unlock them so that they can be freed some
         * time later.
         */
        if (plist != NULL) {
                /*
                 * No error occurred during the flush portion
                 * of this operation, so now attempt to commit
                 * the data to stable storage on the server.
                 *
                 * This will unlock all of the pages on the list.
                 */
                return (nfs3_sync_commit(vp, plist, offset, len, cr));
        }
        return (0);
}
6568 6572  
6569 6573  static int
6570 6574  nfs3_sync_commit(vnode_t *vp, page_t *plist, offset3 offset, count3 count,
6571 6575          cred_t *cr)
6572 6576  {
6573 6577          int error;
6574 6578          page_t *pp;
6575 6579  
6576 6580          ASSERT(nfs_zone() == VTOMI(vp)->mi_zone);
6577 6581          error = nfs3_commit(vp, offset, count, cr);
6578 6582  
6579 6583          /*
6580 6584           * If we got an error, then just unlock all of the pages
6581 6585           * on the list.
6582 6586           */
6583 6587          if (error) {
6584 6588                  while (plist != NULL) {
6585 6589                          pp = plist;
6586 6590                          page_sub(&plist, pp);
6587 6591                          page_unlock(pp);
6588 6592                  }
6589 6593                  return (error);
6590 6594          }
6591 6595          /*
6592 6596           * We've tried as hard as we can to commit the data to stable
6593 6597           * storage on the server.  We just unlock the pages and clear
6594 6598           * the commit required state.  They will get freed later.
6595 6599           */
6596 6600          while (plist != NULL) {
6597 6601                  pp = plist;
6598 6602                  page_sub(&plist, pp);
6599 6603                  pp->p_fsdata = C_NOCOMMIT;
6600 6604                  page_unlock(pp);
6601 6605          }
6602 6606  
6603 6607          return (error);
6604 6608  }
6605 6609  
/*
 * Commit callback handed to nfs_async_commit() (see nfs3_dispose):
 * performs the commit by calling nfs3_sync_commit() and discards
 * the result.  nfs3_sync_commit() unlocks all of the pages.
 */
static void
nfs3_async_commit(vnode_t *vp, page_t *plist, offset3 offset, count3 count,
        cred_t *cr)
{
        ASSERT(nfs_zone() == VTOMI(vp)->mi_zone);
        (void) nfs3_sync_commit(vp, plist, offset, count, cr);
}
6613 6617  
/*
 * Set the file's security attributes (ACL) via the ACL protocol when
 * the server supports it (MI_ACL); otherwise return ENOSYS.
 */
/* ARGSUSED */
static int
nfs3_setsecattr(vnode_t *vp, vsecattr_t *vsecattr, int flag, cred_t *cr,
        caller_context_t *ct)
{
        int error;
        mntinfo_t *mi;

        mi = VTOMI(vp);

        if (nfs_zone() != mi->mi_zone)
                return (EIO);

        if (mi->mi_flags & MI_ACL) {
                error = acl_setacl3(vp, vsecattr, flag, cr);
                /*
                 * NOTE(review): this recheck of MI_ACL is presumably
                 * not redundant -- acl_setacl3() appears able to clear
                 * MI_ACL when the server turns out not to support the
                 * ACL protocol, in which case we fall through to
                 * ENOSYS.  Confirm against acl_setacl3() before
                 * simplifying.
                 */
                if (mi->mi_flags & MI_ACL)
                        return (error);
        }

        return (ENOSYS);
}
6635 6639  
/*
 * Get the file's security attributes (ACL) via the ACL protocol when
 * the server supports it (MI_ACL); otherwise fall back to
 * fs_fab_acl().
 */
/* ARGSUSED */
static int
nfs3_getsecattr(vnode_t *vp, vsecattr_t *vsecattr, int flag, cred_t *cr,
        caller_context_t *ct)
{
        int error;
        mntinfo_t *mi;

        mi = VTOMI(vp);

        if (nfs_zone() != mi->mi_zone)
                return (EIO);

        if (mi->mi_flags & MI_ACL) {
                error = acl_getacl3(vp, vsecattr, flag, cr);
                /*
                 * NOTE(review): the recheck of MI_ACL is presumably
                 * not redundant -- acl_getacl3() appears able to clear
                 * MI_ACL when the server turns out not to support the
                 * ACL protocol, in which case we fall through to the
                 * fabricated ACL below.  Confirm against acl_getacl3()
                 * before simplifying.
                 */
                if (mi->mi_flags & MI_ACL)
                        return (error);
        }

        return (fs_fab_acl(vp, vsecattr, flag, cr, ct));
}
6657 6661  
6658 6662  /* ARGSUSED */
6659 6663  static int
6660 6664  nfs3_shrlock(vnode_t *vp, int cmd, struct shrlock *shr, int flag, cred_t *cr,
6661 6665          caller_context_t *ct)
6662 6666  {
6663 6667          int error;
6664 6668          struct shrlock nshr;
6665 6669          struct nfs_owner nfs_owner;
6666 6670          netobj lm_fh3;
6667 6671  
6668 6672          if (nfs_zone() != VTOMI(vp)->mi_zone)
6669 6673                  return (EIO);
6670 6674  
6671 6675          /*
6672 6676           * check for valid cmd parameter
6673 6677           */
6674 6678          if (cmd != F_SHARE && cmd != F_UNSHARE && cmd != F_HASREMOTELOCKS)
6675 6679                  return (EINVAL);
6676 6680  
6677 6681          /*
6678 6682           * Check access permissions
6679 6683           */
6680 6684          if (cmd == F_SHARE &&
6681 6685              (((shr->s_access & F_RDACC) && !(flag & FREAD)) ||
6682 6686              ((shr->s_access & F_WRACC) && !(flag & FWRITE))))
6683 6687                  return (EBADF);
6684 6688  
6685 6689          /*
6686 6690           * If the filesystem is mounted using local locking, pass the
6687 6691           * request off to the local share code.
6688 6692           */
6689 6693          if (VTOMI(vp)->mi_flags & MI_LLOCK)
6690 6694                  return (fs_shrlock(vp, cmd, shr, flag, cr, ct));
6691 6695  
6692 6696          switch (cmd) {
6693 6697          case F_SHARE:
6694 6698          case F_UNSHARE:
6695 6699                  lm_fh3.n_len = VTOFH3(vp)->fh3_length;
6696 6700                  lm_fh3.n_bytes = (char *)&(VTOFH3(vp)->fh3_u.data);
6697 6701  
6698 6702                  /*
6699 6703                   * If passed an owner that is too large to fit in an
6700 6704                   * nfs_owner it is likely a recursive call from the
6701 6705                   * lock manager client and pass it straight through.  If
6702 6706                   * it is not a nfs_owner then simply return an error.
6703 6707                   */
6704 6708                  if (shr->s_own_len > sizeof (nfs_owner.lowner)) {
6705 6709                          if (((struct nfs_owner *)shr->s_owner)->magic !=
6706 6710                              NFS_OWNER_MAGIC)
6707 6711                                  return (EINVAL);
6708 6712  
6709 6713                          if (error = lm4_shrlock(vp, cmd, shr, flag, &lm_fh3)) {
6710 6714                                  error = set_errno(error);
6711 6715                          }
6712 6716                          return (error);
6713 6717                  }
6714 6718                  /*
6715 6719                   * Remote share reservations owner is a combination of
6716 6720                   * a magic number, hostname, and the local owner
6717 6721                   */
6718 6722                  bzero(&nfs_owner, sizeof (nfs_owner));
6719 6723                  nfs_owner.magic = NFS_OWNER_MAGIC;
6720 6724                  (void) strncpy(nfs_owner.hname, uts_nodename(),
6721 6725                      sizeof (nfs_owner.hname));
6722 6726                  bcopy(shr->s_owner, nfs_owner.lowner, shr->s_own_len);
6723 6727                  nshr.s_access = shr->s_access;
6724 6728                  nshr.s_deny = shr->s_deny;
6725 6729                  nshr.s_sysid = 0;
6726 6730                  nshr.s_pid = ttoproc(curthread)->p_pid;
6727 6731                  nshr.s_own_len = sizeof (nfs_owner);
6728 6732                  nshr.s_owner = (caddr_t)&nfs_owner;
6729 6733  
6730 6734                  if (error = lm4_shrlock(vp, cmd, &nshr, flag, &lm_fh3)) {
6731 6735                          error = set_errno(error);
6732 6736                  }
6733 6737  
6734 6738                  break;
6735 6739  
6736 6740          case F_HASREMOTELOCKS:
6737 6741                  /*
6738 6742                   * NFS client can't store remote locks itself
6739 6743                   */
6740 6744                  shr->s_access = 0;
6741 6745                  error = 0;
6742 6746                  break;
6743 6747  
6744 6748          default:
6745 6749                  error = EINVAL;
6746 6750                  break;
6747 6751          }
6748 6752  
6749 6753          return (error);
6750 6754  }
  
    | 
      ↓ open down ↓ | 
    1213 lines elided | 
    
      ↑ open up ↑ | 
  
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX