Print this page
    
OS-5148 ftruncate at offset should emit proper events
Reviewed by: Bryan Cantrill <bryan@joyent.com>
Reviewed by: Jerry Jelinek <jerry.jelinek@joyent.com>
OS-3462 rename on NFSv4 filesystem induces panic
OS-3294 add support for inotify
Reviewed by: Jerry Jelinek <jerry.jelinek@joyent.com>
Reviewed by: Robert Mustacchi <rm@joyent.com>
    
      
        | Split | 
	Close | 
      
      | Expand all | 
      | Collapse all | 
    
    
          --- old/usr/src/uts/common/fs/nfs/nfs4_vnops.c
          +++ new/usr/src/uts/common/fs/nfs/nfs4_vnops.c
   1    1  /*
   2    2   * CDDL HEADER START
   3    3   *
   4    4   * The contents of this file are subject to the terms of the
   5    5   * Common Development and Distribution License (the "License").
   6    6   * You may not use this file except in compliance with the License.
   7    7   *
   8    8   * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9    9   * or http://www.opensolaris.org/os/licensing.
  10   10   * See the License for the specific language governing permissions
  11   11   * and limitations under the License.
  12   12   *
  13   13   * When distributing Covered Code, include this CDDL HEADER in each
  14   14   * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15   15   * If applicable, add the following below this CDDL HEADER, with the
  16   16   * fields enclosed by brackets "[]" replaced with your own identifying
  17   17   * information: Portions Copyright [yyyy] [name of copyright owner]
  18   18   *
  19   19   * CDDL HEADER END
  20   20   */
  21   21  
  22   22  /*
  23   23   * Copyright 2015 Nexenta Systems, Inc.  All rights reserved.
  24   24   */
  25   25  
  26   26  /*
  
    | 
      ↓ open down ↓ | 
    26 lines elided | 
    
      ↑ open up ↑ | 
  
  27   27   * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
  28   28   * Use is subject to license terms.
  29   29   */
  30   30  
  31   31  /*
  32   32   *      Copyright 1983,1984,1985,1986,1987,1988,1989 AT&T.
  33   33   *      All Rights Reserved
  34   34   */
  35   35  
  36   36  /*
  37      - * Copyright (c) 2013, Joyent, Inc. All rights reserved.
       37 + * Copyright (c) 2014, Joyent, Inc. All rights reserved.
  38   38   */
  39   39  
  40   40  #include <sys/param.h>
  41   41  #include <sys/types.h>
  42   42  #include <sys/systm.h>
  43   43  #include <sys/cred.h>
  44   44  #include <sys/time.h>
  45   45  #include <sys/vnode.h>
  46   46  #include <sys/vfs.h>
  47   47  #include <sys/vfs_opreg.h>
  48   48  #include <sys/file.h>
  49   49  #include <sys/filio.h>
  50   50  #include <sys/uio.h>
  51   51  #include <sys/buf.h>
  52   52  #include <sys/mman.h>
  53   53  #include <sys/pathname.h>
  54   54  #include <sys/dirent.h>
  55   55  #include <sys/debug.h>
  56   56  #include <sys/vmsystm.h>
  57   57  #include <sys/fcntl.h>
  58   58  #include <sys/flock.h>
  59   59  #include <sys/swap.h>
  60   60  #include <sys/errno.h>
  61   61  #include <sys/strsubr.h>
  62   62  #include <sys/sysmacros.h>
  63   63  #include <sys/kmem.h>
  64   64  #include <sys/cmn_err.h>
  65   65  #include <sys/pathconf.h>
  66   66  #include <sys/utsname.h>
  67   67  #include <sys/dnlc.h>
  68   68  #include <sys/acl.h>
  69   69  #include <sys/systeminfo.h>
  70   70  #include <sys/policy.h>
  71   71  #include <sys/sdt.h>
  72   72  #include <sys/list.h>
  73   73  #include <sys/stat.h>
  74   74  #include <sys/zone.h>
  75   75  
  76   76  #include <rpc/types.h>
  77   77  #include <rpc/auth.h>
  78   78  #include <rpc/clnt.h>
  79   79  
  80   80  #include <nfs/nfs.h>
  81   81  #include <nfs/nfs_clnt.h>
  82   82  #include <nfs/nfs_acl.h>
  83   83  #include <nfs/lm.h>
  84   84  #include <nfs/nfs4.h>
  85   85  #include <nfs/nfs4_kprot.h>
  86   86  #include <nfs/rnode4.h>
  87   87  #include <nfs/nfs4_clnt.h>
  88   88  
  89   89  #include <vm/hat.h>
  90   90  #include <vm/as.h>
  91   91  #include <vm/page.h>
  92   92  #include <vm/pvn.h>
  93   93  #include <vm/seg.h>
  94   94  #include <vm/seg_map.h>
  95   95  #include <vm/seg_kpm.h>
  96   96  #include <vm/seg_vn.h>
  97   97  
  98   98  #include <fs/fs_subr.h>
  99   99  
 100  100  #include <sys/ddi.h>
 101  101  #include <sys/int_fmtio.h>
 102  102  #include <sys/fs/autofs.h>
 103  103  
 104  104  typedef struct {
 105  105          nfs4_ga_res_t   *di_garp;
 106  106          cred_t          *di_cred;
 107  107          hrtime_t        di_time_call;
 108  108  } dirattr_info_t;
 109  109  
 110  110  typedef enum nfs4_acl_op {
 111  111          NFS4_ACL_GET,
 112  112          NFS4_ACL_SET
 113  113  } nfs4_acl_op_t;
 114  114  
 115  115  static struct lm_sysid *nfs4_find_sysid(mntinfo4_t *mi);
 116  116  
 117  117  static void     nfs4_update_dircaches(change_info4 *, vnode_t *, vnode_t *,
 118  118                          char *, dirattr_info_t *);
 119  119  
 120  120  static void     nfs4close_otw(rnode4_t *, cred_t *, nfs4_open_owner_t *,
 121  121                      nfs4_open_stream_t *, int *, int *, nfs4_close_type_t,
 122  122                      nfs4_error_t *, int *);
 123  123  static int      nfs4_rdwrlbn(vnode_t *, page_t *, u_offset_t, size_t, int,
 124  124                          cred_t *);
 125  125  static int      nfs4write(vnode_t *, caddr_t, u_offset_t, int, cred_t *,
 126  126                          stable_how4 *);
 127  127  static int      nfs4read(vnode_t *, caddr_t, offset_t, int, size_t *,
 128  128                          cred_t *, bool_t, struct uio *);
 129  129  static int      nfs4setattr(vnode_t *, struct vattr *, int, cred_t *,
 130  130                          vsecattr_t *);
 131  131  static int      nfs4openattr(vnode_t *, vnode_t **, int, cred_t *);
 132  132  static int      nfs4lookup(vnode_t *, char *, vnode_t **, cred_t *, int);
 133  133  static int      nfs4lookup_xattr(vnode_t *, char *, vnode_t **, int, cred_t *);
 134  134  static int      nfs4lookupvalidate_otw(vnode_t *, char *, vnode_t **, cred_t *);
 135  135  static int      nfs4lookupnew_otw(vnode_t *, char *, vnode_t **, cred_t *);
 136  136  static int      nfs4mknod(vnode_t *, char *, struct vattr *, enum vcexcl,
 137  137                          int, vnode_t **, cred_t *);
 138  138  static int      nfs4open_otw(vnode_t *, char *, struct vattr *, vnode_t **,
 139  139                          cred_t *, int, int, enum createmode4, int);
 140  140  static int      nfs4rename(vnode_t *, char *, vnode_t *, char *, cred_t *,
 141  141                          caller_context_t *);
 142  142  static int      nfs4rename_persistent_fh(vnode_t *, char *, vnode_t *,
 143  143                          vnode_t *, char *, cred_t *, nfsstat4 *);
 144  144  static int      nfs4rename_volatile_fh(vnode_t *, char *, vnode_t *,
 145  145                          vnode_t *, char *, cred_t *, nfsstat4 *);
 146  146  static int      do_nfs4readdir(vnode_t *, rddir4_cache *, cred_t *);
 147  147  static void     nfs4readdir(vnode_t *, rddir4_cache *, cred_t *);
 148  148  static int      nfs4_bio(struct buf *, stable_how4 *, cred_t *, bool_t);
 149  149  static int      nfs4_getapage(vnode_t *, u_offset_t, size_t, uint_t *,
 150  150                          page_t *[], size_t, struct seg *, caddr_t,
 151  151                          enum seg_rw, cred_t *);
 152  152  static void     nfs4_readahead(vnode_t *, u_offset_t, caddr_t, struct seg *,
 153  153                          cred_t *);
 154  154  static int      nfs4_sync_putapage(vnode_t *, page_t *, u_offset_t, size_t,
 155  155                          int, cred_t *);
 156  156  static int      nfs4_sync_pageio(vnode_t *, page_t *, u_offset_t, size_t,
 157  157                          int, cred_t *);
 158  158  static int      nfs4_commit(vnode_t *, offset4, count4, cred_t *);
 159  159  static void     nfs4_set_mod(vnode_t *);
 160  160  static void     nfs4_get_commit(vnode_t *);
 161  161  static void     nfs4_get_commit_range(vnode_t *, u_offset_t, size_t);
 162  162  static int      nfs4_putpage_commit(vnode_t *, offset_t, size_t, cred_t *);
 163  163  static int      nfs4_commit_vp(vnode_t *, u_offset_t, size_t, cred_t *, int);
 164  164  static int      nfs4_sync_commit(vnode_t *, page_t *, offset3, count3,
 165  165                          cred_t *);
 166  166  static void     do_nfs4_async_commit(vnode_t *, page_t *, offset3, count3,
 167  167                          cred_t *);
 168  168  static int      nfs4_update_attrcache(nfsstat4, nfs4_ga_res_t *,
 169  169                          hrtime_t, vnode_t *, cred_t *);
 170  170  static int      nfs4_open_non_reg_file(vnode_t **, int, cred_t *);
 171  171  static int      nfs4_safelock(vnode_t *, const struct flock64 *, cred_t *);
 172  172  static void     nfs4_register_lock_locally(vnode_t *, struct flock64 *, int,
 173  173                          u_offset_t);
 174  174  static int      nfs4_lockrelease(vnode_t *, int, offset_t, cred_t *);
 175  175  static int      nfs4_block_and_wait(clock_t *, rnode4_t *);
 176  176  static cred_t  *state_to_cred(nfs4_open_stream_t *);
 177  177  static void     denied_to_flk(LOCK4denied *, flock64_t *, LOCKT4args *);
 178  178  static pid_t    lo_to_pid(lock_owner4 *);
 179  179  static void     nfs4_reinstitute_local_lock_state(vnode_t *, flock64_t *,
 180  180                          cred_t *, nfs4_lock_owner_t *);
 181  181  static void     push_reinstate(vnode_t *, int, flock64_t *, cred_t *,
 182  182                          nfs4_lock_owner_t *);
 183  183  static int      open_and_get_osp(vnode_t *, cred_t *, nfs4_open_stream_t **);
 184  184  static void     nfs4_delmap_callback(struct as *, void *, uint_t);
 185  185  static void     nfs4_free_delmapcall(nfs4_delmapcall_t *);
 186  186  static nfs4_delmapcall_t        *nfs4_init_delmapcall();
 187  187  static int      nfs4_find_and_delete_delmapcall(rnode4_t *, int *);
 188  188  static int      nfs4_is_acl_mask_valid(uint_t, nfs4_acl_op_t);
 189  189  static int      nfs4_create_getsecattr_return(vsecattr_t *, vsecattr_t *,
 190  190                          uid_t, gid_t, int);
 191  191  
 192  192  /*
 193  193   * Routines that implement the setting of v4 args for the misc. ops
 194  194   */
 195  195  static void     nfs4args_lock_free(nfs_argop4 *);
 196  196  static void     nfs4args_lockt_free(nfs_argop4 *);
 197  197  static void     nfs4args_setattr(nfs_argop4 *, vattr_t *, vsecattr_t *,
 198  198                          int, rnode4_t *, cred_t *, bitmap4, int *,
 199  199                          nfs4_stateid_types_t *);
 200  200  static void     nfs4args_setattr_free(nfs_argop4 *);
 201  201  static int      nfs4args_verify(nfs_argop4 *, vattr_t *, enum nfs_opnum4,
 202  202                          bitmap4);
 203  203  static void     nfs4args_verify_free(nfs_argop4 *);
 204  204  static void     nfs4args_write(nfs_argop4 *, stable_how4, rnode4_t *, cred_t *,
 205  205                          WRITE4args **, nfs4_stateid_types_t *);
 206  206  
 207  207  /*
 208  208   * These are the vnode ops functions that implement the vnode interface to
 209  209   * the networked file system.  See more comments below at nfs4_vnodeops.
 210  210   */
 211  211  static int      nfs4_open(vnode_t **, int, cred_t *, caller_context_t *);
 212  212  static int      nfs4_close(vnode_t *, int, int, offset_t, cred_t *,
 213  213                          caller_context_t *);
 214  214  static int      nfs4_read(vnode_t *, struct uio *, int, cred_t *,
 215  215                          caller_context_t *);
 216  216  static int      nfs4_write(vnode_t *, struct uio *, int, cred_t *,
 217  217                          caller_context_t *);
 218  218  static int      nfs4_ioctl(vnode_t *, int, intptr_t, int, cred_t *, int *,
 219  219                          caller_context_t *);
 220  220  static int      nfs4_setattr(vnode_t *, struct vattr *, int, cred_t *,
 221  221                          caller_context_t *);
 222  222  static int      nfs4_access(vnode_t *, int, int, cred_t *, caller_context_t *);
 223  223  static int      nfs4_readlink(vnode_t *, struct uio *, cred_t *,
 224  224                          caller_context_t *);
 225  225  static int      nfs4_fsync(vnode_t *, int, cred_t *, caller_context_t *);
 226  226  static int      nfs4_create(vnode_t *, char *, struct vattr *, enum vcexcl,
 227  227                          int, vnode_t **, cred_t *, int, caller_context_t *,
 228  228                          vsecattr_t *);
 229  229  static int      nfs4_remove(vnode_t *, char *, cred_t *, caller_context_t *,
 230  230                          int);
 231  231  static int      nfs4_link(vnode_t *, vnode_t *, char *, cred_t *,
 232  232                          caller_context_t *, int);
 233  233  static int      nfs4_rename(vnode_t *, char *, vnode_t *, char *, cred_t *,
 234  234                          caller_context_t *, int);
 235  235  static int      nfs4_mkdir(vnode_t *, char *, struct vattr *, vnode_t **,
 236  236                          cred_t *, caller_context_t *, int, vsecattr_t *);
 237  237  static int      nfs4_rmdir(vnode_t *, char *, vnode_t *, cred_t *,
 238  238                          caller_context_t *, int);
 239  239  static int      nfs4_symlink(vnode_t *, char *, struct vattr *, char *,
 240  240                          cred_t *, caller_context_t *, int);
 241  241  static int      nfs4_readdir(vnode_t *, struct uio *, cred_t *, int *,
 242  242                          caller_context_t *, int);
 243  243  static int      nfs4_seek(vnode_t *, offset_t, offset_t *, caller_context_t *);
 244  244  static int      nfs4_getpage(vnode_t *, offset_t, size_t, uint_t *,
 245  245                          page_t *[], size_t, struct seg *, caddr_t,
 246  246                          enum seg_rw, cred_t *, caller_context_t *);
 247  247  static int      nfs4_putpage(vnode_t *, offset_t, size_t, int, cred_t *,
 248  248                          caller_context_t *);
 249  249  static int      nfs4_map(vnode_t *, offset_t, struct as *, caddr_t *, size_t,
 250  250                          uchar_t, uchar_t, uint_t, cred_t *, caller_context_t *);
 251  251  static int      nfs4_addmap(vnode_t *, offset_t, struct as *, caddr_t, size_t,
 252  252                          uchar_t, uchar_t, uint_t, cred_t *, caller_context_t *);
 253  253  static int      nfs4_cmp(vnode_t *, vnode_t *, caller_context_t *);
 254  254  static int      nfs4_frlock(vnode_t *, int, struct flock64 *, int, offset_t,
 255  255                          struct flk_callback *, cred_t *, caller_context_t *);
 256  256  static int      nfs4_space(vnode_t *, int, struct flock64 *, int, offset_t,
 257  257                          cred_t *, caller_context_t *);
 258  258  static int      nfs4_delmap(vnode_t *, offset_t, struct as *, caddr_t, size_t,
 259  259                          uint_t, uint_t, uint_t, cred_t *, caller_context_t *);
 260  260  static int      nfs4_pageio(vnode_t *, page_t *, u_offset_t, size_t, int,
 261  261                          cred_t *, caller_context_t *);
 262  262  static void     nfs4_dispose(vnode_t *, page_t *, int, int, cred_t *,
 263  263                          caller_context_t *);
 264  264  static int      nfs4_setsecattr(vnode_t *, vsecattr_t *, int, cred_t *,
 265  265                          caller_context_t *);
 266  266  /*
 267  267   * These vnode ops are required to be called from outside this source file,
 268  268   * e.g. by ephemeral mount stub vnode ops, and so may not be declared
 269  269   * as static.
 270  270   */
 271  271  int     nfs4_getattr(vnode_t *, struct vattr *, int, cred_t *,
 272  272              caller_context_t *);
 273  273  void    nfs4_inactive(vnode_t *, cred_t *, caller_context_t *);
 274  274  int     nfs4_lookup(vnode_t *, char *, vnode_t **,
 275  275              struct pathname *, int, vnode_t *, cred_t *,
 276  276              caller_context_t *, int *, pathname_t *);
 277  277  int     nfs4_fid(vnode_t *, fid_t *, caller_context_t *);
 278  278  int     nfs4_rwlock(vnode_t *, int, caller_context_t *);
 279  279  void    nfs4_rwunlock(vnode_t *, int, caller_context_t *);
 280  280  int     nfs4_realvp(vnode_t *, vnode_t **, caller_context_t *);
 281  281  int     nfs4_pathconf(vnode_t *, int, ulong_t *, cred_t *,
 282  282              caller_context_t *);
 283  283  int     nfs4_getsecattr(vnode_t *, vsecattr_t *, int, cred_t *,
 284  284              caller_context_t *);
 285  285  int     nfs4_shrlock(vnode_t *, int, struct shrlock *, int, cred_t *,
 286  286              caller_context_t *);
 287  287  
 288  288  /*
 289  289   * Used for nfs4_commit_vp() to indicate if we should
 290  290   * wait on pending writes.
 291  291   */
 292  292  #define NFS4_WRITE_NOWAIT       0
 293  293  #define NFS4_WRITE_WAIT         1
 294  294  
 295  295  #define NFS4_BASE_WAIT_TIME 1   /* 1 second */
 296  296  
 297  297  /*
 298  298   * Error flags used to pass information about certain special errors
 299  299   * which need to be handled specially.
 300  300   */
 301  301  #define NFS_EOF                 -98
 302  302  #define NFS_VERF_MISMATCH       -97
 303  303  
 304  304  /*
 305  305   * Flags used to differentiate between which operation drove the
 306  306   * potential CLOSE OTW. (see nfs4_close_otw_if_necessary)
 307  307   */
 308  308  #define NFS4_CLOSE_OP           0x1
 309  309  #define NFS4_DELMAP_OP          0x2
 310  310  #define NFS4_INACTIVE_OP        0x3
 311  311  
 312  312  #define ISVDEV(t) ((t == VBLK) || (t == VCHR) || (t == VFIFO))
 313  313  
 314  314  /* ALIGN64 aligns the given buffer and adjust buffer size to 64 bit */
 315  315  #define ALIGN64(x, ptr, sz)                                             \
 316  316          x = ((uintptr_t)(ptr)) & (sizeof (uint64_t) - 1);               \
 317  317          if (x) {                                                        \
 318  318                  x = sizeof (uint64_t) - (x);                            \
 319  319                  sz -= (x);                                              \
 320  320                  ptr += (x);                                             \
 321  321          }
 322  322  
 323  323  #ifdef DEBUG
 324  324  int nfs4_client_attr_debug = 0;
 325  325  int nfs4_client_state_debug = 0;
 326  326  int nfs4_client_shadow_debug = 0;
 327  327  int nfs4_client_lock_debug = 0;
 328  328  int nfs4_seqid_sync = 0;
 329  329  int nfs4_client_map_debug = 0;
 330  330  static int nfs4_pageio_debug = 0;
 331  331  int nfs4_client_inactive_debug = 0;
 332  332  int nfs4_client_recov_debug = 0;
 333  333  int nfs4_client_failover_debug = 0;
 334  334  int nfs4_client_call_debug = 0;
 335  335  int nfs4_client_lookup_debug = 0;
 336  336  int nfs4_client_zone_debug = 0;
 337  337  int nfs4_lost_rqst_debug = 0;
 338  338  int nfs4_rdattrerr_debug = 0;
 339  339  int nfs4_open_stream_debug = 0;
 340  340  
 341  341  int nfs4read_error_inject;
 342  342  
 343  343  static int nfs4_create_misses = 0;
 344  344  
 345  345  static int nfs4_readdir_cache_shorts = 0;
 346  346  static int nfs4_readdir_readahead = 0;
 347  347  
 348  348  static int nfs4_bio_do_stop = 0;
 349  349  
 350  350  static int nfs4_lostpage = 0;   /* number of times we lost original page */
 351  351  
 352  352  int nfs4_mmap_debug = 0;
 353  353  
 354  354  static int nfs4_pathconf_cache_hits = 0;
 355  355  static int nfs4_pathconf_cache_misses = 0;
 356  356  
 357  357  int nfs4close_all_cnt;
 358  358  int nfs4close_one_debug = 0;
 359  359  int nfs4close_notw_debug = 0;
 360  360  
 361  361  int denied_to_flk_debug = 0;
 362  362  void *lockt_denied_debug;
 363  363  
 364  364  #endif
 365  365  
 366  366  /*
 367  367   * How long to wait before trying again if OPEN_CONFIRM gets ETIMEDOUT
 368  368   * or NFS4ERR_RESOURCE.
 369  369   */
 370  370  static int confirm_retry_sec = 30;
 371  371  
 372  372  static int nfs4_lookup_neg_cache = 1;
 373  373  
 374  374  /*
 375  375   * number of pages to read ahead
 376  376   * optimized for 100 base-T.
 377  377   */
 378  378  static int nfs4_nra = 4;
 379  379  
 380  380  static int nfs4_do_symlink_cache = 1;
 381  381  
 382  382  static int nfs4_pathconf_disable_cache = 0;
 383  383  
 384  384  /*
 385  385   * These are the vnode ops routines which implement the vnode interface to
 386  386   * the networked file system.  These routines just take their parameters,
 387  387   * make them look networkish by putting the right info into interface structs,
 388  388   * and then calling the appropriate remote routine(s) to do the work.
 389  389   *
  390  390   * Note on directory name lookup caching:  If we detect a stale fhandle,
 391  391   * we purge the directory cache relative to that vnode.  This way, the
 392  392   * user won't get burned by the cache repeatedly.  See <nfs/rnode4.h> for
 393  393   * more details on rnode locking.
 394  394   */
 395  395  
 396  396  struct vnodeops *nfs4_vnodeops;
 397  397  
 398  398  const fs_operation_def_t nfs4_vnodeops_template[] = {
 399  399          VOPNAME_OPEN,           { .vop_open = nfs4_open },
 400  400          VOPNAME_CLOSE,          { .vop_close = nfs4_close },
 401  401          VOPNAME_READ,           { .vop_read = nfs4_read },
 402  402          VOPNAME_WRITE,          { .vop_write = nfs4_write },
 403  403          VOPNAME_IOCTL,          { .vop_ioctl = nfs4_ioctl },
 404  404          VOPNAME_GETATTR,        { .vop_getattr = nfs4_getattr },
 405  405          VOPNAME_SETATTR,        { .vop_setattr = nfs4_setattr },
 406  406          VOPNAME_ACCESS,         { .vop_access = nfs4_access },
 407  407          VOPNAME_LOOKUP,         { .vop_lookup = nfs4_lookup },
 408  408          VOPNAME_CREATE,         { .vop_create = nfs4_create },
 409  409          VOPNAME_REMOVE,         { .vop_remove = nfs4_remove },
 410  410          VOPNAME_LINK,           { .vop_link = nfs4_link },
 411  411          VOPNAME_RENAME,         { .vop_rename = nfs4_rename },
 412  412          VOPNAME_MKDIR,          { .vop_mkdir = nfs4_mkdir },
 413  413          VOPNAME_RMDIR,          { .vop_rmdir = nfs4_rmdir },
 414  414          VOPNAME_READDIR,        { .vop_readdir = nfs4_readdir },
 415  415          VOPNAME_SYMLINK,        { .vop_symlink = nfs4_symlink },
 416  416          VOPNAME_READLINK,       { .vop_readlink = nfs4_readlink },
 417  417          VOPNAME_FSYNC,          { .vop_fsync = nfs4_fsync },
 418  418          VOPNAME_INACTIVE,       { .vop_inactive = nfs4_inactive },
 419  419          VOPNAME_FID,            { .vop_fid = nfs4_fid },
 420  420          VOPNAME_RWLOCK,         { .vop_rwlock = nfs4_rwlock },
 421  421          VOPNAME_RWUNLOCK,       { .vop_rwunlock = nfs4_rwunlock },
 422  422          VOPNAME_SEEK,           { .vop_seek = nfs4_seek },
 423  423          VOPNAME_FRLOCK,         { .vop_frlock = nfs4_frlock },
 424  424          VOPNAME_SPACE,          { .vop_space = nfs4_space },
 425  425          VOPNAME_REALVP,         { .vop_realvp = nfs4_realvp },
 426  426          VOPNAME_GETPAGE,        { .vop_getpage = nfs4_getpage },
 427  427          VOPNAME_PUTPAGE,        { .vop_putpage = nfs4_putpage },
 428  428          VOPNAME_MAP,            { .vop_map = nfs4_map },
 429  429          VOPNAME_ADDMAP,         { .vop_addmap = nfs4_addmap },
 430  430          VOPNAME_DELMAP,         { .vop_delmap = nfs4_delmap },
 431  431          /* no separate nfs4_dump */
 432  432          VOPNAME_DUMP,           { .vop_dump = nfs_dump },
 433  433          VOPNAME_PATHCONF,       { .vop_pathconf = nfs4_pathconf },
 434  434          VOPNAME_PAGEIO,         { .vop_pageio = nfs4_pageio },
 435  435          VOPNAME_DISPOSE,        { .vop_dispose = nfs4_dispose },
 436  436          VOPNAME_SETSECATTR,     { .vop_setsecattr = nfs4_setsecattr },
 437  437          VOPNAME_GETSECATTR,     { .vop_getsecattr = nfs4_getsecattr },
 438  438          VOPNAME_SHRLOCK,        { .vop_shrlock = nfs4_shrlock },
 439  439          VOPNAME_VNEVENT,        { .vop_vnevent = fs_vnevent_support },
 440  440          NULL,                   NULL
 441  441  };
 442  442  
 443  443  /*
 444  444   * The following are subroutines and definitions to set args or get res
 445  445   * for the different nfsv4 ops
 446  446   */
 447  447  
 448  448  void
 449  449  nfs4args_lookup_free(nfs_argop4 *argop, int arglen)
 450  450  {
 451  451          int             i;
 452  452  
 453  453          for (i = 0; i < arglen; i++) {
 454  454                  if (argop[i].argop == OP_LOOKUP) {
 455  455                          kmem_free(
 456  456                              argop[i].nfs_argop4_u.oplookup.
 457  457                              objname.utf8string_val,
 458  458                              argop[i].nfs_argop4_u.oplookup.
 459  459                              objname.utf8string_len);
 460  460                  }
 461  461          }
 462  462  }
 463  463  
 464  464  static void
 465  465  nfs4args_lock_free(nfs_argop4 *argop)
 466  466  {
 467  467          locker4 *locker = &argop->nfs_argop4_u.oplock.locker;
 468  468  
 469  469          if (locker->new_lock_owner == TRUE) {
 470  470                  open_to_lock_owner4 *open_owner;
 471  471  
 472  472                  open_owner = &locker->locker4_u.open_owner;
 473  473                  if (open_owner->lock_owner.owner_val != NULL) {
 474  474                          kmem_free(open_owner->lock_owner.owner_val,
 475  475                              open_owner->lock_owner.owner_len);
 476  476                  }
 477  477          }
 478  478  }
 479  479  
 480  480  static void
 481  481  nfs4args_lockt_free(nfs_argop4 *argop)
 482  482  {
 483  483          lock_owner4 *lowner = &argop->nfs_argop4_u.oplockt.owner;
 484  484  
 485  485          if (lowner->owner_val != NULL) {
 486  486                  kmem_free(lowner->owner_val, lowner->owner_len);
 487  487          }
 488  488  }
 489  489  
 490  490  static void
 491  491  nfs4args_setattr(nfs_argop4 *argop, vattr_t *vap, vsecattr_t *vsap, int flags,
 492  492      rnode4_t *rp, cred_t *cr, bitmap4 supp, int *error,
 493  493      nfs4_stateid_types_t *sid_types)
 494  494  {
 495  495          fattr4          *attr = &argop->nfs_argop4_u.opsetattr.obj_attributes;
 496  496          mntinfo4_t      *mi;
 497  497  
 498  498          argop->argop = OP_SETATTR;
 499  499          /*
 500  500           * The stateid is set to 0 if client is not modifying the size
 501  501           * and otherwise to whatever nfs4_get_stateid() returns.
 502  502           *
 503  503           * XXX Note: nfs4_get_stateid() returns 0 if no lockowner and/or no
 504  504           * state struct could be found for the process/file pair.  We may
 505  505           * want to change this in the future (by OPENing the file).  See
 506  506           * bug # 4474852.
 507  507           */
 508  508          if (vap->va_mask & AT_SIZE) {
 509  509  
 510  510                  ASSERT(rp != NULL);
 511  511                  mi = VTOMI4(RTOV4(rp));
 512  512  
 513  513                  argop->nfs_argop4_u.opsetattr.stateid =
 514  514                      nfs4_get_stateid(cr, rp, curproc->p_pidp->pid_id, mi,
 515  515                      OP_SETATTR, sid_types, FALSE);
 516  516          } else {
 517  517                  bzero(&argop->nfs_argop4_u.opsetattr.stateid,
 518  518                      sizeof (stateid4));
 519  519          }
 520  520  
 521  521          *error = vattr_to_fattr4(vap, vsap, attr, flags, OP_SETATTR, supp);
 522  522          if (*error)
 523  523                  bzero(attr, sizeof (*attr));
 524  524  }
 525  525  
 526  526  static void
 527  527  nfs4args_setattr_free(nfs_argop4 *argop)
 528  528  {
 529  529          nfs4_fattr4_free(&argop->nfs_argop4_u.opsetattr.obj_attributes);
 530  530  }
 531  531  
 532  532  static int
 533  533  nfs4args_verify(nfs_argop4 *argop, vattr_t *vap, enum nfs_opnum4 op,
 534  534      bitmap4 supp)
 535  535  {
 536  536          fattr4 *attr;
 537  537          int error = 0;
 538  538  
 539  539          argop->argop = op;
 540  540          switch (op) {
 541  541          case OP_VERIFY:
 542  542                  attr = &argop->nfs_argop4_u.opverify.obj_attributes;
 543  543                  break;
 544  544          case OP_NVERIFY:
 545  545                  attr = &argop->nfs_argop4_u.opnverify.obj_attributes;
 546  546                  break;
 547  547          default:
 548  548                  return (EINVAL);
 549  549          }
 550  550          if (!error)
 551  551                  error = vattr_to_fattr4(vap, NULL, attr, 0, op, supp);
 552  552          if (error)
 553  553                  bzero(attr, sizeof (*attr));
 554  554          return (error);
 555  555  }
 556  556  
 557  557  static void
 558  558  nfs4args_verify_free(nfs_argop4 *argop)
 559  559  {
 560  560          switch (argop->argop) {
 561  561          case OP_VERIFY:
 562  562                  nfs4_fattr4_free(&argop->nfs_argop4_u.opverify.obj_attributes);
 563  563                  break;
 564  564          case OP_NVERIFY:
 565  565                  nfs4_fattr4_free(&argop->nfs_argop4_u.opnverify.obj_attributes);
 566  566                  break;
 567  567          default:
 568  568                  break;
 569  569          }
 570  570  }
 571  571  
 572  572  static void
 573  573  nfs4args_write(nfs_argop4 *argop, stable_how4 stable, rnode4_t *rp, cred_t *cr,
 574  574      WRITE4args **wargs_pp, nfs4_stateid_types_t *sid_tp)
 575  575  {
 576  576          WRITE4args *wargs = &argop->nfs_argop4_u.opwrite;
 577  577          mntinfo4_t *mi = VTOMI4(RTOV4(rp));
 578  578  
 579  579          argop->argop = OP_WRITE;
 580  580          wargs->stable = stable;
 581  581          wargs->stateid = nfs4_get_w_stateid(cr, rp, curproc->p_pidp->pid_id,
 582  582              mi, OP_WRITE, sid_tp);
 583  583          wargs->mblk = NULL;
 584  584          *wargs_pp = wargs;
 585  585  }
 586  586  
 587  587  void
 588  588  nfs4args_copen_free(OPEN4cargs *open_args)
 589  589  {
 590  590          if (open_args->owner.owner_val) {
 591  591                  kmem_free(open_args->owner.owner_val,
 592  592                      open_args->owner.owner_len);
 593  593          }
 594  594          if ((open_args->opentype == OPEN4_CREATE) &&
 595  595              (open_args->mode != EXCLUSIVE4)) {
 596  596                  nfs4_fattr4_free(&open_args->createhow4_u.createattrs);
 597  597          }
 598  598  }
 599  599  
 600  600  /*
 601  601   * XXX:  This is referenced in modstubs.s
 602  602   */
 603  603  struct vnodeops *
 604  604  nfs4_getvnodeops(void)
 605  605  {
 606  606          return (nfs4_vnodeops);
 607  607  }
 608  608  
/*
 * The OPEN operation opens a regular file.
 *
 * For a VREG vnode this drives an over-the-wire OPEN via nfs4open_otw;
 * other vnode types are handled locally by nfs4_open_non_reg_file.
 * Returns 0 on success or an errno value.
 */
/*ARGSUSED3*/
static int
nfs4_open(vnode_t **vpp, int flag, cred_t *cr, caller_context_t *ct)
{
	vnode_t *dvp = NULL;
	rnode4_t *rp, *drp;
	int error;
	int just_been_created;
	char fn[MAXNAMELEN];

	NFS4_DEBUG(nfs4_client_state_debug, (CE_NOTE, "nfs4_open: "));
	/* An NFS mount may only be accessed from the zone that owns it. */
	if (nfs_zone() != VTOMI4(*vpp)->mi_zone)
		return (EIO);
	rp = VTOR4(*vpp);

	/*
	 * Check to see if opening something besides a regular file;
	 * if so skip the OTW call
	 */
	if ((*vpp)->v_type != VREG) {
		error = nfs4_open_non_reg_file(vpp, flag, cr);
		return (error);
	}

	/*
	 * XXX - would like a check right here to know if the file is
	 * executable or not, so as to skip OTW
	 */

	/* Find the parent directory vnode; held on success, rele'd below. */
	if ((error = vtodv(*vpp, &dvp, cr, TRUE)) != 0)
		return (error);

	drp = VTOR4(dvp);
	/* Serialize against other operations on the directory. */
	if (nfs_rw_enter_sig(&drp->r_rwlock, RW_READER, INTR4(dvp)))
		return (EINTR);

	/* Recover the file's name for the OTW OPEN request. */
	if ((error = vtoname(*vpp, fn, MAXNAMELEN)) != 0) {
		nfs_rw_exit(&drp->r_rwlock);
		return (error);
	}

	/*
	 * See if this file has just been CREATEd.
	 * If so, clear the flag and update the dnlc, which was previously
	 * skipped in nfs4_create.
	 * XXX need better serialization on this.
	 * XXX move this into the nf4open_otw call, after we have
	 * XXX acquired the open owner seqid sync.
	 */
	mutex_enter(&rp->r_statev4_lock);
	if (rp->created_v4) {
		rp->created_v4 = 0;
		mutex_exit(&rp->r_statev4_lock);

		dnlc_update(dvp, fn, *vpp);
		/* This is needed so we don't bump the open ref count */
		just_been_created = 1;
	} else {
		mutex_exit(&rp->r_statev4_lock);
		just_been_created = 0;
	}

	/*
	 * If caller specified O_TRUNC/FTRUNC, then be sure to set
	 * FWRITE (to drive successful setattr(size=0) after open)
	 */
	if (flag & FTRUNC)
		flag |= FWRITE;

	error = nfs4open_otw(dvp, fn, NULL, vpp, cr, 0, flag, 0,
	    just_been_created);

	/* On success, cache the name->vnode mapping (but never for VROOT). */
	if (!error && !((*vpp)->v_flag & VROOT))
		dnlc_update(dvp, fn, *vpp);

	nfs_rw_exit(&drp->r_rwlock);

	/* release the hold from vtodv */
	VN_RELE(dvp);

	/* exchange the shadow for the master vnode, if needed */

	if (error == 0 && IS_SHADOW(*vpp, rp))
		sv_exchange(vpp);

	return (error);
}
 699  699  
 700  700  /*
 701  701   * See if there's a "lost open" request to be saved and recovered.
 702  702   */
 703  703  static void
 704  704  nfs4open_save_lost_rqst(int error, nfs4_lost_rqst_t *lost_rqstp,
 705  705      nfs4_open_owner_t *oop, cred_t *cr, vnode_t *vp,
 706  706      vnode_t *dvp, OPEN4cargs *open_args)
 707  707  {
 708  708          vfs_t *vfsp;
 709  709          char *srccfp;
 710  710  
 711  711          vfsp = (dvp ? dvp->v_vfsp : vp->v_vfsp);
 712  712  
 713  713          if (error != ETIMEDOUT && error != EINTR &&
 714  714              !NFS4_FRC_UNMT_ERR(error, vfsp)) {
 715  715                  lost_rqstp->lr_op = 0;
 716  716                  return;
 717  717          }
 718  718  
 719  719          NFS4_DEBUG(nfs4_lost_rqst_debug, (CE_NOTE,
 720  720              "nfs4open_save_lost_rqst: error %d", error));
 721  721  
 722  722          lost_rqstp->lr_op = OP_OPEN;
 723  723  
 724  724          /*
 725  725           * The vp (if it is not NULL) and dvp are held and rele'd via
 726  726           * the recovery code.  See nfs4_save_lost_rqst.
 727  727           */
 728  728          lost_rqstp->lr_vp = vp;
 729  729          lost_rqstp->lr_dvp = dvp;
 730  730          lost_rqstp->lr_oop = oop;
 731  731          lost_rqstp->lr_osp = NULL;
 732  732          lost_rqstp->lr_lop = NULL;
 733  733          lost_rqstp->lr_cr = cr;
 734  734          lost_rqstp->lr_flk = NULL;
 735  735          lost_rqstp->lr_oacc = open_args->share_access;
 736  736          lost_rqstp->lr_odeny = open_args->share_deny;
 737  737          lost_rqstp->lr_oclaim = open_args->claim;
 738  738          if (open_args->claim == CLAIM_DELEGATE_CUR) {
 739  739                  lost_rqstp->lr_ostateid =
 740  740                      open_args->open_claim4_u.delegate_cur_info.delegate_stateid;
 741  741                  srccfp = open_args->open_claim4_u.delegate_cur_info.cfile;
 742  742          } else {
 743  743                  srccfp = open_args->open_claim4_u.cfile;
 744  744          }
 745  745          lost_rqstp->lr_ofile.utf8string_len = 0;
 746  746          lost_rqstp->lr_ofile.utf8string_val = NULL;
 747  747          (void) str_to_utf8(srccfp, &lost_rqstp->lr_ofile);
 748  748          lost_rqstp->lr_putfirst = FALSE;
 749  749  }
 750  750  
/*
 * Client-generated verifier used for EXCLUSIVE4 creates.  The two
 * 32-bit words are filled from the hostid/time in nfs4open_otw and the
 * whole struct is copied verbatim into the 64-bit createverf sent in
 * the OPEN request.
 */
struct nfs4_excl_time {
	uint32 seconds;
	uint32 nseconds;
};
 755  755  
 756  756  /*
 757  757   * The OPEN operation creates and/or opens a regular file
 758  758   *
 759  759   * ARGSUSED
 760  760   */
 761  761  static int
 762  762  nfs4open_otw(vnode_t *dvp, char *file_name, struct vattr *in_va,
 763  763      vnode_t **vpp, cred_t *cr, int create_flag, int open_flag,
 764  764      enum createmode4 createmode, int file_just_been_created)
 765  765  {
 766  766          rnode4_t *rp;
 767  767          rnode4_t *drp = VTOR4(dvp);
 768  768          vnode_t *vp = NULL;
 769  769          vnode_t *vpi = *vpp;
 770  770          bool_t needrecov = FALSE;
 771  771  
 772  772          int doqueue = 1;
 773  773  
 774  774          COMPOUND4args_clnt args;
 775  775          COMPOUND4res_clnt res;
 776  776          nfs_argop4 *argop;
 777  777          nfs_resop4 *resop;
 778  778          int argoplist_size;
 779  779          int idx_open, idx_fattr;
 780  780  
 781  781          GETFH4res *gf_res = NULL;
 782  782          OPEN4res *op_res = NULL;
 783  783          nfs4_ga_res_t *garp;
 784  784          fattr4 *attr = NULL;
 785  785          struct nfs4_excl_time verf;
 786  786          bool_t did_excl_setup = FALSE;
 787  787          int created_osp;
 788  788  
 789  789          OPEN4cargs *open_args;
 790  790          nfs4_open_owner_t       *oop = NULL;
 791  791          nfs4_open_stream_t      *osp = NULL;
 792  792          seqid4 seqid = 0;
 793  793          bool_t retry_open = FALSE;
 794  794          nfs4_recov_state_t recov_state;
 795  795          nfs4_lost_rqst_t lost_rqst;
 796  796          nfs4_error_t e = { 0, NFS4_OK, RPC_SUCCESS };
 797  797          hrtime_t t;
 798  798          int acc = 0;
 799  799          cred_t *cred_otw = NULL;        /* cred used to do the RPC call */
 800  800          cred_t *ncr = NULL;
 801  801  
 802  802          nfs4_sharedfh_t *otw_sfh;
 803  803          nfs4_sharedfh_t *orig_sfh;
 804  804          int fh_differs = 0;
 805  805          int numops, setgid_flag;
 806  806          int num_bseqid_retry = NFS4_NUM_RETRY_BAD_SEQID + 1;
 807  807  
 808  808          /*
 809  809           * Make sure we properly deal with setting the right gid on
 810  810           * a newly created file to reflect the parent's setgid bit
 811  811           */
 812  812          setgid_flag = 0;
 813  813          if (create_flag && in_va) {
 814  814  
 815  815                  /*
 816  816                   * If there is grpid mount flag used or
 817  817                   * the parent's directory has the setgid bit set
 818  818                   * _and_ the client was able to get a valid mapping
 819  819                   * for the parent dir's owner_group, we want to
 820  820                   * append NVERIFY(owner_group == dva.va_gid) and
 821  821                   * SETATTR to the CREATE compound.
 822  822                   */
 823  823                  mutex_enter(&drp->r_statelock);
 824  824                  if ((VTOMI4(dvp)->mi_flags & MI4_GRPID ||
 825  825                      drp->r_attr.va_mode & VSGID) &&
 826  826                      drp->r_attr.va_gid != GID_NOBODY) {
 827  827                          in_va->va_mask |= AT_GID;
 828  828                          in_va->va_gid = drp->r_attr.va_gid;
 829  829                          setgid_flag = 1;
 830  830                  }
 831  831                  mutex_exit(&drp->r_statelock);
 832  832          }
 833  833  
 834  834          /*
 835  835           * Normal/non-create compound:
 836  836           * PUTFH(dfh) + OPEN(create) + GETFH + GETATTR(new)
 837  837           *
 838  838           * Open(create) compound no setgid:
 839  839           * PUTFH(dfh) + SAVEFH + OPEN(create) + GETFH + GETATTR(new) +
 840  840           * RESTOREFH + GETATTR
 841  841           *
 842  842           * Open(create) setgid:
 843  843           * PUTFH(dfh) + OPEN(create) + GETFH + GETATTR(new) +
 844  844           * SAVEFH + PUTFH(dfh) + GETATTR(dvp) + RESTOREFH +
 845  845           * NVERIFY(grp) + SETATTR
 846  846           */
 847  847          if (setgid_flag) {
 848  848                  numops = 10;
 849  849                  idx_open = 1;
 850  850                  idx_fattr = 3;
 851  851          } else if (create_flag) {
 852  852                  numops = 7;
 853  853                  idx_open = 2;
 854  854                  idx_fattr = 4;
 855  855          } else {
 856  856                  numops = 4;
 857  857                  idx_open = 1;
 858  858                  idx_fattr = 3;
 859  859          }
 860  860  
 861  861          args.array_len = numops;
 862  862          argoplist_size = numops * sizeof (nfs_argop4);
 863  863          argop = kmem_alloc(argoplist_size, KM_SLEEP);
 864  864  
 865  865          NFS4_DEBUG(nfs4_client_state_debug, (CE_NOTE, "nfs4open_otw: "
 866  866              "open %s open flag 0x%x cred %p", file_name, open_flag,
 867  867              (void *)cr));
 868  868  
 869  869          ASSERT(nfs_zone() == VTOMI4(dvp)->mi_zone);
 870  870          if (create_flag) {
 871  871                  /*
 872  872                   * We are to create a file.  Initialize the passed in vnode
 873  873                   * pointer.
 874  874                   */
 875  875                  vpi = NULL;
 876  876          } else {
 877  877                  /*
 878  878                   * Check to see if the client owns a read delegation and is
 879  879                   * trying to open for write.  If so, then return the delegation
 880  880                   * to avoid the server doing a cb_recall and returning DELAY.
 881  881                   * NB - we don't use the statev4_lock here because we'd have
 882  882                   * to drop the lock anyway and the result would be stale.
 883  883                   */
 884  884                  if ((open_flag & FWRITE) &&
 885  885                      VTOR4(vpi)->r_deleg_type == OPEN_DELEGATE_READ)
 886  886                          (void) nfs4delegreturn(VTOR4(vpi), NFS4_DR_REOPEN);
 887  887  
 888  888                  /*
 889  889                   * If the file has a delegation, then do an access check up
		 * front.  This avoids having to do an access check later after
 891  891                   * we've already done start_op, which could deadlock.
 892  892                   */
 893  893                  if (VTOR4(vpi)->r_deleg_type != OPEN_DELEGATE_NONE) {
 894  894                          if (open_flag & FREAD &&
 895  895                              nfs4_access(vpi, VREAD, 0, cr, NULL) == 0)
 896  896                                  acc |= VREAD;
 897  897                          if (open_flag & FWRITE &&
 898  898                              nfs4_access(vpi, VWRITE, 0, cr, NULL) == 0)
 899  899                                  acc |= VWRITE;
 900  900                  }
 901  901          }
 902  902  
 903  903          drp = VTOR4(dvp);
 904  904  
 905  905          recov_state.rs_flags = 0;
 906  906          recov_state.rs_num_retry_despite_err = 0;
 907  907          cred_otw = cr;
 908  908  
 909  909  recov_retry:
 910  910          fh_differs = 0;
 911  911          nfs4_error_zinit(&e);
 912  912  
 913  913          e.error = nfs4_start_op(VTOMI4(dvp), dvp, vpi, &recov_state);
 914  914          if (e.error) {
 915  915                  if (ncr != NULL)
 916  916                          crfree(ncr);
 917  917                  kmem_free(argop, argoplist_size);
 918  918                  return (e.error);
 919  919          }
 920  920  
 921  921          args.ctag = TAG_OPEN;
 922  922          args.array_len = numops;
 923  923          args.array = argop;
 924  924  
 925  925          /* putfh directory fh */
 926  926          argop[0].argop = OP_CPUTFH;
 927  927          argop[0].nfs_argop4_u.opcputfh.sfh = drp->r_fh;
 928  928  
 929  929          /* OPEN: either op 1 or op 2 depending upon create/setgid flags */
 930  930          argop[idx_open].argop = OP_COPEN;
 931  931          open_args = &argop[idx_open].nfs_argop4_u.opcopen;
 932  932          open_args->claim = CLAIM_NULL;
 933  933  
 934  934          /* name of file */
 935  935          open_args->open_claim4_u.cfile = file_name;
 936  936          open_args->owner.owner_len = 0;
 937  937          open_args->owner.owner_val = NULL;
 938  938  
 939  939          if (create_flag) {
 940  940                  /* CREATE a file */
 941  941                  open_args->opentype = OPEN4_CREATE;
 942  942                  open_args->mode = createmode;
 943  943                  if (createmode == EXCLUSIVE4) {
 944  944                          if (did_excl_setup == FALSE) {
 945  945                                  verf.seconds = zone_get_hostid(NULL);
 946  946                                  if (verf.seconds != 0)
 947  947                                          verf.nseconds = newnum();
 948  948                                  else {
 949  949                                          timestruc_t now;
 950  950  
 951  951                                          gethrestime(&now);
 952  952                                          verf.seconds = now.tv_sec;
 953  953                                          verf.nseconds = now.tv_nsec;
 954  954                                  }
 955  955                                  /*
 956  956                                   * Since the server will use this value for the
 957  957                                   * mtime, make sure that it can't overflow. Zero
 958  958                                   * out the MSB. The actual value does not matter
				 * here, only its uniqueness.
 960  960                                   */
 961  961                                  verf.seconds &= INT32_MAX;
 962  962                                  did_excl_setup = TRUE;
 963  963                          }
 964  964  
 965  965                          /* Now copy over verifier to OPEN4args. */
 966  966                          open_args->createhow4_u.createverf = *(uint64_t *)&verf;
 967  967                  } else {
 968  968                          int v_error;
 969  969                          bitmap4 supp_attrs;
 970  970                          servinfo4_t *svp;
 971  971  
 972  972                          attr = &open_args->createhow4_u.createattrs;
 973  973  
 974  974                          svp = drp->r_server;
 975  975                          (void) nfs_rw_enter_sig(&svp->sv_lock, RW_READER, 0);
 976  976                          supp_attrs = svp->sv_supp_attrs;
 977  977                          nfs_rw_exit(&svp->sv_lock);
 978  978  
 979  979                          /* GUARDED4 or UNCHECKED4 */
 980  980                          v_error = vattr_to_fattr4(in_va, NULL, attr, 0, OP_OPEN,
 981  981                              supp_attrs);
 982  982                          if (v_error) {
 983  983                                  bzero(attr, sizeof (*attr));
 984  984                                  nfs4args_copen_free(open_args);
 985  985                                  nfs4_end_op(VTOMI4(dvp), dvp, vpi,
 986  986                                      &recov_state, FALSE);
 987  987                                  if (ncr != NULL)
 988  988                                          crfree(ncr);
 989  989                                  kmem_free(argop, argoplist_size);
 990  990                                  return (v_error);
 991  991                          }
 992  992                  }
 993  993          } else {
 994  994                  /* NO CREATE */
 995  995                  open_args->opentype = OPEN4_NOCREATE;
 996  996          }
 997  997  
 998  998          if (recov_state.rs_sp != NULL) {
 999  999                  mutex_enter(&recov_state.rs_sp->s_lock);
1000 1000                  open_args->owner.clientid = recov_state.rs_sp->clientid;
1001 1001                  mutex_exit(&recov_state.rs_sp->s_lock);
1002 1002          } else {
1003 1003                  /* XXX should we just fail here? */
1004 1004                  open_args->owner.clientid = 0;
1005 1005          }
1006 1006  
1007 1007          /*
1008 1008           * This increments oop's ref count or creates a temporary 'just_created'
1009 1009           * open owner that will become valid when this OPEN/OPEN_CONFIRM call
1010 1010           * completes.
1011 1011           */
1012 1012          mutex_enter(&VTOMI4(dvp)->mi_lock);
1013 1013  
1014 1014          /* See if a permanent or just created open owner exists */
1015 1015          oop = find_open_owner_nolock(cr, NFS4_JUST_CREATED, VTOMI4(dvp));
1016 1016          if (!oop) {
1017 1017                  /*
1018 1018                   * This open owner does not exist so create a temporary
1019 1019                   * just created one.
1020 1020                   */
1021 1021                  oop = create_open_owner(cr, VTOMI4(dvp));
1022 1022                  ASSERT(oop != NULL);
1023 1023          }
1024 1024          mutex_exit(&VTOMI4(dvp)->mi_lock);
1025 1025  
1026 1026          /* this length never changes, do alloc before seqid sync */
1027 1027          open_args->owner.owner_len = sizeof (oop->oo_name);
1028 1028          open_args->owner.owner_val =
1029 1029              kmem_alloc(open_args->owner.owner_len, KM_SLEEP);
1030 1030  
1031 1031          e.error = nfs4_start_open_seqid_sync(oop, VTOMI4(dvp));
1032 1032          if (e.error == EAGAIN) {
1033 1033                  open_owner_rele(oop);
1034 1034                  nfs4args_copen_free(open_args);
1035 1035                  nfs4_end_op(VTOMI4(dvp), dvp, vpi, &recov_state, TRUE);
1036 1036                  if (ncr != NULL) {
1037 1037                          crfree(ncr);
1038 1038                          ncr = NULL;
1039 1039                  }
1040 1040                  goto recov_retry;
1041 1041          }
1042 1042  
1043 1043          /* Check to see if we need to do the OTW call */
1044 1044          if (!create_flag) {
1045 1045                  if (!nfs4_is_otw_open_necessary(oop, open_flag, vpi,
1046 1046                      file_just_been_created, &e.error, acc, &recov_state)) {
1047 1047  
1048 1048                          /*
1049 1049                           * The OTW open is not necessary.  Either
1050 1050                           * the open can succeed without it (eg.
1051 1051                           * delegation, error == 0) or the open
1052 1052                           * must fail due to an access failure
1053 1053                           * (error != 0).  In either case, tidy
1054 1054                           * up and return.
1055 1055                           */
1056 1056  
1057 1057                          nfs4_end_open_seqid_sync(oop);
1058 1058                          open_owner_rele(oop);
1059 1059                          nfs4args_copen_free(open_args);
1060 1060                          nfs4_end_op(VTOMI4(dvp), dvp, vpi, &recov_state, FALSE);
1061 1061                          if (ncr != NULL)
1062 1062                                  crfree(ncr);
1063 1063                          kmem_free(argop, argoplist_size);
1064 1064                          return (e.error);
1065 1065                  }
1066 1066          }
1067 1067  
1068 1068          bcopy(&oop->oo_name, open_args->owner.owner_val,
1069 1069              open_args->owner.owner_len);
1070 1070  
1071 1071          seqid = nfs4_get_open_seqid(oop) + 1;
1072 1072          open_args->seqid = seqid;
1073 1073          open_args->share_access = 0;
1074 1074          if (open_flag & FREAD)
1075 1075                  open_args->share_access |= OPEN4_SHARE_ACCESS_READ;
1076 1076          if (open_flag & FWRITE)
1077 1077                  open_args->share_access |= OPEN4_SHARE_ACCESS_WRITE;
1078 1078          open_args->share_deny = OPEN4_SHARE_DENY_NONE;
1079 1079  
1080 1080  
1081 1081  
1082 1082          /*
1083 1083           * getfh w/sanity check for idx_open/idx_fattr
1084 1084           */
1085 1085          ASSERT((idx_open + 1) == (idx_fattr - 1));
1086 1086          argop[idx_open + 1].argop = OP_GETFH;
1087 1087  
1088 1088          /* getattr */
1089 1089          argop[idx_fattr].argop = OP_GETATTR;
1090 1090          argop[idx_fattr].nfs_argop4_u.opgetattr.attr_request = NFS4_VATTR_MASK;
1091 1091          argop[idx_fattr].nfs_argop4_u.opgetattr.mi = VTOMI4(dvp);
1092 1092  
1093 1093          if (setgid_flag) {
1094 1094                  vattr_t _v;
1095 1095                  servinfo4_t *svp;
1096 1096                  bitmap4 supp_attrs;
1097 1097  
1098 1098                  svp = drp->r_server;
1099 1099                  (void) nfs_rw_enter_sig(&svp->sv_lock, RW_READER, 0);
1100 1100                  supp_attrs = svp->sv_supp_attrs;
1101 1101                  nfs_rw_exit(&svp->sv_lock);
1102 1102  
1103 1103                  /*
1104 1104                   * For setgid case, we need to:
1105 1105                   * 4:savefh(new) 5:putfh(dir) 6:getattr(dir) 7:restorefh(new)
1106 1106                   */
1107 1107                  argop[4].argop = OP_SAVEFH;
1108 1108  
1109 1109                  argop[5].argop = OP_CPUTFH;
1110 1110                  argop[5].nfs_argop4_u.opcputfh.sfh = drp->r_fh;
1111 1111  
1112 1112                  argop[6].argop = OP_GETATTR;
1113 1113                  argop[6].nfs_argop4_u.opgetattr.attr_request = NFS4_VATTR_MASK;
1114 1114                  argop[6].nfs_argop4_u.opgetattr.mi = VTOMI4(dvp);
1115 1115  
1116 1116                  argop[7].argop = OP_RESTOREFH;
1117 1117  
1118 1118                  /*
1119 1119                   * nverify
1120 1120                   */
1121 1121                  _v.va_mask = AT_GID;
1122 1122                  _v.va_gid = in_va->va_gid;
1123 1123                  if (!(e.error = nfs4args_verify(&argop[8], &_v, OP_NVERIFY,
1124 1124                      supp_attrs))) {
1125 1125  
1126 1126                          /*
1127 1127                           * setattr
1128 1128                           *
1129 1129                           * We _know_ we're not messing with AT_SIZE or
1130 1130                           * AT_XTIME, so no need for stateid or flags.
1131 1131                           * Also we specify NULL rp since we're only
1132 1132                           * interested in setting owner_group attributes.
1133 1133                           */
1134 1134                          nfs4args_setattr(&argop[9], &_v, NULL, 0, NULL, cr,
1135 1135                              supp_attrs, &e.error, 0);
1136 1136                          if (e.error)
1137 1137                                  nfs4args_verify_free(&argop[8]);
1138 1138                  }
1139 1139  
1140 1140                  if (e.error) {
1141 1141                          /*
1142 1142                           * XXX - Revisit the last argument to nfs4_end_op()
1143 1143                           *       once 5020486 is fixed.
1144 1144                           */
1145 1145                          nfs4_end_open_seqid_sync(oop);
1146 1146                          open_owner_rele(oop);
1147 1147                          nfs4args_copen_free(open_args);
1148 1148                          nfs4_end_op(VTOMI4(dvp), dvp, vpi, &recov_state, TRUE);
1149 1149                          if (ncr != NULL)
1150 1150                                  crfree(ncr);
1151 1151                          kmem_free(argop, argoplist_size);
1152 1152                          return (e.error);
1153 1153                  }
1154 1154          } else if (create_flag) {
1155 1155                  argop[1].argop = OP_SAVEFH;
1156 1156  
1157 1157                  argop[5].argop = OP_RESTOREFH;
1158 1158  
1159 1159                  argop[6].argop = OP_GETATTR;
1160 1160                  argop[6].nfs_argop4_u.opgetattr.attr_request = NFS4_VATTR_MASK;
1161 1161                  argop[6].nfs_argop4_u.opgetattr.mi = VTOMI4(dvp);
1162 1162          }
1163 1163  
1164 1164          NFS4_DEBUG(nfs4_client_call_debug, (CE_NOTE,
1165 1165              "nfs4open_otw: %s call, nm %s, rp %s",
1166 1166              needrecov ? "recov" : "first", file_name,
1167 1167              rnode4info(VTOR4(dvp))));
1168 1168  
1169 1169          t = gethrtime();
1170 1170  
1171 1171          rfs4call(VTOMI4(dvp), &args, &res, cred_otw, &doqueue, 0, &e);
1172 1172  
1173 1173          if (!e.error && nfs4_need_to_bump_seqid(&res))
1174 1174                  nfs4_set_open_seqid(seqid, oop, args.ctag);
1175 1175  
1176 1176          needrecov = nfs4_needs_recovery(&e, TRUE, dvp->v_vfsp);
1177 1177  
1178 1178          if (e.error || needrecov) {
1179 1179                  bool_t abort = FALSE;
1180 1180  
1181 1181                  if (needrecov) {
1182 1182                          nfs4_bseqid_entry_t *bsep = NULL;
1183 1183  
1184 1184                          nfs4open_save_lost_rqst(e.error, &lost_rqst, oop,
1185 1185                              cred_otw, vpi, dvp, open_args);
1186 1186  
1187 1187                          if (!e.error && res.status == NFS4ERR_BAD_SEQID) {
1188 1188                                  bsep = nfs4_create_bseqid_entry(oop, NULL,
1189 1189                                      vpi, 0, args.ctag, open_args->seqid);
1190 1190                                  num_bseqid_retry--;
1191 1191                          }
1192 1192  
1193 1193                          abort = nfs4_start_recovery(&e, VTOMI4(dvp), dvp, vpi,
1194 1194                              NULL, lost_rqst.lr_op == OP_OPEN ?
1195 1195                              &lost_rqst : NULL, OP_OPEN, bsep, NULL, NULL);
1196 1196  
1197 1197                          if (bsep)
1198 1198                                  kmem_free(bsep, sizeof (*bsep));
1199 1199                          /* give up if we keep getting BAD_SEQID */
1200 1200                          if (num_bseqid_retry == 0)
1201 1201                                  abort = TRUE;
1202 1202                          if (abort == TRUE && e.error == 0)
1203 1203                                  e.error = geterrno4(res.status);
1204 1204                  }
1205 1205                  nfs4_end_open_seqid_sync(oop);
1206 1206                  open_owner_rele(oop);
1207 1207                  nfs4_end_op(VTOMI4(dvp), dvp, vpi, &recov_state, needrecov);
1208 1208                  nfs4args_copen_free(open_args);
1209 1209                  if (setgid_flag) {
1210 1210                          nfs4args_verify_free(&argop[8]);
1211 1211                          nfs4args_setattr_free(&argop[9]);
1212 1212                  }
1213 1213                  if (!e.error)
1214 1214                          (void) xdr_free(xdr_COMPOUND4res_clnt, (caddr_t)&res);
1215 1215                  if (ncr != NULL) {
1216 1216                          crfree(ncr);
1217 1217                          ncr = NULL;
1218 1218                  }
1219 1219                  if (!needrecov || abort == TRUE || e.error == EINTR ||
1220 1220                      NFS4_FRC_UNMT_ERR(e.error, dvp->v_vfsp)) {
1221 1221                          kmem_free(argop, argoplist_size);
1222 1222                          return (e.error);
1223 1223                  }
1224 1224                  goto recov_retry;
1225 1225          }
1226 1226  
1227 1227          /*
1228 1228           * Will check and update lease after checking the rflag for
1229 1229           * OPEN_CONFIRM in the successful OPEN call.
1230 1230           */
1231 1231          if (res.status != NFS4_OK && res.array_len <= idx_fattr + 1) {
1232 1232  
1233 1233                  /*
1234 1234                   * XXX what if we're crossing mount points from server1:/drp
1235 1235                   * to server2:/drp/rp.
1236 1236                   */
1237 1237  
1238 1238                  /* Signal our end of use of the open seqid */
1239 1239                  nfs4_end_open_seqid_sync(oop);
1240 1240  
1241 1241                  /*
1242 1242                   * This will destroy the open owner if it was just created,
1243 1243                   * and no one else has put a reference on it.
1244 1244                   */
1245 1245                  open_owner_rele(oop);
1246 1246                  if (create_flag && (createmode != EXCLUSIVE4) &&
1247 1247                      res.status == NFS4ERR_BADOWNER)
1248 1248                          nfs4_log_badowner(VTOMI4(dvp), OP_OPEN);
1249 1249  
1250 1250                  e.error = geterrno4(res.status);
1251 1251                  nfs4args_copen_free(open_args);
1252 1252                  if (setgid_flag) {
1253 1253                          nfs4args_verify_free(&argop[8]);
1254 1254                          nfs4args_setattr_free(&argop[9]);
1255 1255                  }
1256 1256                  (void) xdr_free(xdr_COMPOUND4res_clnt, (caddr_t)&res);
1257 1257                  nfs4_end_op(VTOMI4(dvp), dvp, vpi, &recov_state, needrecov);
1258 1258                  /*
1259 1259                   * If the reply is NFS4ERR_ACCESS, it may be because
1260 1260                   * we are root (no root net access).  If the real uid
1261 1261                   * is not root, then retry with the real uid instead.
1262 1262                   */
1263 1263                  if (ncr != NULL) {
1264 1264                          crfree(ncr);
1265 1265                          ncr = NULL;
1266 1266                  }
1267 1267                  if (res.status == NFS4ERR_ACCESS &&
1268 1268                      (ncr = crnetadjust(cred_otw)) != NULL) {
1269 1269                          cred_otw = ncr;
1270 1270                          goto recov_retry;
1271 1271                  }
1272 1272                  kmem_free(argop, argoplist_size);
1273 1273                  return (e.error);
1274 1274          }
1275 1275  
1276 1276          resop = &res.array[idx_open];  /* open res */
1277 1277          op_res = &resop->nfs_resop4_u.opopen;
1278 1278  
1279 1279  #ifdef DEBUG
1280 1280          /*
1281 1281           * verify attrset bitmap
1282 1282           */
1283 1283          if (create_flag &&
1284 1284              (createmode == UNCHECKED4 || createmode == GUARDED4)) {
1285 1285                  /* make sure attrset returned is what we asked for */
1286 1286                  /* XXX Ignore this 'error' for now */
1287 1287                  if (attr->attrmask != op_res->attrset)
1288 1288                          /* EMPTY */;
1289 1289          }
1290 1290  #endif
1291 1291  
1292 1292          if (op_res->rflags & OPEN4_RESULT_LOCKTYPE_POSIX) {
1293 1293                  mutex_enter(&VTOMI4(dvp)->mi_lock);
1294 1294                  VTOMI4(dvp)->mi_flags |= MI4_POSIX_LOCK;
1295 1295                  mutex_exit(&VTOMI4(dvp)->mi_lock);
1296 1296          }
1297 1297  
1298 1298          resop = &res.array[idx_open + 1];  /* getfh res */
1299 1299          gf_res = &resop->nfs_resop4_u.opgetfh;
1300 1300  
1301 1301          otw_sfh = sfh4_get(&gf_res->object, VTOMI4(dvp));
1302 1302  
1303 1303          /*
1304 1304           * The open stateid has been updated on the server but not
1305 1305           * on the client yet.  There is a path: makenfs4node->nfs4_attr_cache->
1306 1306           * flush_pages->VOP_PUTPAGE->...->nfs4write where we will issue an OTW
1307 1307           * WRITE call.  That, however, will use the old stateid, so go ahead
1308 1308           * and update the open stateid now, before any call to makenfs4node.
1309 1309           */
1310 1310          if (vpi) {
1311 1311                  nfs4_open_stream_t      *tmp_osp;
1312 1312                  rnode4_t                *tmp_rp = VTOR4(vpi);
1313 1313  
1314 1314                  tmp_osp = find_open_stream(oop, tmp_rp);
1315 1315                  if (tmp_osp) {
1316 1316                          tmp_osp->open_stateid = op_res->stateid;
1317 1317                          mutex_exit(&tmp_osp->os_sync_lock);
1318 1318                          open_stream_rele(tmp_osp, tmp_rp);
1319 1319                  }
1320 1320  
1321 1321                  /*
1322 1322                   * We must determine if the file handle given by the otw open
1323 1323                   * is the same as the file handle which was passed in with
1324 1324                   * *vpp.  This case can be reached if the file we are trying
1325 1325                   * to open has been removed and another file has been created
1326 1326                   * having the same file name.  The passed in vnode is released
1327 1327                   * later.
1328 1328                   */
1329 1329                  orig_sfh = VTOR4(vpi)->r_fh;
1330 1330                  fh_differs = nfs4cmpfh(&orig_sfh->sfh_fh, &otw_sfh->sfh_fh);
1331 1331          }
1332 1332  
1333 1333          garp = &res.array[idx_fattr].nfs_resop4_u.opgetattr.ga_res;
1334 1334  
1335 1335          if (create_flag || fh_differs) {
1336 1336                  int rnode_err = 0;
1337 1337  
1338 1338                  vp = makenfs4node(otw_sfh, garp, dvp->v_vfsp, t, cr,
1339 1339                      dvp, fn_get(VTOSV(dvp)->sv_name, file_name, otw_sfh));
1340 1340  
1341 1341                  if (e.error)
1342 1342                          PURGE_ATTRCACHE4(vp);
1343 1343                  /*
1344 1344                   * For the newly created vp case, make sure the rnode
1345 1345                   * isn't bad before using it.
1346 1346                   */
1347 1347                  mutex_enter(&(VTOR4(vp))->r_statelock);
1348 1348                  if (VTOR4(vp)->r_flags & R4RECOVERR)
1349 1349                          rnode_err = EIO;
1350 1350                  mutex_exit(&(VTOR4(vp))->r_statelock);
1351 1351  
1352 1352                  if (rnode_err) {
1353 1353                          nfs4_end_open_seqid_sync(oop);
1354 1354                          nfs4args_copen_free(open_args);
1355 1355                          if (setgid_flag) {
1356 1356                                  nfs4args_verify_free(&argop[8]);
1357 1357                                  nfs4args_setattr_free(&argop[9]);
1358 1358                          }
1359 1359                          (void) xdr_free(xdr_COMPOUND4res_clnt, (caddr_t)&res);
1360 1360                          nfs4_end_op(VTOMI4(dvp), dvp, vpi, &recov_state,
1361 1361                              needrecov);
1362 1362                          open_owner_rele(oop);
1363 1363                          VN_RELE(vp);
1364 1364                          if (ncr != NULL)
1365 1365                                  crfree(ncr);
1366 1366                          sfh4_rele(&otw_sfh);
1367 1367                          kmem_free(argop, argoplist_size);
1368 1368                          return (EIO);
1369 1369                  }
1370 1370          } else {
1371 1371                  vp = vpi;
1372 1372          }
1373 1373          sfh4_rele(&otw_sfh);
1374 1374  
1375 1375          /*
1376 1376           * It seems odd to get a full set of attrs and then not update
1377 1377           * the object's attrcache in the non-create case.  Create case uses
1378 1378           * the attrs since makenfs4node checks to see if the attrs need to
1379 1379           * be updated (and then updates them).  The non-create case should
1380 1380           * update attrs also.
1381 1381           */
1382 1382          if (! create_flag && ! fh_differs && !e.error) {
1383 1383                  nfs4_attr_cache(vp, garp, t, cr, TRUE, NULL);
1384 1384          }
1385 1385  
1386 1386          nfs4_error_zinit(&e);
1387 1387          if (op_res->rflags & OPEN4_RESULT_CONFIRM) {
1388 1388                  /* This does not do recovery for vp explicitly. */
1389 1389                  nfs4open_confirm(vp, &seqid, &op_res->stateid, cred_otw, FALSE,
1390 1390                      &retry_open, oop, FALSE, &e, &num_bseqid_retry);
1391 1391  
1392 1392                  if (e.error || e.stat) {
1393 1393                          nfs4_end_open_seqid_sync(oop);
1394 1394                          nfs4args_copen_free(open_args);
1395 1395                          if (setgid_flag) {
1396 1396                                  nfs4args_verify_free(&argop[8]);
1397 1397                                  nfs4args_setattr_free(&argop[9]);
1398 1398                          }
1399 1399                          (void) xdr_free(xdr_COMPOUND4res_clnt, (caddr_t)&res);
1400 1400                          nfs4_end_op(VTOMI4(dvp), dvp, vpi, &recov_state,
1401 1401                              needrecov);
1402 1402                          open_owner_rele(oop);
1403 1403                          if (create_flag || fh_differs) {
1404 1404                                  /* rele the makenfs4node */
1405 1405                                  VN_RELE(vp);
1406 1406                          }
1407 1407                          if (ncr != NULL) {
1408 1408                                  crfree(ncr);
1409 1409                                  ncr = NULL;
1410 1410                          }
1411 1411                          if (retry_open == TRUE) {
1412 1412                                  NFS4_DEBUG(nfs4_client_recov_debug, (CE_NOTE,
1413 1413                                      "nfs4open_otw: retry the open since OPEN "
1414 1414                                      "CONFIRM failed with error %d stat %d",
1415 1415                                      e.error, e.stat));
1416 1416                                  if (create_flag && createmode == GUARDED4) {
1417 1417                                          NFS4_DEBUG(nfs4_client_recov_debug,
1418 1418                                              (CE_NOTE, "nfs4open_otw: switch "
1419 1419                                              "createmode from GUARDED4 to "
1420 1420                                              "UNCHECKED4"));
1421 1421                                          createmode = UNCHECKED4;
1422 1422                                  }
1423 1423                                  goto recov_retry;
1424 1424                          }
1425 1425                          if (!e.error) {
1426 1426                                  if (create_flag && (createmode != EXCLUSIVE4) &&
1427 1427                                      e.stat == NFS4ERR_BADOWNER)
1428 1428                                          nfs4_log_badowner(VTOMI4(dvp), OP_OPEN);
1429 1429  
1430 1430                                  e.error = geterrno4(e.stat);
1431 1431                          }
1432 1432                          kmem_free(argop, argoplist_size);
1433 1433                          return (e.error);
1434 1434                  }
1435 1435          }
1436 1436  
1437 1437          rp = VTOR4(vp);
1438 1438  
1439 1439          mutex_enter(&rp->r_statev4_lock);
1440 1440          if (create_flag)
1441 1441                  rp->created_v4 = 1;
1442 1442          mutex_exit(&rp->r_statev4_lock);
1443 1443  
1444 1444          mutex_enter(&oop->oo_lock);
1445 1445          /* Doesn't matter if 'oo_just_created' was already set; overwrite it */
1446 1446          oop->oo_just_created = NFS4_PERM_CREATED;
1447 1447          if (oop->oo_cred_otw)
1448 1448                  crfree(oop->oo_cred_otw);
1449 1449          oop->oo_cred_otw = cred_otw;
1450 1450          crhold(oop->oo_cred_otw);
1451 1451          mutex_exit(&oop->oo_lock);
1452 1452  
1453 1453          /* returns with 'os_sync_lock' held */
1454 1454          osp = find_or_create_open_stream(oop, rp, &created_osp);
1455 1455          if (!osp) {
1456 1456                  NFS4_DEBUG(nfs4_client_state_debug, (CE_NOTE,
1457 1457                      "nfs4open_otw: failed to create an open stream"));
1458 1458                  NFS4_DEBUG(nfs4_seqid_sync, (CE_NOTE, "nfs4open_otw: "
1459 1459                      "signal our end of use of the open seqid"));
1460 1460  
1461 1461                  nfs4_end_open_seqid_sync(oop);
1462 1462                  open_owner_rele(oop);
1463 1463                  nfs4args_copen_free(open_args);
1464 1464                  if (setgid_flag) {
1465 1465                          nfs4args_verify_free(&argop[8]);
1466 1466                          nfs4args_setattr_free(&argop[9]);
1467 1467                  }
1468 1468                  (void) xdr_free(xdr_COMPOUND4res_clnt, (caddr_t)&res);
1469 1469                  nfs4_end_op(VTOMI4(dvp), dvp, vpi, &recov_state, needrecov);
1470 1470                  if (create_flag || fh_differs)
1471 1471                          VN_RELE(vp);
1472 1472                  if (ncr != NULL)
1473 1473                          crfree(ncr);
1474 1474  
1475 1475                  kmem_free(argop, argoplist_size);
1476 1476                  return (EINVAL);
1477 1477  
1478 1478          }
1479 1479  
1480 1480          osp->open_stateid = op_res->stateid;
1481 1481  
1482 1482          if (open_flag & FREAD)
1483 1483                  osp->os_share_acc_read++;
1484 1484          if (open_flag & FWRITE)
1485 1485                  osp->os_share_acc_write++;
1486 1486          osp->os_share_deny_none++;
1487 1487  
1488 1488          /*
1489 1489           * Need to reset this bitfield for the possible case where we were
1490 1490           * going to OTW CLOSE the file, got a non-recoverable error, and before
1491 1491           * we could retry the CLOSE, OPENed the file again.
1492 1492           */
1493 1493          ASSERT(osp->os_open_owner->oo_seqid_inuse);
1494 1494          osp->os_final_close = 0;
1495 1495          osp->os_force_close = 0;
1496 1496  #ifdef DEBUG
1497 1497          if (osp->os_failed_reopen)
1498 1498                  NFS4_DEBUG(nfs4_open_stream_debug, (CE_NOTE, "nfs4open_otw:"
1499 1499                      " clearing os_failed_reopen for osp %p, cr %p, rp %s",
1500 1500                      (void *)osp, (void *)cr, rnode4info(rp)));
1501 1501  #endif
1502 1502          osp->os_failed_reopen = 0;
1503 1503  
1504 1504          mutex_exit(&osp->os_sync_lock);
1505 1505  
1506 1506          nfs4_end_open_seqid_sync(oop);
1507 1507  
1508 1508          if (created_osp && recov_state.rs_sp != NULL) {
1509 1509                  mutex_enter(&recov_state.rs_sp->s_lock);
1510 1510                  nfs4_inc_state_ref_count_nolock(recov_state.rs_sp, VTOMI4(dvp));
1511 1511                  mutex_exit(&recov_state.rs_sp->s_lock);
1512 1512          }
1513 1513  
1514 1514          /* get rid of our reference to find oop */
1515 1515          open_owner_rele(oop);
1516 1516  
1517 1517          open_stream_rele(osp, rp);
1518 1518  
1519 1519          /* accept delegation, if any */
1520 1520          nfs4_delegation_accept(rp, CLAIM_NULL, op_res, garp, cred_otw);
1521 1521  
1522 1522          nfs4_end_op(VTOMI4(dvp), dvp, vpi, &recov_state, needrecov);
1523 1523  
1524 1524          if (createmode == EXCLUSIVE4 &&
1525 1525              (in_va->va_mask & ~(AT_GID | AT_SIZE))) {
1526 1526                  NFS4_DEBUG(nfs4_client_state_debug, (CE_NOTE, "nfs4open_otw:"
1527 1527                      " EXCLUSIVE4: sending a SETATTR"));
1528 1528                  /*
1529 1529                   * If doing an exclusive create, then generate
1530 1530                   * a SETATTR to set the initial attributes.
1531 1531                   * Try to set the mtime and the atime to the
1532 1532                   * server's current time.  It is somewhat
1533 1533                   * expected that these fields will be used to
1534 1534                   * store the exclusive create cookie.  If not,
1535 1535                   * server implementors will need to know that
1536 1536                   * a SETATTR will follow an exclusive create
1537 1537                   * and the cookie should be destroyed if
1538 1538                   * appropriate.
1539 1539                   *
1540 1540                   * The AT_GID and AT_SIZE bits are turned off
1541 1541                   * so that the SETATTR request will not attempt
1542 1542                   * to process these.  The gid will be set
1543 1543                   * separately if appropriate.  The size is turned
1544 1544                   * off because it is assumed that a new file will
1545 1545                   * be created empty and if the file wasn't empty,
1546 1546                   * then the exclusive create will have failed
1547 1547                   * because the file must have existed already.
1548 1548                   * Therefore, no truncate operation is needed.
1549 1549                   */
1550 1550                  in_va->va_mask &= ~(AT_GID | AT_SIZE);
1551 1551                  in_va->va_mask |= (AT_MTIME | AT_ATIME);
1552 1552  
1553 1553                  e.error = nfs4setattr(vp, in_va, 0, cr, NULL);
1554 1554                  if (e.error) {
1555 1555                          /*
1556 1556                           * Couldn't correct the attributes of
1557 1557                           * the newly created file and the
1558 1558                           * attributes are wrong.  Remove the
1559 1559                           * file and return an error to the
1560 1560                           * application.
1561 1561                           */
1562 1562                          /* XXX will this take care of client state ? */
1563 1563                          NFS4_DEBUG(nfs4_client_state_debug, (CE_NOTE,
1564 1564                              "nfs4open_otw: EXCLUSIVE4: error %d on SETATTR:"
1565 1565                              " remove file", e.error));
1566 1566                          VN_RELE(vp);
1567 1567                          (void) nfs4_remove(dvp, file_name, cr, NULL, 0);
1568 1568                          /*
1569 1569                           * Since we've reled the vnode and removed
1570 1570                           * the file we now need to return the error.
1571 1571                           * At this point we don't want to update the
1572 1572                           * dircaches, call nfs4_waitfor_purge_complete
1573 1573                           * or set vpp to vp so we need to skip these
1574 1574                           * as well.
1575 1575                           */
1576 1576                          goto skip_update_dircaches;
1577 1577                  }
1578 1578          }
1579 1579  
1580 1580          /*
1581 1581           * If we created or found the correct vnode, due to create_flag or
1582 1582           * fh_differs being set, then update directory cache attribute, readdir
1583 1583           * and dnlc caches.
1584 1584           */
1585 1585          if (create_flag || fh_differs) {
1586 1586                  dirattr_info_t dinfo, *dinfop;
1587 1587  
1588 1588                  /*
1589 1589                   * Make sure getattr succeeded before using results.
1590 1590                   * note: op 7 is getattr(dir) for both flavors of
1591 1591                   * open(create).
1592 1592                   */
1593 1593                  if (create_flag && res.status == NFS4_OK) {
1594 1594                          dinfo.di_time_call = t;
1595 1595                          dinfo.di_cred = cr;
1596 1596                          dinfo.di_garp =
1597 1597                              &res.array[6].nfs_resop4_u.opgetattr.ga_res;
1598 1598                          dinfop = &dinfo;
1599 1599                  } else {
1600 1600                          dinfop = NULL;
1601 1601                  }
1602 1602  
1603 1603                  nfs4_update_dircaches(&op_res->cinfo, dvp, vp, file_name,
1604 1604                      dinfop);
1605 1605          }
1606 1606  
1607 1607          /*
1608 1608           * If the page cache for this file was flushed from actions
1609 1609           * above, it was done asynchronously and if that is true,
1610 1610           * there is a need to wait here for it to complete.  This must
1611 1611           * be done outside of start_fop/end_fop.
1612 1612           */
1613 1613          (void) nfs4_waitfor_purge_complete(vp);
1614 1614  
1615 1615          /*
1616 1616           * It is implicit that we are in the open case (create_flag == 0) since
1617 1617           * fh_differs can only be set to a non-zero value in the open case.
1618 1618           */
1619 1619          if (fh_differs != 0 && vpi != NULL)
1620 1620                  VN_RELE(vpi);
1621 1621  
1622 1622          /*
1623 1623           * Be sure to set *vpp to the correct value before returning.
1624 1624           */
1625 1625          *vpp = vp;
1626 1626  
1627 1627  skip_update_dircaches:
1628 1628  
1629 1629          nfs4args_copen_free(open_args);
1630 1630          if (setgid_flag) {
1631 1631                  nfs4args_verify_free(&argop[8]);
1632 1632                  nfs4args_setattr_free(&argop[9]);
1633 1633          }
1634 1634          (void) xdr_free(xdr_COMPOUND4res_clnt, (caddr_t)&res);
1635 1635  
1636 1636          if (ncr)
1637 1637                  crfree(ncr);
1638 1638          kmem_free(argop, argoplist_size);
1639 1639          return (e.error);
1640 1640  }
1641 1641  
1642 1642  /*
1643 1643   * Reopen an open instance.  cf. nfs4open_otw().
1644 1644   *
1645 1645   * Errors are returned by the nfs4_error_t parameter.
1646 1646   * - ep->error contains an errno value or zero.
1647 1647   * - if it is zero, ep->stat is set to an NFS status code, if any.
1648 1648   *   If the file could not be reopened, but the caller should continue, the
1649 1649   *   file is marked dead and no error values are returned.  If the caller
1650 1650   *   should stop recovering open files and start over, either the ep->error
1651 1651   *   value or ep->stat will indicate an error (either something that requires
1652 1652   *   recovery or EAGAIN).  Note that some recovery (e.g., expired volatile
1653 1653   *   filehandles) may be handled silently by this routine.
1654 1654   * - if it is EINTR, ETIMEDOUT, or NFS4_FRC_UNMT_ERR, recovery for lost state
1655 1655   *   will be started, so the caller should not do it.
1656 1656   *
1657 1657   * Gotos:
1658 1658   * - kill_file : reopen failed in such a fashion as to constitute marking the
1659 1659   *   file dead and setting the open stream's 'os_failed_reopen' as 1.  This
1660 1660   *   is for cases where recovery is not possible.
1661 1661   * - failed_reopen : same as above, except that the file has already been
1662 1662   *   marked dead, so no need to do it again.
1663 1663   * - bailout : reopen failed but we are able to recover and retry the reopen -
1664 1664   *   either within this function immediately or via the calling function.
1665 1665   */
1666 1666  
1667 1667  void
1668 1668  nfs4_reopen(vnode_t *vp, nfs4_open_stream_t *osp, nfs4_error_t *ep,
1669 1669      open_claim_type4 claim, bool_t frc_use_claim_previous,
1670 1670      bool_t is_recov)
1671 1671  {
1672 1672          COMPOUND4args_clnt args;
1673 1673          COMPOUND4res_clnt res;
1674 1674          nfs_argop4 argop[4];
1675 1675          nfs_resop4 *resop;
1676 1676          OPEN4res *op_res = NULL;
1677 1677          OPEN4cargs *open_args;
1678 1678          GETFH4res *gf_res;
1679 1679          rnode4_t *rp = VTOR4(vp);
1680 1680          int doqueue = 1;
1681 1681          cred_t *cr = NULL, *cred_otw = NULL;
1682 1682          nfs4_open_owner_t *oop = NULL;
1683 1683          seqid4 seqid;
1684 1684          nfs4_ga_res_t *garp;
1685 1685          char fn[MAXNAMELEN];
1686 1686          nfs4_recov_state_t recov = {NULL, 0};
1687 1687          nfs4_lost_rqst_t lost_rqst;
1688 1688          mntinfo4_t *mi = VTOMI4(vp);
1689 1689          bool_t abort;
1690 1690          char *failed_msg = "";
1691 1691          int fh_different;
1692 1692          hrtime_t t;
1693 1693          nfs4_bseqid_entry_t *bsep = NULL;
1694 1694  
1695 1695          ASSERT(nfs4_consistent_type(vp));
1696 1696          ASSERT(nfs_zone() == mi->mi_zone);
1697 1697  
1698 1698          nfs4_error_zinit(ep);
1699 1699  
1700 1700          /* this is the cred used to find the open owner */
1701 1701          cr = state_to_cred(osp);
1702 1702          if (cr == NULL) {
1703 1703                  failed_msg = "Couldn't reopen: no cred";
1704 1704                  goto kill_file;
1705 1705          }
1706 1706          /* use this cred for OTW operations */
1707 1707          cred_otw = nfs4_get_otw_cred(cr, mi, osp->os_open_owner);
1708 1708  
1709 1709  top:
1710 1710          nfs4_error_zinit(ep);
1711 1711  
1712 1712          if (mi->mi_vfsp->vfs_flag & VFS_UNMOUNTED) {
1713 1713                  /* File system has been unmounted, quit */
1714 1714                  ep->error = EIO;
1715 1715                  failed_msg = "Couldn't reopen: file system has been unmounted";
1716 1716                  goto kill_file;
1717 1717          }
1718 1718  
1719 1719          oop = osp->os_open_owner;
1720 1720  
1721 1721          ASSERT(oop != NULL);
1722 1722          if (oop == NULL) {      /* be defensive in non-DEBUG */
1723 1723                  failed_msg = "can't reopen: no open owner";
1724 1724                  goto kill_file;
1725 1725          }
1726 1726          open_owner_hold(oop);
1727 1727  
1728 1728          ep->error = nfs4_start_open_seqid_sync(oop, mi);
1729 1729          if (ep->error) {
1730 1730                  open_owner_rele(oop);
1731 1731                  oop = NULL;
1732 1732                  goto bailout;
1733 1733          }
1734 1734  
1735 1735          /*
1736 1736           * If the rnode has a delegation and the delegation has been
1737 1737           * recovered and the server didn't request a recall and the caller
1738 1738           * didn't specifically ask for CLAIM_PREVIOUS (nfs4frlock during
1739 1739           * recovery) and the rnode hasn't been marked dead, then install
1740 1740           * the delegation stateid in the open stream.  Otherwise, proceed
1741 1741           * with a CLAIM_PREVIOUS or CLAIM_NULL OPEN.
1742 1742           */
1743 1743          mutex_enter(&rp->r_statev4_lock);
1744 1744          if (rp->r_deleg_type != OPEN_DELEGATE_NONE &&
1745 1745              !rp->r_deleg_return_pending &&
1746 1746              (rp->r_deleg_needs_recovery == OPEN_DELEGATE_NONE) &&
1747 1747              !rp->r_deleg_needs_recall &&
1748 1748              claim != CLAIM_DELEGATE_CUR && !frc_use_claim_previous &&
1749 1749              !(rp->r_flags & R4RECOVERR)) {
1750 1750                  mutex_enter(&osp->os_sync_lock);
1751 1751                  osp->os_delegation = 1;
1752 1752                  osp->open_stateid = rp->r_deleg_stateid;
1753 1753                  mutex_exit(&osp->os_sync_lock);
1754 1754                  mutex_exit(&rp->r_statev4_lock);
1755 1755                  goto bailout;
1756 1756          }
1757 1757          mutex_exit(&rp->r_statev4_lock);
1758 1758  
1759 1759          /*
1760 1760           * If the file failed recovery, just quit.  This failure need not
1761 1761           * affect other reopens, so don't return an error.
1762 1762           */
1763 1763          mutex_enter(&rp->r_statelock);
1764 1764          if (rp->r_flags & R4RECOVERR) {
1765 1765                  mutex_exit(&rp->r_statelock);
1766 1766                  ep->error = 0;
1767 1767                  goto failed_reopen;
1768 1768          }
1769 1769          mutex_exit(&rp->r_statelock);
1770 1770  
1771 1771          /*
1772 1772           * argop is empty here
1773 1773           *
1774 1774           * PUTFH, OPEN, GETATTR
1775 1775           */
1776 1776          args.ctag = TAG_REOPEN;
1777 1777          args.array_len = 4;
1778 1778          args.array = argop;
1779 1779  
1780 1780          NFS4_DEBUG(nfs4_client_failover_debug, (CE_NOTE,
1781 1781              "nfs4_reopen: file is type %d, id %s",
1782 1782              vp->v_type, rnode4info(VTOR4(vp))));
1783 1783  
1784 1784          argop[0].argop = OP_CPUTFH;
1785 1785  
1786 1786          if (claim != CLAIM_PREVIOUS) {
1787 1787                  /*
1788 1788                   * if this is a file mount then
1789 1789                   * use the mntinfo parentfh
1790 1790                   */
1791 1791                  argop[0].nfs_argop4_u.opcputfh.sfh =
1792 1792                      (vp->v_flag & VROOT) ? mi->mi_srvparentfh :
1793 1793                      VTOSV(vp)->sv_dfh;
1794 1794          } else {
1795 1795                  /* putfh fh to reopen */
1796 1796                  argop[0].nfs_argop4_u.opcputfh.sfh = rp->r_fh;
1797 1797          }
1798 1798  
1799 1799          argop[1].argop = OP_COPEN;
1800 1800          open_args = &argop[1].nfs_argop4_u.opcopen;
1801 1801          open_args->claim = claim;
1802 1802  
1803 1803          if (claim == CLAIM_NULL) {
1804 1804  
1805 1805                  if ((ep->error = vtoname(vp, fn, MAXNAMELEN)) != 0) {
1806 1806                          nfs_cmn_err(ep->error, CE_WARN, "nfs4_reopen: vtoname "
1807 1807                              "failed for vp 0x%p for CLAIM_NULL with %m",
1808 1808                              (void *)vp);
1809 1809                          failed_msg = "Couldn't reopen: vtoname failed for "
1810 1810                              "CLAIM_NULL";
1811 1811                          /* nothing allocated yet */
1812 1812                          goto kill_file;
1813 1813                  }
1814 1814  
1815 1815                  open_args->open_claim4_u.cfile = fn;
1816 1816          } else if (claim == CLAIM_PREVIOUS) {
1817 1817  
1818 1818                  /*
1819 1819                   * We have two cases to deal with here:
1820 1820                   * 1) We're being called to reopen files in order to satisfy
1821 1821                   *    a lock operation request which requires us to explicitly
1822 1822                   *    reopen files which were opened under a delegation.  If
1823 1823                   *    we're in recovery, we *must* use CLAIM_PREVIOUS.  In
1824 1824                   *    that case, frc_use_claim_previous is TRUE and we must
1825 1825                   *    use the rnode's current delegation type (r_deleg_type).
1826 1826                   * 2) We're reopening files during some form of recovery.
1827 1827                   *    In this case, frc_use_claim_previous is FALSE and we
1828 1828                   *    use the delegation type appropriate for recovery
1829 1829                   *    (r_deleg_needs_recovery).
1830 1830                   */
1831 1831                  mutex_enter(&rp->r_statev4_lock);
1832 1832                  open_args->open_claim4_u.delegate_type =
1833 1833                      frc_use_claim_previous ?
1834 1834                      rp->r_deleg_type :
1835 1835                      rp->r_deleg_needs_recovery;
1836 1836                  mutex_exit(&rp->r_statev4_lock);
1837 1837  
1838 1838          } else if (claim == CLAIM_DELEGATE_CUR) {
1839 1839  
1840 1840                  if ((ep->error = vtoname(vp, fn, MAXNAMELEN)) != 0) {
1841 1841                          nfs_cmn_err(ep->error, CE_WARN, "nfs4_reopen: vtoname "
1842 1842                              "failed for vp 0x%p for CLAIM_DELEGATE_CUR "
1843 1843                              "with %m", (void *)vp);
1844 1844                          failed_msg = "Couldn't reopen: vtoname failed for "
1845 1845                              "CLAIM_DELEGATE_CUR";
1846 1846                          /* nothing allocated yet */
1847 1847                          goto kill_file;
1848 1848                  }
1849 1849  
1850 1850                  mutex_enter(&rp->r_statev4_lock);
1851 1851                  open_args->open_claim4_u.delegate_cur_info.delegate_stateid =
1852 1852                      rp->r_deleg_stateid;
1853 1853                  mutex_exit(&rp->r_statev4_lock);
1854 1854  
1855 1855                  open_args->open_claim4_u.delegate_cur_info.cfile = fn;
1856 1856          }
1857 1857          open_args->opentype = OPEN4_NOCREATE;
1858 1858          open_args->owner.clientid = mi2clientid(mi);
1859 1859          open_args->owner.owner_len = sizeof (oop->oo_name);
1860 1860          open_args->owner.owner_val =
1861 1861              kmem_alloc(open_args->owner.owner_len, KM_SLEEP);
1862 1862          bcopy(&oop->oo_name, open_args->owner.owner_val,
1863 1863              open_args->owner.owner_len);
1864 1864          open_args->share_access = 0;
1865 1865          open_args->share_deny = 0;
1866 1866  
1867 1867          mutex_enter(&osp->os_sync_lock);
1868 1868          NFS4_DEBUG(nfs4_client_recov_debug, (CE_NOTE, "nfs4_reopen: osp %p rp "
1869 1869              "%p: read acc %"PRIu64" write acc %"PRIu64": open ref count %d: "
1870 1870              "mmap read %"PRIu64" mmap write %"PRIu64" claim %d ",
1871 1871              (void *)osp, (void *)rp, osp->os_share_acc_read,
1872 1872              osp->os_share_acc_write, osp->os_open_ref_count,
1873 1873              osp->os_mmap_read, osp->os_mmap_write, claim));
1874 1874  
1875 1875          if (osp->os_share_acc_read || osp->os_mmap_read)
1876 1876                  open_args->share_access |= OPEN4_SHARE_ACCESS_READ;
1877 1877          if (osp->os_share_acc_write || osp->os_mmap_write)
1878 1878                  open_args->share_access |= OPEN4_SHARE_ACCESS_WRITE;
1879 1879          if (osp->os_share_deny_read)
1880 1880                  open_args->share_deny |= OPEN4_SHARE_DENY_READ;
1881 1881          if (osp->os_share_deny_write)
1882 1882                  open_args->share_deny |= OPEN4_SHARE_DENY_WRITE;
1883 1883          mutex_exit(&osp->os_sync_lock);
1884 1884  
1885 1885          seqid = nfs4_get_open_seqid(oop) + 1;
1886 1886          open_args->seqid = seqid;
1887 1887  
1888 1888          /* Construct the getfh part of the compound */
1889 1889          argop[2].argop = OP_GETFH;
1890 1890  
1891 1891          /* Construct the getattr part of the compound */
1892 1892          argop[3].argop = OP_GETATTR;
1893 1893          argop[3].nfs_argop4_u.opgetattr.attr_request = NFS4_VATTR_MASK;
1894 1894          argop[3].nfs_argop4_u.opgetattr.mi = mi;
1895 1895  
1896 1896          t = gethrtime();
1897 1897  
1898 1898          rfs4call(mi, &args, &res, cred_otw, &doqueue, 0, ep);
1899 1899  
1900 1900          if (ep->error) {
1901 1901                  if (!is_recov && !frc_use_claim_previous &&
1902 1902                      (ep->error == EINTR || ep->error == ETIMEDOUT ||
1903 1903                      NFS4_FRC_UNMT_ERR(ep->error, vp->v_vfsp))) {
1904 1904                          nfs4open_save_lost_rqst(ep->error, &lost_rqst, oop,
1905 1905                              cred_otw, vp, NULL, open_args);
1906 1906                          abort = nfs4_start_recovery(ep,
1907 1907                              VTOMI4(vp), vp, NULL, NULL,
1908 1908                              lost_rqst.lr_op == OP_OPEN ?
1909 1909                              &lost_rqst : NULL, OP_OPEN, NULL, NULL, NULL);
1910 1910                          nfs4args_copen_free(open_args);
1911 1911                          goto bailout;
1912 1912                  }
1913 1913  
1914 1914                  nfs4args_copen_free(open_args);
1915 1915  
1916 1916                  if (ep->error == EACCES && cred_otw != cr) {
1917 1917                          crfree(cred_otw);
1918 1918                          cred_otw = cr;
1919 1919                          crhold(cred_otw);
1920 1920                          nfs4_end_open_seqid_sync(oop);
1921 1921                          open_owner_rele(oop);
1922 1922                          oop = NULL;
1923 1923                          goto top;
1924 1924                  }
1925 1925                  if (ep->error == ETIMEDOUT)
1926 1926                          goto bailout;
1927 1927                  failed_msg = "Couldn't reopen: rpc error";
1928 1928                  goto kill_file;
1929 1929          }
1930 1930  
1931 1931          if (nfs4_need_to_bump_seqid(&res))
1932 1932                  nfs4_set_open_seqid(seqid, oop, args.ctag);
1933 1933  
1934 1934          switch (res.status) {
1935 1935          case NFS4_OK:
1936 1936                  if (recov.rs_flags & NFS4_RS_DELAY_MSG) {
1937 1937                          mutex_enter(&rp->r_statelock);
1938 1938                          rp->r_delay_interval = 0;
1939 1939                          mutex_exit(&rp->r_statelock);
1940 1940                  }
1941 1941                  break;
1942 1942          case NFS4ERR_BAD_SEQID:
1943 1943                  bsep = nfs4_create_bseqid_entry(oop, NULL, vp, 0,
1944 1944                      args.ctag, open_args->seqid);
1945 1945  
1946 1946                  abort = nfs4_start_recovery(ep, VTOMI4(vp), vp, NULL,
1947 1947                      NULL, lost_rqst.lr_op == OP_OPEN ? &lost_rqst :
1948 1948                      NULL, OP_OPEN, bsep, NULL, NULL);
1949 1949  
1950 1950                  nfs4args_copen_free(open_args);
1951 1951                  (void) xdr_free(xdr_COMPOUND4res_clnt, (caddr_t)&res);
1952 1952                  nfs4_end_open_seqid_sync(oop);
1953 1953                  open_owner_rele(oop);
1954 1954                  oop = NULL;
1955 1955                  kmem_free(bsep, sizeof (*bsep));
1956 1956  
1957 1957                  goto kill_file;
1958 1958          case NFS4ERR_NO_GRACE:
1959 1959                  nfs4args_copen_free(open_args);
1960 1960                  (void) xdr_free(xdr_COMPOUND4res_clnt, (caddr_t)&res);
1961 1961                  nfs4_end_open_seqid_sync(oop);
1962 1962                  open_owner_rele(oop);
1963 1963                  oop = NULL;
1964 1964                  if (claim == CLAIM_PREVIOUS) {
1965 1965                          /*
1966 1966                           * Retry as a plain open. We don't need to worry about
1967 1967                           * checking the changeinfo: it is acceptable for a
1968 1968                           * client to re-open a file and continue processing
1969 1969                           * (in the absence of locks).
1970 1970                           */
1971 1971                          NFS4_DEBUG(nfs4_client_recov_debug, (CE_NOTE,
1972 1972                              "nfs4_reopen: CLAIM_PREVIOUS: NFS4ERR_NO_GRACE; "
1973 1973                              "will retry as CLAIM_NULL"));
1974 1974                          claim = CLAIM_NULL;
1975 1975                          nfs4_mi_kstat_inc_no_grace(mi);
1976 1976                          goto top;
1977 1977                  }
1978 1978                  failed_msg =
1979 1979                      "Couldn't reopen: tried reclaim outside grace period. ";
1980 1980                  goto kill_file;
1981 1981          case NFS4ERR_GRACE:
1982 1982                  nfs4_set_grace_wait(mi);
1983 1983                  nfs4args_copen_free(open_args);
1984 1984                  (void) xdr_free(xdr_COMPOUND4res_clnt, (caddr_t)&res);
1985 1985                  nfs4_end_open_seqid_sync(oop);
1986 1986                  open_owner_rele(oop);
1987 1987                  oop = NULL;
1988 1988                  ep->error = nfs4_wait_for_grace(mi, &recov);
1989 1989                  if (ep->error != 0)
1990 1990                          goto bailout;
1991 1991                  goto top;
1992 1992          case NFS4ERR_DELAY:
1993 1993                  nfs4_set_delay_wait(vp);
1994 1994                  nfs4args_copen_free(open_args);
1995 1995                  (void) xdr_free(xdr_COMPOUND4res_clnt, (caddr_t)&res);
1996 1996                  nfs4_end_open_seqid_sync(oop);
1997 1997                  open_owner_rele(oop);
1998 1998                  oop = NULL;
1999 1999                  ep->error = nfs4_wait_for_delay(vp, &recov);
2000 2000                  nfs4_mi_kstat_inc_delay(mi);
2001 2001                  if (ep->error != 0)
2002 2002                          goto bailout;
2003 2003                  goto top;
2004 2004          case NFS4ERR_FHEXPIRED:
2005 2005                  /* recover filehandle and retry */
2006 2006                  abort = nfs4_start_recovery(ep,
2007 2007                      mi, vp, NULL, NULL, NULL, OP_OPEN, NULL, NULL, NULL);
2008 2008                  nfs4args_copen_free(open_args);
2009 2009                  (void) xdr_free(xdr_COMPOUND4res_clnt, (caddr_t)&res);
2010 2010                  nfs4_end_open_seqid_sync(oop);
2011 2011                  open_owner_rele(oop);
2012 2012                  oop = NULL;
2013 2013                  if (abort == FALSE)
2014 2014                          goto top;
2015 2015                  failed_msg = "Couldn't reopen: recovery aborted";
2016 2016                  goto kill_file;
2017 2017          case NFS4ERR_RESOURCE:
2018 2018          case NFS4ERR_STALE_CLIENTID:
2019 2019          case NFS4ERR_WRONGSEC:
2020 2020          case NFS4ERR_EXPIRED:
2021 2021                  /*
2022 2022                   * Do not mark the file dead and let the calling
2023 2023                   * function initiate recovery.
2024 2024                   */
2025 2025                  nfs4args_copen_free(open_args);
2026 2026                  (void) xdr_free(xdr_COMPOUND4res_clnt, (caddr_t)&res);
2027 2027                  nfs4_end_open_seqid_sync(oop);
2028 2028                  open_owner_rele(oop);
2029 2029                  oop = NULL;
2030 2030                  goto bailout;
2031 2031          case NFS4ERR_ACCESS:
2032 2032                  if (cred_otw != cr) {
2033 2033                          crfree(cred_otw);
2034 2034                          cred_otw = cr;
2035 2035                          crhold(cred_otw);
2036 2036                          nfs4args_copen_free(open_args);
2037 2037                          (void) xdr_free(xdr_COMPOUND4res_clnt, (caddr_t)&res);
2038 2038                          nfs4_end_open_seqid_sync(oop);
2039 2039                          open_owner_rele(oop);
2040 2040                          oop = NULL;
2041 2041                          goto top;
2042 2042                  }
2043 2043                  /* fall through */
2044 2044          default:
2045 2045                  NFS4_DEBUG(nfs4_client_failover_debug, (CE_NOTE,
2046 2046                      "nfs4_reopen: r_server 0x%p, mi_curr_serv 0x%p, rnode %s",
2047 2047                      (void*)VTOR4(vp)->r_server, (void*)mi->mi_curr_serv,
2048 2048                      rnode4info(VTOR4(vp))));
2049 2049                  failed_msg = "Couldn't reopen: NFSv4 error";
2050 2050                  nfs4args_copen_free(open_args);
2051 2051                  (void) xdr_free(xdr_COMPOUND4res_clnt, (caddr_t)&res);
2052 2052                  goto kill_file;
2053 2053          }
2054 2054  
2055 2055          resop = &res.array[1];  /* open res */
2056 2056          op_res = &resop->nfs_resop4_u.opopen;
2057 2057  
2058 2058          garp = &res.array[3].nfs_resop4_u.opgetattr.ga_res;
2059 2059  
2060 2060          /*
2061 2061           * Check if the path we reopened really is the same
2062 2062           * file. We could end up in a situation where the file
2063 2063           * was removed and a new file created with the same name.
2064 2064           */
2065 2065          resop = &res.array[2];
2066 2066          gf_res = &resop->nfs_resop4_u.opgetfh;
2067 2067          (void) nfs_rw_enter_sig(&mi->mi_fh_lock, RW_READER, 0);
2068 2068          fh_different = (nfs4cmpfh(&rp->r_fh->sfh_fh, &gf_res->object) != 0);
2069 2069          if (fh_different) {
2070 2070                  if (mi->mi_fh_expire_type == FH4_PERSISTENT ||
2071 2071                      mi->mi_fh_expire_type & FH4_NOEXPIRE_WITH_OPEN) {
2072 2072                          /* Oops, we don't have the same file */
2073 2073                          if (mi->mi_fh_expire_type == FH4_PERSISTENT)
2074 2074                                  failed_msg = "Couldn't reopen: Persistent "
2075 2075                                      "file handle changed";
2076 2076                          else
2077 2077                                  failed_msg = "Couldn't reopen: Volatile "
2078 2078                                      "(no expire on open) file handle changed";
2079 2079  
2080 2080                          nfs4args_copen_free(open_args);
2081 2081                          (void) xdr_free(xdr_COMPOUND4res_clnt, (caddr_t)&res);
2082 2082                          nfs_rw_exit(&mi->mi_fh_lock);
2083 2083                          goto kill_file;
2084 2084  
2085 2085                  } else {
2086 2086                          /*
2087 2087                           * We have volatile file handles that don't compare.
2088 2088                           * If the fids are the same then we assume that the
2089 2089                           * file handle expired but the rnode still refers to
2090 2090                           * the same file object.
2091 2091                           *
2092 2092                           * First check that we have fids or not.
2093 2093                           * If we don't we have a dumb server so we will
2094 2094                           * just assume every thing is ok for now.
2095 2095                           */
2096 2096                          if (!ep->error && garp->n4g_va.va_mask & AT_NODEID &&
2097 2097                              rp->r_attr.va_mask & AT_NODEID &&
2098 2098                              rp->r_attr.va_nodeid != garp->n4g_va.va_nodeid) {
2099 2099                                  /*
2100 2100                                   * We have fids, but they don't
2101 2101                                   * compare. So kill the file.
2102 2102                                   */
2103 2103                                  failed_msg =
2104 2104                                      "Couldn't reopen: file handle changed"
2105 2105                                      " due to mismatched fids";
2106 2106                                  nfs4args_copen_free(open_args);
2107 2107                                  (void) xdr_free(xdr_COMPOUND4res_clnt,
2108 2108                                      (caddr_t)&res);
2109 2109                                  nfs_rw_exit(&mi->mi_fh_lock);
2110 2110                                  goto kill_file;
2111 2111                          } else {
2112 2112                                  /*
2113 2113                                   * We have volatile file handles that refers
2114 2114                                   * to the same file (at least they have the
2115 2115                                   * same fid) or we don't have fids so we
2116 2116                                   * can't tell. :(. We'll be a kind and accepting
2117 2117                                   * client so we'll update the rnode's file
2118 2118                                   * handle with the otw handle.
2119 2119                                   *
2120 2120                                   * We need to drop mi->mi_fh_lock since
2121 2121                                   * sh4_update acquires it. Since there is
2122 2122                                   * only one recovery thread there is no
2123 2123                                   * race.
2124 2124                                   */
2125 2125                                  nfs_rw_exit(&mi->mi_fh_lock);
2126 2126                                  sfh4_update(rp->r_fh, &gf_res->object);
2127 2127                          }
2128 2128                  }
2129 2129          } else {
2130 2130                  nfs_rw_exit(&mi->mi_fh_lock);
2131 2131          }
2132 2132  
2133 2133          ASSERT(nfs4_consistent_type(vp));
2134 2134  
2135 2135          /*
2136 2136           * If the server wanted an OPEN_CONFIRM but that fails, just start
2137 2137           * over.  Presumably if there is a persistent error it will show up
2138 2138           * when we resend the OPEN.
2139 2139           */
2140 2140          if (op_res->rflags & OPEN4_RESULT_CONFIRM) {
2141 2141                  bool_t retry_open = FALSE;
2142 2142  
2143 2143                  nfs4open_confirm(vp, &seqid, &op_res->stateid,
2144 2144                      cred_otw, is_recov, &retry_open,
2145 2145                      oop, FALSE, ep, NULL);
2146 2146                  if (ep->error || ep->stat) {
2147 2147                          nfs4args_copen_free(open_args);
2148 2148                          (void) xdr_free(xdr_COMPOUND4res_clnt, (caddr_t)&res);
2149 2149                          nfs4_end_open_seqid_sync(oop);
2150 2150                          open_owner_rele(oop);
2151 2151                          oop = NULL;
2152 2152                          goto top;
2153 2153                  }
2154 2154          }
2155 2155  
2156 2156          mutex_enter(&osp->os_sync_lock);
2157 2157          osp->open_stateid = op_res->stateid;
2158 2158          osp->os_delegation = 0;
2159 2159          /*
2160 2160           * Need to reset this bitfield for the possible case where we were
2161 2161           * going to OTW CLOSE the file, got a non-recoverable error, and before
2162 2162           * we could retry the CLOSE, OPENed the file again.
2163 2163           */
2164 2164          ASSERT(osp->os_open_owner->oo_seqid_inuse);
2165 2165          osp->os_final_close = 0;
2166 2166          osp->os_force_close = 0;
2167 2167          if (claim == CLAIM_DELEGATE_CUR || claim == CLAIM_PREVIOUS)
2168 2168                  osp->os_dc_openacc = open_args->share_access;
2169 2169          mutex_exit(&osp->os_sync_lock);
2170 2170  
2171 2171          nfs4_end_open_seqid_sync(oop);
2172 2172  
2173 2173          /* accept delegation, if any */
2174 2174          nfs4_delegation_accept(rp, claim, op_res, garp, cred_otw);
2175 2175  
2176 2176          nfs4args_copen_free(open_args);
2177 2177  
2178 2178          nfs4_attr_cache(vp, garp, t, cr, TRUE, NULL);
2179 2179  
2180 2180          (void) xdr_free(xdr_COMPOUND4res_clnt, (caddr_t)&res);
2181 2181  
2182 2182          ASSERT(nfs4_consistent_type(vp));
2183 2183  
2184 2184          open_owner_rele(oop);
2185 2185          crfree(cr);
2186 2186          crfree(cred_otw);
2187 2187          return;
2188 2188  
2189 2189  kill_file:
2190 2190          nfs4_fail_recov(vp, failed_msg, ep->error, ep->stat);
2191 2191  failed_reopen:
2192 2192          NFS4_DEBUG(nfs4_open_stream_debug, (CE_NOTE,
2193 2193              "nfs4_reopen: setting os_failed_reopen for osp %p, cr %p, rp %s",
2194 2194              (void *)osp, (void *)cr, rnode4info(rp)));
2195 2195          mutex_enter(&osp->os_sync_lock);
2196 2196          osp->os_failed_reopen = 1;
2197 2197          mutex_exit(&osp->os_sync_lock);
2198 2198  bailout:
2199 2199          if (oop != NULL) {
2200 2200                  nfs4_end_open_seqid_sync(oop);
2201 2201                  open_owner_rele(oop);
2202 2202          }
2203 2203          if (cr != NULL)
2204 2204                  crfree(cr);
2205 2205          if (cred_otw != NULL)
2206 2206                  crfree(cred_otw);
2207 2207  }
2208 2208  
2209 2209  /* for . and .. OPENs */
2210 2210  /* ARGSUSED */
2211 2211  static int
2212 2212  nfs4_open_non_reg_file(vnode_t **vpp, int flag, cred_t *cr)
2213 2213  {
2214 2214          rnode4_t *rp;
2215 2215          nfs4_ga_res_t gar;
2216 2216  
2217 2217          ASSERT(nfs_zone() == VTOMI4(*vpp)->mi_zone);
2218 2218  
2219 2219          /*
2220 2220           * If close-to-open consistency checking is turned off or
2221 2221           * if there is no cached data, we can avoid
2222 2222           * the over the wire getattr.  Otherwise, force a
2223 2223           * call to the server to get fresh attributes and to
2224 2224           * check caches. This is required for close-to-open
2225 2225           * consistency.
2226 2226           */
2227 2227          rp = VTOR4(*vpp);
2228 2228          if (VTOMI4(*vpp)->mi_flags & MI4_NOCTO ||
2229 2229              (rp->r_dir == NULL && !nfs4_has_pages(*vpp)))
2230 2230                  return (0);
2231 2231  
2232 2232          gar.n4g_va.va_mask = AT_ALL;
2233 2233          return (nfs4_getattr_otw(*vpp, &gar, cr, 0));
2234 2234  }
2235 2235  
/*
 * CLOSE a file.
 *
 * VOP_CLOSE entry point for NFSv4.  Releases the locks and shares held
 * by this process on the file, flushes dirty pages for write opens, and
 * (for regular files, on the last close) performs the protocol CLOSE
 * via nfs4close_one().  Returns the highest-priority error encountered:
 * putpage/commit error first, then the rnode's saved r_error, then the
 * error from the protocol close.
 */
/* ARGSUSED */
static int
nfs4_close(vnode_t *vp, int flag, int count, offset_t offset, cred_t *cr,
    caller_context_t *ct)
{
	rnode4_t	*rp;
	int		 error = 0;	/* from nfs4_putpage_commit */
	int		 r_error = 0;	/* saved asynchronous rnode error */
	int		 n4error = 0;	/* from nfs4close_one */
	nfs4_error_t	 e = { 0, NFS4_OK, RPC_SUCCESS };

	/*
	 * Remove client state for this (lockowner, file) pair.
	 * Issue otw v4 call to have the server do the same.
	 */

	rp = VTOR4(vp);

	/*
	 * zone_enter(2) prevents processes from changing zones with NFS files
	 * open; if we happen to get here from the wrong zone we can't do
	 * anything over the wire.
	 */
	if (VTOMI4(vp)->mi_zone != nfs_zone()) {
		/*
		 * We could attempt to clean up locks, except we're sure
		 * that the current process didn't acquire any locks on
		 * the file: any attempt to lock a file belonging to another
		 * zone will fail, and one can't lock an NFS file and then
		 * change zones, as that fails too.
		 *
		 * Returning an error here is the sane thing to do.  A
		 * subsequent call to VN_RELE() which translates to a
		 * nfs4_inactive() will clean up state: if the zone of the
		 * vnode's origin is still alive and kicking, the inactive
		 * thread will handle the request (from the correct zone), and
		 * everything (minus the OTW close call) should be OK.  If the
		 * zone is going away nfs4_async_inactive() will throw away
		 * delegations, open streams and cached pages inline.
		 */
		return (EIO);
	}

	/*
	 * If we are using local locking for this filesystem, then
	 * release all of the SYSV style record locks.  Otherwise,
	 * we are doing network locking and we need to release all
	 * of the network locks.  All of the locks held by this
	 * process on this file are released no matter what the
	 * incoming reference count is.
	 */
	if (VTOMI4(vp)->mi_flags & MI4_LLOCK) {
		cleanlocks(vp, ttoproc(curthread)->p_pid, 0);
		cleanshares(vp, ttoproc(curthread)->p_pid);
	} else
		e.error = nfs4_lockrelease(vp, flag, offset, cr);

	if (e.error) {
		/*
		 * The network lock release failed.  Fall back to cleaning
		 * the local lock state under this client's sysid so we
		 * don't leave stale lock records behind; if the sysid
		 * can't be found, just note it via DTrace and return.
		 */
		struct lm_sysid *lmsid;
		lmsid = nfs4_find_sysid(VTOMI4(vp));
		if (lmsid == NULL) {
			DTRACE_PROBE2(unknown__sysid, int, e.error,
			    vnode_t *, vp);
		} else {
			cleanlocks(vp, ttoproc(curthread)->p_pid,
			    (lm_sysidt(lmsid) | LM_SYSID_CLIENT));

			lm_rel_sysid(lmsid);
		}
		return (e.error);
	}

	/* Only the last close (count drops to 1) does the real work. */
	if (count > 1)
		return (0);

	/*
	 * If the file has been `unlinked', then purge the
	 * DNLC so that this vnode will get recycled quicker
	 * and the .nfs* file on the server will get removed.
	 */
	if (rp->r_unldvp != NULL)
		dnlc_purge_vp(vp);

	/*
	 * If the file was open for write and there are pages,
	 * do a synchronous flush and commit of all of the
	 * dirty and uncommitted pages.
	 */
	ASSERT(!e.error);
	if ((flag & FWRITE) && nfs4_has_pages(vp))
		error = nfs4_putpage_commit(vp, 0, 0, cr);

	/* Consume (and clear) any error saved asynchronously on the rnode. */
	mutex_enter(&rp->r_statelock);
	r_error = rp->r_error;
	rp->r_error = 0;
	mutex_exit(&rp->r_statelock);

	/*
	 * If this file type is one for which no explicit 'open' was
	 * done, then bail now (ie. no need for protocol 'close'). If
	 * there was an error w/the vm subsystem, return _that_ error,
	 * otherwise, return any errors that may've been reported via
	 * the rnode.
	 */
	if (vp->v_type != VREG)
		return (error ? error : r_error);

	/*
	 * The sync putpage commit may have failed above, but since
	 * we're working w/a regular file, we need to do the protocol
	 * 'close' (nfs4close_one will figure out if an otw close is
	 * needed or not). Report any errors _after_ doing the protocol
	 * 'close'.
	 */
	nfs4close_one(vp, NULL, cr, flag, NULL, &e, CLOSE_NORM, 0, 0, 0);
	n4error = e.error ? e.error : geterrno4(e.stat);

	/*
	 * Error reporting prio (Hi -> Lo)
	 *
	 *   i) nfs4_putpage_commit (error)
	 *  ii) rnode's (r_error)
	 * iii) nfs4close_one (n4error)
	 */
	return (error ? error : (r_error ? r_error : n4error));
}
2365 2365  
2366 2366  /*
2367 2367   * Initialize *lost_rqstp.
2368 2368   */
2369 2369  
2370 2370  static void
2371 2371  nfs4close_save_lost_rqst(int error, nfs4_lost_rqst_t *lost_rqstp,
2372 2372      nfs4_open_owner_t *oop, nfs4_open_stream_t *osp, cred_t *cr,
2373 2373      vnode_t *vp)
2374 2374  {
2375 2375          if (error != ETIMEDOUT && error != EINTR &&
2376 2376              !NFS4_FRC_UNMT_ERR(error, vp->v_vfsp)) {
2377 2377                  lost_rqstp->lr_op = 0;
2378 2378                  return;
2379 2379          }
2380 2380  
2381 2381          NFS4_DEBUG(nfs4_lost_rqst_debug, (CE_NOTE,
2382 2382              "nfs4close_save_lost_rqst: error %d", error));
2383 2383  
2384 2384          lost_rqstp->lr_op = OP_CLOSE;
2385 2385          /*
2386 2386           * The vp is held and rele'd via the recovery code.
2387 2387           * See nfs4_save_lost_rqst.
2388 2388           */
2389 2389          lost_rqstp->lr_vp = vp;
2390 2390          lost_rqstp->lr_dvp = NULL;
2391 2391          lost_rqstp->lr_oop = oop;
2392 2392          lost_rqstp->lr_osp = osp;
2393 2393          ASSERT(osp != NULL);
2394 2394          ASSERT(mutex_owned(&osp->os_sync_lock));
2395 2395          osp->os_pending_close = 1;
2396 2396          lost_rqstp->lr_lop = NULL;
2397 2397          lost_rqstp->lr_cr = cr;
2398 2398          lost_rqstp->lr_flk = NULL;
2399 2399          lost_rqstp->lr_putfirst = FALSE;
2400 2400  }
2401 2401  
2402 2402  /*
2403 2403   * Assumes you already have the open seqid sync grabbed as well as the
2404 2404   * 'os_sync_lock'.  Note: this will release the open seqid sync and
2405 2405   * 'os_sync_lock' if client recovery starts.  Calling functions have to
2406 2406   * be prepared to handle this.
2407 2407   *
2408 2408   * 'recov' is returned as 1 if the CLOSE operation detected client recovery
2409 2409   * was needed and was started, and that the calling function should retry
2410 2410   * this function; otherwise it is returned as 0.
2411 2411   *
2412 2412   * Errors are returned via the nfs4_error_t parameter.
2413 2413   */
2414 2414  static void
2415 2415  nfs4close_otw(rnode4_t *rp, cred_t *cred_otw, nfs4_open_owner_t *oop,
2416 2416      nfs4_open_stream_t *osp, int *recov, int *did_start_seqid_syncp,
2417 2417      nfs4_close_type_t close_type, nfs4_error_t *ep, int *have_sync_lockp)
2418 2418  {
2419 2419          COMPOUND4args_clnt args;
2420 2420          COMPOUND4res_clnt res;
2421 2421          CLOSE4args *close_args;
2422 2422          nfs_resop4 *resop;
2423 2423          nfs_argop4 argop[3];
2424 2424          int doqueue = 1;
2425 2425          mntinfo4_t *mi;
2426 2426          seqid4 seqid;
2427 2427          vnode_t *vp;
2428 2428          bool_t needrecov = FALSE;
2429 2429          nfs4_lost_rqst_t lost_rqst;
2430 2430          hrtime_t t;
2431 2431  
2432 2432          ASSERT(nfs_zone() == VTOMI4(RTOV4(rp))->mi_zone);
2433 2433  
2434 2434          ASSERT(MUTEX_HELD(&osp->os_sync_lock));
2435 2435  
2436 2436          NFS4_DEBUG(nfs4_client_state_debug, (CE_NOTE, "nfs4close_otw"));
2437 2437  
2438 2438          /* Only set this to 1 if recovery is started */
2439 2439          *recov = 0;
2440 2440  
2441 2441          /* do the OTW call to close the file */
2442 2442  
2443 2443          if (close_type == CLOSE_RESEND)
2444 2444                  args.ctag = TAG_CLOSE_LOST;
2445 2445          else if (close_type == CLOSE_AFTER_RESEND)
2446 2446                  args.ctag = TAG_CLOSE_UNDO;
2447 2447          else
2448 2448                  args.ctag = TAG_CLOSE;
2449 2449  
2450 2450          args.array_len = 3;
2451 2451          args.array = argop;
2452 2452  
2453 2453          vp = RTOV4(rp);
2454 2454  
2455 2455          mi = VTOMI4(vp);
2456 2456  
2457 2457          /* putfh target fh */
2458 2458          argop[0].argop = OP_CPUTFH;
2459 2459          argop[0].nfs_argop4_u.opcputfh.sfh = rp->r_fh;
2460 2460  
2461 2461          argop[1].argop = OP_GETATTR;
2462 2462          argop[1].nfs_argop4_u.opgetattr.attr_request = NFS4_VATTR_MASK;
2463 2463          argop[1].nfs_argop4_u.opgetattr.mi = mi;
2464 2464  
2465 2465          argop[2].argop = OP_CLOSE;
2466 2466          close_args = &argop[2].nfs_argop4_u.opclose;
2467 2467  
2468 2468          seqid = nfs4_get_open_seqid(oop) + 1;
2469 2469  
2470 2470          close_args->seqid = seqid;
2471 2471          close_args->open_stateid = osp->open_stateid;
2472 2472  
2473 2473          NFS4_DEBUG(nfs4_client_call_debug, (CE_NOTE,
2474 2474              "nfs4close_otw: %s call, rp %s", needrecov ? "recov" : "first",
2475 2475              rnode4info(rp)));
2476 2476  
2477 2477          t = gethrtime();
2478 2478  
2479 2479          rfs4call(mi, &args, &res, cred_otw, &doqueue, 0, ep);
2480 2480  
2481 2481          if (!ep->error && nfs4_need_to_bump_seqid(&res)) {
2482 2482                  nfs4_set_open_seqid(seqid, oop, args.ctag);
2483 2483          }
2484 2484  
2485 2485          needrecov = nfs4_needs_recovery(ep, TRUE, mi->mi_vfsp);
2486 2486          if (ep->error && !needrecov) {
2487 2487                  /*
2488 2488                   * if there was an error and no recovery is to be done
2489 2489                   * then then set up the file to flush its cache if
2490 2490                   * needed for the next caller.
2491 2491                   */
2492 2492                  mutex_enter(&rp->r_statelock);
2493 2493                  PURGE_ATTRCACHE4_LOCKED(rp);
2494 2494                  rp->r_flags &= ~R4WRITEMODIFIED;
2495 2495                  mutex_exit(&rp->r_statelock);
2496 2496                  return;
2497 2497          }
2498 2498  
2499 2499          if (needrecov) {
2500 2500                  bool_t abort;
2501 2501                  nfs4_bseqid_entry_t *bsep = NULL;
2502 2502  
2503 2503                  if (close_type != CLOSE_RESEND)
2504 2504                          nfs4close_save_lost_rqst(ep->error, &lost_rqst, oop,
2505 2505                              osp, cred_otw, vp);
2506 2506  
2507 2507                  if (!ep->error && res.status == NFS4ERR_BAD_SEQID)
2508 2508                          bsep = nfs4_create_bseqid_entry(oop, NULL, vp,
2509 2509                              0, args.ctag, close_args->seqid);
2510 2510  
2511 2511                  NFS4_DEBUG(nfs4_client_recov_debug, (CE_NOTE,
2512 2512                      "nfs4close_otw: initiating recovery. error %d "
2513 2513                      "res.status %d", ep->error, res.status));
2514 2514  
2515 2515                  /*
2516 2516                   * Drop the 'os_sync_lock' here so we don't hit
2517 2517                   * a potential recursive mutex_enter via an
2518 2518                   * 'open_stream_hold()'.
2519 2519                   */
2520 2520                  mutex_exit(&osp->os_sync_lock);
2521 2521                  *have_sync_lockp = 0;
2522 2522                  abort = nfs4_start_recovery(ep, VTOMI4(vp), vp, NULL, NULL,
2523 2523                      (close_type != CLOSE_RESEND &&
2524 2524                      lost_rqst.lr_op == OP_CLOSE) ? &lost_rqst : NULL,
2525 2525                      OP_CLOSE, bsep, NULL, NULL);
2526 2526  
2527 2527                  /* drop open seq sync, and let the calling function regrab it */
2528 2528                  nfs4_end_open_seqid_sync(oop);
2529 2529                  *did_start_seqid_syncp = 0;
2530 2530  
2531 2531                  if (bsep)
2532 2532                          kmem_free(bsep, sizeof (*bsep));
2533 2533                  /*
2534 2534                   * For signals, the caller wants to quit, so don't say to
2535 2535                   * retry.  For forced unmount, if it's a user thread, it
2536 2536                   * wants to quit.  If it's a recovery thread, the retry
2537 2537                   * will happen higher-up on the call stack.  Either way,
2538 2538                   * don't say to retry.
2539 2539                   */
2540 2540                  if (abort == FALSE && ep->error != EINTR &&
2541 2541                      !NFS4_FRC_UNMT_ERR(ep->error, mi->mi_vfsp) &&
2542 2542                      close_type != CLOSE_RESEND &&
2543 2543                      close_type != CLOSE_AFTER_RESEND)
2544 2544                          *recov = 1;
2545 2545                  else
2546 2546                          *recov = 0;
2547 2547  
2548 2548                  if (!ep->error)
2549 2549                          (void) xdr_free(xdr_COMPOUND4res_clnt, (caddr_t)&res);
2550 2550                  return;
2551 2551          }
2552 2552  
2553 2553          if (res.status) {
2554 2554                  (void) xdr_free(xdr_COMPOUND4res_clnt, (caddr_t)&res);
2555 2555                  return;
2556 2556          }
2557 2557  
2558 2558          mutex_enter(&rp->r_statev4_lock);
2559 2559          rp->created_v4 = 0;
2560 2560          mutex_exit(&rp->r_statev4_lock);
2561 2561  
2562 2562          resop = &res.array[2];
2563 2563          osp->open_stateid = resop->nfs_resop4_u.opclose.open_stateid;
2564 2564          osp->os_valid = 0;
2565 2565  
2566 2566          /*
2567 2567           * This removes the reference obtained at OPEN; ie, when the
2568 2568           * open stream structure was created.
2569 2569           *
2570 2570           * We don't have to worry about calling 'open_stream_rele'
2571 2571           * since we are currently holding a reference to the open
2572 2572           * stream which means the count cannot go to 0 with this
2573 2573           * decrement.
2574 2574           */
2575 2575          ASSERT(osp->os_ref_count >= 2);
2576 2576          osp->os_ref_count--;
2577 2577  
2578 2578          if (!ep->error)
2579 2579                  nfs4_attr_cache(vp,
2580 2580                      &res.array[1].nfs_resop4_u.opgetattr.ga_res,
2581 2581                      t, cred_otw, TRUE, NULL);
2582 2582  
2583 2583          NFS4_DEBUG(nfs4_client_state_debug, (CE_NOTE, "nfs4close_otw:"
2584 2584              " returning %d", ep->error));
2585 2585  
2586 2586          (void) xdr_free(xdr_COMPOUND4res_clnt, (caddr_t)&res);
2587 2587  }
2588 2588  
/*
 * VOP_READ for NFSv4.  The caller must already hold r_rwlock as a reader
 * (asserted below).  By default the read is satisfied through the VM
 * segment driver (segmap or vpm) so data lands in the page cache; when
 * caching is disabled (VNOCACHE) or client-side direct I/O is enabled and
 * the file has no mappings or cached pages, the request bypasses VM and
 * goes over the wire via nfs4read().
 *
 * Returns 0 on success, otherwise an errno: EISDIR for a non-regular
 * file, EIO for cross-zone access or a pending recovery error, EINVAL
 * for a negative or overflowing offset/length, EINTR if a signal
 * interrupts a wait, or the error from the underlying transfer.
 */
/* ARGSUSED */
static int
nfs4_read(vnode_t *vp, struct uio *uiop, int ioflag, cred_t *cr,
    caller_context_t *ct)
{
	rnode4_t *rp;
	u_offset_t off;		/* MAXBSIZE-aligned mapping offset */
	offset_t diff;		/* bytes between current offset and EOF */
	uint_t on;		/* offset within the MAXBSIZE window */
	uint_t n;		/* bytes to transfer this iteration */
	caddr_t base;
	uint_t flags;		/* segmap/vpm release flags */
	int error;
	mntinfo4_t *mi;

	rp = VTOR4(vp);

	ASSERT(nfs_rw_lock_held(&rp->r_rwlock, RW_READER));

	/*
	 * A shadow vnode is an alternate name (hard link) for the same
	 * rnode; do all work against the rnode's primary vnode.
	 */
	if (IS_SHADOW(vp, rp))
		vp = RTOV4(rp);

	if (vp->v_type != VREG)
		return (EISDIR);

	mi = VTOMI4(vp);

	/* NFS operations must not cross zone boundaries. */
	if (nfs_zone() != mi->mi_zone)
		return (EIO);

	if (uiop->uio_resid == 0)
		return (0);

	/* Reject negative offsets and offset+length overflow. */
	if (uiop->uio_loffset < 0 || uiop->uio_loffset + uiop->uio_resid < 0)
		return (EINVAL);

	/*
	 * If recovery has marked this rnode, fail early with the saved
	 * error rather than issuing new I/O.
	 */
	mutex_enter(&rp->r_statelock);
	if (rp->r_flags & R4RECOVERRP)
		error = (rp->r_error ? rp->r_error : EIO);
	else
		error = 0;
	mutex_exit(&rp->r_statelock);
	if (error)
		return (error);

	/*
	 * Bypass VM if caching has been disabled (e.g., locking) or if
	 * using client-side direct I/O and the file is not mmap'd and
	 * there are no cached pages.
	 */
	if ((vp->v_flag & VNOCACHE) ||
	    (((rp->r_flags & R4DIRECTIO) || (mi->mi_flags & MI4_DIRECTIO)) &&
	    rp->r_mapcnt == 0 && rp->r_inmap == 0 && !nfs4_has_pages(vp))) {
		size_t resid = 0;

		return (nfs4read(vp, NULL, uiop->uio_loffset,
		    uiop->uio_resid, &resid, cr, FALSE, uiop));
	}

	error = 0;

	/*
	 * Cached path: transfer up to one MAXBSIZE window per iteration
	 * through segmap (or vpm when enabled), revalidating caches and
	 * honoring the current cached file size each time around.
	 */
	do {
		off = uiop->uio_loffset & MAXBMASK; /* mapping offset */
		on = uiop->uio_loffset & MAXBOFFSET; /* Relative offset */
		n = MIN(MAXBSIZE - on, uiop->uio_resid);

		if (error = nfs4_validate_caches(vp, cr))
			break;

		/* Wait out any in-progress cache purge (interruptible). */
		mutex_enter(&rp->r_statelock);
		while (rp->r_flags & R4INCACHEPURGE) {
			if (!cv_wait_sig(&rp->r_cv, &rp->r_statelock)) {
				mutex_exit(&rp->r_statelock);
				return (EINTR);
			}
		}
		/* Clamp the transfer at the cached EOF. */
		diff = rp->r_size - uiop->uio_loffset;
		mutex_exit(&rp->r_statelock);
		if (diff <= 0)
			break;
		if (diff < n)
			n = (uint_t)diff;

		if (vpm_enable) {
			/*
			 * Copy data.
			 */
			error = vpm_data_copy(vp, off + on, n, uiop,
			    1, NULL, 0, S_READ);
		} else {
			base = segmap_getmapflt(segkmap, vp, off + on, n, 1,
			    S_READ);

			error = uiomove(base + on, n, UIO_READ, uiop);
		}

		if (!error) {
			/*
			 * If read a whole block or read to eof,
			 * won't need this buffer again soon.
			 */
			mutex_enter(&rp->r_statelock);
			if (n + on == MAXBSIZE ||
			    uiop->uio_loffset == rp->r_size)
				flags = SM_DONTNEED;
			else
				flags = 0;
			mutex_exit(&rp->r_statelock);
			if (vpm_enable) {
				error = vpm_sync_pages(vp, off, n, flags);
			} else {
				error = segmap_release(segkmap, base, flags);
			}
		} else {
			/* Error: release the mapping without hints. */
			if (vpm_enable) {
				(void) vpm_sync_pages(vp, off, n, 0);
			} else {
				(void) segmap_release(segkmap, base, 0);
			}
		}
	} while (!error && uiop->uio_resid > 0);

	return (error);
}
2713 2713  
/*
 * VOP_WRITE for NFSv4.  Writes normally go through the VM page cache
 * (segmap or vpm) and are pushed to the server later; when caching is
 * disabled (VNOCACHE) or client-side direct I/O applies and the file has
 * no mappings or cached pages, data is copied into a temporary buffer and
 * written over the wire synchronously via nfs4write().
 *
 * Handles O_APPEND serialization (upgrades r_rwlock to a writer and
 * fetches the current size), enforces the process file-size rlimit
 * (trimming the request and delivering SIGXFSZ/EFBIG when exceeded), and
 * throttles dirty-page creation against the async write queue.  On
 * error, uio_resid/uio_loffset are restored to the values saved at the
 * start of the failed chunk so the caller sees how much was written.
 *
 * Returns 0 on success or an errno (EISDIR, EIO, EINTR, EINVAL, EFBIG,
 * ESTALE, or the underlying transfer error).
 */
/* ARGSUSED */
static int
nfs4_write(vnode_t *vp, struct uio *uiop, int ioflag, cred_t *cr,
    caller_context_t *ct)
{
	rlim64_t limit = uiop->uio_llimit;
	rnode4_t *rp;
	u_offset_t off;		/* MAXBSIZE-aligned mapping offset */
	caddr_t base;
	uint_t flags;		/* segmap/vpm release flags */
	int remainder;		/* bytes trimmed to honor the rlimit */
	size_t n;		/* bytes to transfer this iteration */
	int on;			/* offset within the MAXBSIZE window */
	int error;
	int resid;		/* uio_resid saved for error rollback */
	u_offset_t offset;	/* uio_loffset saved for error rollback */
	mntinfo4_t *mi;
	uint_t bsize;

	rp = VTOR4(vp);

	/* Operate on the rnode's primary vnode, not a shadow alias. */
	if (IS_SHADOW(vp, rp))
		vp = RTOV4(rp);

	if (vp->v_type != VREG)
		return (EISDIR);

	mi = VTOMI4(vp);

	/* NFS operations must not cross zone boundaries. */
	if (nfs_zone() != mi->mi_zone)
		return (EIO);

	if (uiop->uio_resid == 0)
		return (0);

	/* Fail early if recovery has marked this rnode. */
	mutex_enter(&rp->r_statelock);
	if (rp->r_flags & R4RECOVERRP)
		error = (rp->r_error ? rp->r_error : EIO);
	else
		error = 0;
	mutex_exit(&rp->r_statelock);
	if (error)
		return (error);

	if (ioflag & FAPPEND) {
		struct vattr va;

		/*
		 * Must serialize if appending.
		 */
		if (nfs_rw_lock_held(&rp->r_rwlock, RW_READER)) {
			nfs_rw_exit(&rp->r_rwlock);
			if (nfs_rw_enter_sig(&rp->r_rwlock, RW_WRITER,
			    INTR4(vp)))
				return (EINTR);
		}

		/* Append from the server's idea of the current size. */
		va.va_mask = AT_SIZE;
		error = nfs4getattr(vp, &va, cr);
		if (error)
			return (error);
		uiop->uio_loffset = va.va_size;
	}

	offset = uiop->uio_loffset + uiop->uio_resid;

	if (uiop->uio_loffset < (offset_t)0 || offset < 0)
		return (EINVAL);

	if (limit == RLIM64_INFINITY || limit > MAXOFFSET_T)
		limit = MAXOFFSET_T;

	/*
	 * Check to make sure that the process will not exceed
	 * its limit on file size.  It is okay to write up to
	 * the limit, but not beyond.  Thus, the write which
	 * reaches the limit will be short and the next write
	 * will return an error.
	 */
	remainder = 0;
	if (offset > uiop->uio_llimit) {
		remainder = offset - uiop->uio_llimit;
		uiop->uio_resid = uiop->uio_llimit - uiop->uio_loffset;
		if (uiop->uio_resid <= 0) {
			proc_t *p = ttoproc(curthread);

			/* Entirely past the limit: signal and fail. */
			uiop->uio_resid += remainder;
			mutex_enter(&p->p_lock);
			(void) rctl_action(rctlproc_legacy[RLIMIT_FSIZE],
			    p->p_rctls, p, RCA_UNSAFE_SIGINFO);
			mutex_exit(&p->p_lock);
			return (EFBIG);
		}
	}

	/* update the change attribute, if we have a write delegation */

	mutex_enter(&rp->r_statev4_lock);
	if (rp->r_deleg_type == OPEN_DELEGATE_WRITE)
		rp->r_deleg_change++;

	mutex_exit(&rp->r_statev4_lock);

	if (nfs_rw_enter_sig(&rp->r_lkserlock, RW_READER, INTR4(vp)))
		return (EINTR);

	/*
	 * Bypass VM if caching has been disabled (e.g., locking) or if
	 * using client-side direct I/O and the file is not mmap'd and
	 * there are no cached pages.
	 */
	if ((vp->v_flag & VNOCACHE) ||
	    (((rp->r_flags & R4DIRECTIO) || (mi->mi_flags & MI4_DIRECTIO)) &&
	    rp->r_mapcnt == 0 && rp->r_inmap == 0 && !nfs4_has_pages(vp))) {
		size_t bufsize;
		int count;
		u_offset_t org_offset;
		stable_how4 stab_comm;
/*
 * Also reached from the cached path below when faulting a page in for a
 * write-only file returns EACCES; in that case force an over-the-wire
 * write instead.
 */
nfs4_fwrite:
		if (rp->r_flags & R4STALE) {
			resid = uiop->uio_resid;
			offset = uiop->uio_loffset;
			error = rp->r_error;
			/*
			 * A close may have cleared r_error, if so,
			 * propagate ESTALE error return properly
			 */
			if (error == 0)
				error = ESTALE;
			goto bottom;
		}

		/*
		 * Copy the data through a kernel buffer in chunks of at
		 * most mi_stsize (the server's write transfer size).
		 */
		bufsize = MIN(uiop->uio_resid, mi->mi_stsize);
		base = kmem_alloc(bufsize, KM_SLEEP);
		do {
			if (ioflag & FDSYNC)
				stab_comm = DATA_SYNC4;
			else
				stab_comm = FILE_SYNC4;
			resid = uiop->uio_resid;
			offset = uiop->uio_loffset;
			count = MIN(uiop->uio_resid, bufsize);
			org_offset = uiop->uio_loffset;
			error = uiomove(base, count, UIO_WRITE, uiop);
			if (!error) {
				error = nfs4write(vp, base, org_offset,
				    count, cr, &stab_comm);
				if (!error) {
					/* Grow the cached size if needed. */
					mutex_enter(&rp->r_statelock);
					if (rp->r_size < uiop->uio_loffset)
						rp->r_size = uiop->uio_loffset;
					mutex_exit(&rp->r_statelock);
				}
			}
		} while (!error && uiop->uio_resid > 0);
		kmem_free(base, bufsize);
		goto bottom;
	}

	bsize = vp->v_vfsp->vfs_bsize;

	do {
		off = uiop->uio_loffset & MAXBMASK; /* mapping offset */
		on = uiop->uio_loffset & MAXBOFFSET; /* Relative offset */
		n = MIN(MAXBSIZE - on, uiop->uio_resid);

		/* Save rollback state for this chunk. */
		resid = uiop->uio_resid;
		offset = uiop->uio_loffset;

		if (rp->r_flags & R4STALE) {
			error = rp->r_error;
			/*
			 * A close may have cleared r_error, if so,
			 * propagate ESTALE error return properly
			 */
			if (error == 0)
				error = ESTALE;
			break;
		}

		/*
		 * Don't create dirty pages faster than they
		 * can be cleaned so that the system doesn't
		 * get imbalanced.  If the async queue is
		 * maxed out, then wait for it to drain before
		 * creating more dirty pages.  Also, wait for
		 * any threads doing pagewalks in the vop_getattr
		 * entry points so that they don't block for
		 * long periods.
		 */
		mutex_enter(&rp->r_statelock);
		while ((mi->mi_max_threads != 0 &&
		    rp->r_awcount > 2 * mi->mi_max_threads) ||
		    rp->r_gcount > 0) {
			if (INTR4(vp)) {
				klwp_t *lwp = ttolwp(curthread);

				/*
				 * lwp_nostop keeps this lwp from being
				 * stopped while blocked in cv_wait_sig.
				 */
				if (lwp != NULL)
					lwp->lwp_nostop++;
				if (!cv_wait_sig(&rp->r_cv, &rp->r_statelock)) {
					mutex_exit(&rp->r_statelock);
					if (lwp != NULL)
						lwp->lwp_nostop--;
					error = EINTR;
					goto bottom;
				}
				if (lwp != NULL)
					lwp->lwp_nostop--;
			} else
				cv_wait(&rp->r_cv, &rp->r_statelock);
		}
		mutex_exit(&rp->r_statelock);

		/*
		 * Touch the page and fault it in if it is not in core
		 * before segmap_getmapflt or vpm_data_copy can lock it.
		 * This is to avoid the deadlock if the buffer is mapped
		 * to the same file through mmap which we want to write.
		 */
		uio_prefaultpages((long)n, uiop);

		if (vpm_enable) {
			/*
			 * It will use kpm mappings, so no need to
			 * pass an address.
			 */
			error = writerp4(rp, NULL, n, uiop, 0);
		} else  {
			if (segmap_kpm) {
				int pon = uiop->uio_loffset & PAGEOFFSET;
				size_t pn = MIN(PAGESIZE - pon,
				    uiop->uio_resid);
				int pagecreate;

				/*
				 * A page can be created (rather than
				 * faulted in) when the write covers the
				 * whole page or extends past the cached
				 * file size.
				 */
				mutex_enter(&rp->r_statelock);
				pagecreate = (pon == 0) && (pn == PAGESIZE ||
				    uiop->uio_loffset + pn >= rp->r_size);
				mutex_exit(&rp->r_statelock);

				base = segmap_getmapflt(segkmap, vp, off + on,
				    pn, !pagecreate, S_WRITE);

				error = writerp4(rp, base + pon, n, uiop,
				    pagecreate);

			} else {
				base = segmap_getmapflt(segkmap, vp, off + on,
				    n, 0, S_READ);
				error = writerp4(rp, base + on, n, uiop, 0);
			}
		}

		if (!error) {
			if (mi->mi_flags & MI4_NOAC)
				flags = SM_WRITE;
			else if ((uiop->uio_loffset % bsize) == 0 ||
			    IS_SWAPVP(vp)) {
				/*
				 * Have written a whole block.
				 * Start an asynchronous write
				 * and mark the buffer to
				 * indicate that it won't be
				 * needed again soon.
				 */
				flags = SM_WRITE | SM_ASYNC | SM_DONTNEED;
			} else
				flags = 0;
			/* Synchronous semantics override async pushes. */
			if ((ioflag & (FSYNC|FDSYNC)) ||
			    (rp->r_flags & R4OUTOFSPACE)) {
				flags &= ~SM_ASYNC;
				flags |= SM_WRITE;
			}
			if (vpm_enable) {
				error = vpm_sync_pages(vp, off, n, flags);
			} else {
				error = segmap_release(segkmap, base, flags);
			}
		} else {
			if (vpm_enable) {
				(void) vpm_sync_pages(vp, off, n, 0);
			} else {
				(void) segmap_release(segkmap, base, 0);
			}
			/*
			 * In the event that we got an access error while
			 * faulting in a page for a write-only file just
			 * force a write.
			 */
			if (error == EACCES)
				goto nfs4_fwrite;
		}
	} while (!error && uiop->uio_resid > 0);

bottom:
	if (error) {
		/* Roll back to the start of the failed chunk. */
		uiop->uio_resid = resid + remainder;
		uiop->uio_loffset = offset;
	} else {
		/* Restore any bytes trimmed for the rlimit check. */
		uiop->uio_resid += remainder;

		/*
		 * With a write delegation the client, not the server, is
		 * authoritative for the timestamps; update them locally.
		 */
		mutex_enter(&rp->r_statev4_lock);
		if (rp->r_deleg_type == OPEN_DELEGATE_WRITE) {
			gethrestime(&rp->r_attr.va_mtime);
			rp->r_attr.va_ctime = rp->r_attr.va_mtime;
		}
		mutex_exit(&rp->r_statev4_lock);
	}

	nfs_rw_exit(&rp->r_lkserlock);

	return (error);
}
3026 3026  
3027 3027  /*
3028 3028   * Flags are composed of {B_ASYNC, B_INVAL, B_FREE, B_DONTNEED}
3029 3029   */
3030 3030  static int
3031 3031  nfs4_rdwrlbn(vnode_t *vp, page_t *pp, u_offset_t off, size_t len,
3032 3032      int flags, cred_t *cr)
3033 3033  {
3034 3034          struct buf *bp;
3035 3035          int error;
3036 3036          page_t *savepp;
3037 3037          uchar_t fsdata;
3038 3038          stable_how4 stab_comm;
3039 3039  
3040 3040          ASSERT(nfs_zone() == VTOMI4(vp)->mi_zone);
3041 3041          bp = pageio_setup(pp, len, vp, flags);
3042 3042          ASSERT(bp != NULL);
3043 3043  
3044 3044          /*
3045 3045           * pageio_setup should have set b_addr to 0.  This
3046 3046           * is correct since we want to do I/O on a page
3047 3047           * boundary.  bp_mapin will use this addr to calculate
3048 3048           * an offset, and then set b_addr to the kernel virtual
3049 3049           * address it allocated for us.
3050 3050           */
3051 3051          ASSERT(bp->b_un.b_addr == 0);
3052 3052  
3053 3053          bp->b_edev = 0;
3054 3054          bp->b_dev = 0;
3055 3055          bp->b_lblkno = lbtodb(off);
3056 3056          bp->b_file = vp;
3057 3057          bp->b_offset = (offset_t)off;
3058 3058          bp_mapin(bp);
3059 3059  
3060 3060          if ((flags & (B_WRITE|B_ASYNC)) == (B_WRITE|B_ASYNC) &&
3061 3061              freemem > desfree)
3062 3062                  stab_comm = UNSTABLE4;
3063 3063          else
3064 3064                  stab_comm = FILE_SYNC4;
3065 3065  
3066 3066          error = nfs4_bio(bp, &stab_comm, cr, FALSE);
3067 3067  
3068 3068          bp_mapout(bp);
3069 3069          pageio_done(bp);
3070 3070  
3071 3071          if (stab_comm == UNSTABLE4)
3072 3072                  fsdata = C_DELAYCOMMIT;
3073 3073          else
3074 3074                  fsdata = C_NOCOMMIT;
3075 3075  
3076 3076          savepp = pp;
3077 3077          do {
3078 3078                  pp->p_fsdata = fsdata;
3079 3079          } while ((pp = pp->p_next) != savepp);
3080 3080  
3081 3081          return (error);
3082 3082  }
3083 3083  
3084 3084  /*
3085 3085   */
3086 3086  static int
3087 3087  nfs4rdwr_check_osid(vnode_t *vp, nfs4_error_t *ep, cred_t *cr)
3088 3088  {
3089 3089          nfs4_open_owner_t       *oop;
3090 3090          nfs4_open_stream_t      *osp;
3091 3091          rnode4_t                *rp = VTOR4(vp);
3092 3092          mntinfo4_t              *mi = VTOMI4(vp);
3093 3093          int                     reopen_needed;
3094 3094  
3095 3095          ASSERT(nfs_zone() == mi->mi_zone);
3096 3096  
3097 3097  
3098 3098          oop = find_open_owner(cr, NFS4_PERM_CREATED, mi);
3099 3099          if (!oop)
3100 3100                  return (EIO);
3101 3101  
3102 3102          /* returns with 'os_sync_lock' held */
3103 3103          osp = find_open_stream(oop, rp);
3104 3104          if (!osp) {
3105 3105                  open_owner_rele(oop);
3106 3106                  return (EIO);
3107 3107          }
3108 3108  
3109 3109          if (osp->os_failed_reopen) {
3110 3110                  mutex_exit(&osp->os_sync_lock);
3111 3111                  open_stream_rele(osp, rp);
3112 3112                  open_owner_rele(oop);
3113 3113                  return (EIO);
3114 3114          }
3115 3115  
3116 3116          /*
3117 3117           * Determine whether a reopen is needed.  If this
3118 3118           * is a delegation open stream, then the os_delegation bit
3119 3119           * should be set.
3120 3120           */
3121 3121  
3122 3122          reopen_needed = osp->os_delegation;
3123 3123  
3124 3124          mutex_exit(&osp->os_sync_lock);
3125 3125          open_owner_rele(oop);
3126 3126  
3127 3127          if (reopen_needed) {
3128 3128                  nfs4_error_zinit(ep);
3129 3129                  nfs4_reopen(vp, osp, ep, CLAIM_NULL, FALSE, FALSE);
3130 3130                  mutex_enter(&osp->os_sync_lock);
3131 3131                  if (ep->error || ep->stat || osp->os_failed_reopen) {
3132 3132                          mutex_exit(&osp->os_sync_lock);
3133 3133                          open_stream_rele(osp, rp);
3134 3134                          return (EIO);
3135 3135                  }
3136 3136                  mutex_exit(&osp->os_sync_lock);
3137 3137          }
3138 3138          open_stream_rele(osp, rp);
3139 3139  
3140 3140          return (0);
3141 3141  }
3142 3142  
3143 3143  /*
3144 3144   * Write to file.  Writes to remote server in largest size
3145 3145   * chunks that the server can handle.  Write is synchronous.
3146 3146   */
3147 3147  static int
3148 3148  nfs4write(vnode_t *vp, caddr_t base, u_offset_t offset, int count, cred_t *cr,
3149 3149      stable_how4 *stab_comm)
3150 3150  {
3151 3151          mntinfo4_t *mi;
3152 3152          COMPOUND4args_clnt args;
3153 3153          COMPOUND4res_clnt res;
3154 3154          WRITE4args *wargs;
3155 3155          WRITE4res *wres;
3156 3156          nfs_argop4 argop[2];
3157 3157          nfs_resop4 *resop;
3158 3158          int tsize;
3159 3159          stable_how4 stable;
3160 3160          rnode4_t *rp;
3161 3161          int doqueue = 1;
3162 3162          bool_t needrecov;
3163 3163          nfs4_recov_state_t recov_state;
3164 3164          nfs4_stateid_types_t sid_types;
3165 3165          nfs4_error_t e = { 0, NFS4_OK, RPC_SUCCESS };
3166 3166          int recov;
3167 3167  
3168 3168          rp = VTOR4(vp);
3169 3169          mi = VTOMI4(vp);
3170 3170  
3171 3171          ASSERT(nfs_zone() == mi->mi_zone);
3172 3172  
3173 3173          stable = *stab_comm;
3174 3174          *stab_comm = FILE_SYNC4;
3175 3175  
3176 3176          needrecov = FALSE;
3177 3177          recov_state.rs_flags = 0;
3178 3178          recov_state.rs_num_retry_despite_err = 0;
3179 3179          nfs4_init_stateid_types(&sid_types);
3180 3180  
3181 3181          /* Is curthread the recovery thread? */
3182 3182          mutex_enter(&mi->mi_lock);
3183 3183          recov = (mi->mi_recovthread == curthread);
3184 3184          mutex_exit(&mi->mi_lock);
3185 3185  
3186 3186  recov_retry:
3187 3187          args.ctag = TAG_WRITE;
3188 3188          args.array_len = 2;
3189 3189          args.array = argop;
3190 3190  
3191 3191          if (!recov) {
3192 3192                  e.error = nfs4_start_fop(VTOMI4(vp), vp, NULL, OH_WRITE,
3193 3193                      &recov_state, NULL);
3194 3194                  if (e.error)
3195 3195                          return (e.error);
3196 3196          }
3197 3197  
3198 3198          /* 0. putfh target fh */
3199 3199          argop[0].argop = OP_CPUTFH;
3200 3200          argop[0].nfs_argop4_u.opcputfh.sfh = rp->r_fh;
3201 3201  
3202 3202          /* 1. write */
3203 3203          nfs4args_write(&argop[1], stable, rp, cr, &wargs, &sid_types);
3204 3204  
3205 3205          do {
3206 3206  
3207 3207                  wargs->offset = (offset4)offset;
3208 3208                  wargs->data_val = base;
3209 3209  
3210 3210                  if (mi->mi_io_kstats) {
3211 3211                          mutex_enter(&mi->mi_lock);
3212 3212                          kstat_runq_enter(KSTAT_IO_PTR(mi->mi_io_kstats));
3213 3213                          mutex_exit(&mi->mi_lock);
3214 3214                  }
3215 3215  
3216 3216                  if ((vp->v_flag & VNOCACHE) ||
3217 3217                      (rp->r_flags & R4DIRECTIO) ||
3218 3218                      (mi->mi_flags & MI4_DIRECTIO))
3219 3219                          tsize = MIN(mi->mi_stsize, count);
3220 3220                  else
3221 3221                          tsize = MIN(mi->mi_curwrite, count);
3222 3222                  wargs->data_len = (uint_t)tsize;
3223 3223                  rfs4call(mi, &args, &res, cr, &doqueue, 0, &e);
3224 3224  
3225 3225                  if (mi->mi_io_kstats) {
3226 3226                          mutex_enter(&mi->mi_lock);
3227 3227                          kstat_runq_exit(KSTAT_IO_PTR(mi->mi_io_kstats));
3228 3228                          mutex_exit(&mi->mi_lock);
3229 3229                  }
3230 3230  
3231 3231                  if (!recov) {
3232 3232                          needrecov = nfs4_needs_recovery(&e, FALSE, mi->mi_vfsp);
3233 3233                          if (e.error && !needrecov) {
3234 3234                                  nfs4_end_fop(VTOMI4(vp), vp, NULL, OH_WRITE,
3235 3235                                      &recov_state, needrecov);
3236 3236                                  return (e.error);
3237 3237                          }
3238 3238                  } else {
3239 3239                          if (e.error)
3240 3240                                  return (e.error);
3241 3241                  }
3242 3242  
3243 3243                  /*
3244 3244                   * Do handling of OLD_STATEID outside
3245 3245                   * of the normal recovery framework.
3246 3246                   *
3247 3247                   * If write receives a BAD stateid error while using a
3248 3248                   * delegation stateid, retry using the open stateid (if it
3249 3249                   * exists).  If it doesn't have an open stateid, reopen the
3250 3250                   * file first, then retry.
3251 3251                   */
3252 3252                  if (!e.error && res.status == NFS4ERR_OLD_STATEID &&
3253 3253                      sid_types.cur_sid_type != SPEC_SID) {
3254 3254                          nfs4_save_stateid(&wargs->stateid, &sid_types);
3255 3255                          if (!recov)
3256 3256                                  nfs4_end_fop(VTOMI4(vp), vp, NULL, OH_WRITE,
3257 3257                                      &recov_state, needrecov);
3258 3258                          (void) xdr_free(xdr_COMPOUND4res_clnt, (caddr_t)&res);
3259 3259                          goto recov_retry;
3260 3260                  } else if (e.error == 0 && res.status == NFS4ERR_BAD_STATEID &&
3261 3261                      sid_types.cur_sid_type == DEL_SID) {
3262 3262                          nfs4_save_stateid(&wargs->stateid, &sid_types);
3263 3263                          mutex_enter(&rp->r_statev4_lock);
3264 3264                          rp->r_deleg_return_pending = TRUE;
3265 3265                          mutex_exit(&rp->r_statev4_lock);
3266 3266                          if (nfs4rdwr_check_osid(vp, &e, cr)) {
3267 3267                                  if (!recov)
3268 3268                                          nfs4_end_fop(mi, vp, NULL, OH_WRITE,
3269 3269                                              &recov_state, needrecov);
3270 3270                                  (void) xdr_free(xdr_COMPOUND4res_clnt,
3271 3271                                      (caddr_t)&res);
3272 3272                                  return (EIO);
3273 3273                          }
3274 3274                          if (!recov)
3275 3275                                  nfs4_end_fop(mi, vp, NULL, OH_WRITE,
3276 3276                                      &recov_state, needrecov);
3277 3277                          /* hold needed for nfs4delegreturn_thread */
3278 3278                          VN_HOLD(vp);
3279 3279                          nfs4delegreturn_async(rp, (NFS4_DR_PUSH|NFS4_DR_REOPEN|
3280 3280                              NFS4_DR_DISCARD), FALSE);
3281 3281                          (void) xdr_free(xdr_COMPOUND4res_clnt, (caddr_t)&res);
3282 3282                          goto recov_retry;
3283 3283                  }
3284 3284  
3285 3285                  if (needrecov) {
3286 3286                          bool_t abort;
3287 3287  
3288 3288                          NFS4_DEBUG(nfs4_client_recov_debug, (CE_NOTE,
3289 3289                              "nfs4write: client got error %d, res.status %d"
3290 3290                              ", so start recovery", e.error, res.status));
3291 3291  
3292 3292                          abort = nfs4_start_recovery(&e,
3293 3293                              VTOMI4(vp), vp, NULL, &wargs->stateid,
3294 3294                              NULL, OP_WRITE, NULL, NULL, NULL);
3295 3295                          if (!e.error) {
3296 3296                                  e.error = geterrno4(res.status);
3297 3297                                  (void) xdr_free(xdr_COMPOUND4res_clnt,
3298 3298                                      (caddr_t)&res);
3299 3299                          }
3300 3300                          nfs4_end_fop(VTOMI4(vp), vp, NULL, OH_WRITE,
3301 3301                              &recov_state, needrecov);
3302 3302                          if (abort == FALSE)
3303 3303                                  goto recov_retry;
3304 3304                          return (e.error);
3305 3305                  }
3306 3306  
3307 3307                  if (res.status) {
3308 3308                          e.error = geterrno4(res.status);
3309 3309                          (void) xdr_free(xdr_COMPOUND4res_clnt, (caddr_t)&res);
3310 3310                          if (!recov)
3311 3311                                  nfs4_end_fop(VTOMI4(vp), vp, NULL, OH_WRITE,
3312 3312                                      &recov_state, needrecov);
3313 3313                          return (e.error);
3314 3314                  }
3315 3315  
3316 3316                  resop = &res.array[1];  /* write res */
3317 3317                  wres = &resop->nfs_resop4_u.opwrite;
3318 3318  
3319 3319                  if ((int)wres->count > tsize) {
3320 3320                          (void) xdr_free(xdr_COMPOUND4res_clnt, (caddr_t)&res);
3321 3321  
3322 3322                          zcmn_err(getzoneid(), CE_WARN,
3323 3323                              "nfs4write: server wrote %u, requested was %u",
3324 3324                              (int)wres->count, tsize);
3325 3325                          if (!recov)
3326 3326                                  nfs4_end_fop(VTOMI4(vp), vp, NULL, OH_WRITE,
3327 3327                                      &recov_state, needrecov);
3328 3328                          return (EIO);
3329 3329                  }
3330 3330                  if (wres->committed == UNSTABLE4) {
3331 3331                          *stab_comm = UNSTABLE4;
3332 3332                          if (wargs->stable == DATA_SYNC4 ||
3333 3333                              wargs->stable == FILE_SYNC4) {
3334 3334                                  (void) xdr_free(xdr_COMPOUND4res_clnt,
3335 3335                                      (caddr_t)&res);
3336 3336                                  zcmn_err(getzoneid(), CE_WARN,
3337 3337                                      "nfs4write: server %s did not commit "
3338 3338                                      "to stable storage",
3339 3339                                      rp->r_server->sv_hostname);
3340 3340                                  if (!recov)
3341 3341                                          nfs4_end_fop(VTOMI4(vp), vp, NULL,
3342 3342                                              OH_WRITE, &recov_state, needrecov);
3343 3343                                  return (EIO);
3344 3344                          }
3345 3345                  }
3346 3346  
3347 3347                  tsize = (int)wres->count;
3348 3348                  count -= tsize;
3349 3349                  base += tsize;
3350 3350                  offset += tsize;
3351 3351                  if (mi->mi_io_kstats) {
3352 3352                          mutex_enter(&mi->mi_lock);
3353 3353                          KSTAT_IO_PTR(mi->mi_io_kstats)->writes++;
3354 3354                          KSTAT_IO_PTR(mi->mi_io_kstats)->nwritten +=
3355 3355                              tsize;
3356 3356                          mutex_exit(&mi->mi_lock);
3357 3357                  }
3358 3358                  lwp_stat_update(LWP_STAT_OUBLK, 1);
3359 3359                  mutex_enter(&rp->r_statelock);
3360 3360                  if (rp->r_flags & R4HAVEVERF) {
3361 3361                          if (rp->r_writeverf != wres->writeverf) {
3362 3362                                  nfs4_set_mod(vp);
3363 3363                                  rp->r_writeverf = wres->writeverf;
3364 3364                          }
3365 3365                  } else {
3366 3366                          rp->r_writeverf = wres->writeverf;
3367 3367                          rp->r_flags |= R4HAVEVERF;
3368 3368                  }
3369 3369                  PURGE_ATTRCACHE4_LOCKED(rp);
3370 3370                  rp->r_flags |= R4WRITEMODIFIED;
3371 3371                  gethrestime(&rp->r_attr.va_mtime);
3372 3372                  rp->r_attr.va_ctime = rp->r_attr.va_mtime;
3373 3373                  mutex_exit(&rp->r_statelock);
3374 3374                  (void) xdr_free(xdr_COMPOUND4res_clnt, (caddr_t)&res);
3375 3375          } while (count);
3376 3376  
3377 3377          if (!recov)
3378 3378                  nfs4_end_fop(VTOMI4(vp), vp, NULL, OH_WRITE, &recov_state,
3379 3379                      needrecov);
3380 3380  
3381 3381          return (e.error);
3382 3382  }
3383 3383  
3384 3384  /*
3385 3385   * Read from a file.  Reads data in largest chunks our interface can handle.
3386 3386   */
static int
nfs4read(vnode_t *vp, caddr_t base, offset_t offset, int count,
    size_t *residp, cred_t *cr, bool_t async, struct uio *uiop)
{
	mntinfo4_t *mi;
	COMPOUND4args_clnt args;
	COMPOUND4res_clnt res;
	READ4args *rargs;
	nfs_argop4 argop[2];
	int tsize;
	int doqueue;
	rnode4_t *rp;
	int data_len;
	bool_t is_eof;
	bool_t needrecov = FALSE;
	nfs4_recov_state_t recov_state;
	nfs4_stateid_types_t sid_types;
	nfs4_error_t e = { 0, NFS4_OK, RPC_SUCCESS };

	rp = VTOR4(vp);
	mi = VTOMI4(vp);
	doqueue = 1;

	/* NFS calls may only be made from the zone that owns the mount. */
	ASSERT(nfs_zone() == mi->mi_zone);

	/* Tag the compound so observability tools can tell readahead apart. */
	args.ctag = async ? TAG_READAHEAD : TAG_READ;

	/* Two-op compound: { Putfh; Read } */
	args.array_len = 2;
	args.array = argop;

	nfs4_init_stateid_types(&sid_types);

	recov_state.rs_flags = 0;
	recov_state.rs_num_retry_despite_err = 0;

recov_retry:
	/* Serialize against recovery; must be balanced by nfs4_end_fop(). */
	e.error = nfs4_start_fop(mi, vp, NULL, OH_READ,
	    &recov_state, NULL);
	if (e.error)
		return (e.error);

	/* putfh target fh */
	argop[0].argop = OP_CPUTFH;
	argop[0].nfs_argop4_u.opcputfh.sfh = rp->r_fh;

	/* read */
	argop[1].argop = OP_READ;
	rargs = &argop[1].nfs_argop4_u.opread;
	rargs->stateid = nfs4_get_stateid(cr, rp, curproc->p_pidp->pid_id, mi,
	    OP_READ, &sid_types, async);

	/* Loop issuing READs until the request is satisfied or EOF. */
	do {
		if (mi->mi_io_kstats) {
			mutex_enter(&mi->mi_lock);
			kstat_runq_enter(KSTAT_IO_PTR(mi->mi_io_kstats));
			mutex_exit(&mi->mi_lock);
		}

		NFS4_DEBUG(nfs4_client_call_debug, (CE_NOTE,
		    "nfs4read: %s call, rp %s",
		    needrecov ? "recov" : "first",
		    rnode4info(rp)));

		/*
		 * Directio transfers are bounded by the server's advertised
		 * transfer size; cached reads by the current (possibly
		 * dynamically reduced) read size.
		 */
		if ((vp->v_flag & VNOCACHE) ||
		    (rp->r_flags & R4DIRECTIO) ||
		    (mi->mi_flags & MI4_DIRECTIO))
			tsize = MIN(mi->mi_tsize, count);
		else
			tsize = MIN(mi->mi_curread, count);

		rargs->offset = (offset4)offset;
		rargs->count = (count4)tsize;
		rargs->res_data_val_alt = NULL;
		rargs->res_mblk = NULL;
		rargs->res_uiop = NULL;
		rargs->res_maxsize = 0;
		rargs->wlist = NULL;

		/* Results land either directly in the uio or in "base". */
		if (uiop)
			rargs->res_uiop = uiop;
		else
			rargs->res_data_val_alt = base;
		rargs->res_maxsize = tsize;

		rfs4call(mi, &args, &res, cr, &doqueue, 0, &e);
#ifdef	DEBUG
		/* One-shot error injection hook for testing recovery paths. */
		if (nfs4read_error_inject) {
			res.status = nfs4read_error_inject;
			nfs4read_error_inject = 0;
		}
#endif

		if (mi->mi_io_kstats) {
			mutex_enter(&mi->mi_lock);
			kstat_runq_exit(KSTAT_IO_PTR(mi->mi_io_kstats));
			mutex_exit(&mi->mi_lock);
		}

		needrecov = nfs4_needs_recovery(&e, FALSE, mi->mi_vfsp);
		if (e.error != 0 && !needrecov) {
			/* Hard error that recovery can't help; bail out. */
			nfs4_end_fop(mi, vp, NULL, OH_READ,
			    &recov_state, needrecov);
			return (e.error);
		}

		/*
		 * Do proper retry for OLD and BAD stateid errors outside
		 * of the normal recovery framework.  There are two differences
		 * between async and sync reads.  The first is that we allow
		 * retry on BAD_STATEID for async reads, but not sync reads.
		 * The second is that we mark the file dead for a failed
		 * attempt with a special stateid for sync reads, but just
		 * return EIO for async reads.
		 *
		 * If a sync read receives a BAD stateid error while using a
		 * delegation stateid, retry using the open stateid (if it
		 * exists).  If it doesn't have an open stateid, reopen the
		 * file first, then retry.
		 */
		if (e.error == 0 && (res.status == NFS4ERR_OLD_STATEID ||
		    res.status == NFS4ERR_BAD_STATEID) && async) {
			nfs4_end_fop(mi, vp, NULL, OH_READ,
			    &recov_state, needrecov);
			if (sid_types.cur_sid_type == SPEC_SID) {
				/* Already down to the special stateid. */
				(void) xdr_free(xdr_COMPOUND4res_clnt,
				    (caddr_t)&res);
				return (EIO);
			}
			nfs4_save_stateid(&rargs->stateid, &sid_types);
			(void) xdr_free(xdr_COMPOUND4res_clnt, (caddr_t)&res);
			goto recov_retry;
		} else if (e.error == 0 && res.status == NFS4ERR_OLD_STATEID &&
		    !async && sid_types.cur_sid_type != SPEC_SID) {
			/* Sync read: step to the next stateid type and retry. */
			nfs4_save_stateid(&rargs->stateid, &sid_types);
			nfs4_end_fop(mi, vp, NULL, OH_READ,
			    &recov_state, needrecov);
			(void) xdr_free(xdr_COMPOUND4res_clnt, (caddr_t)&res);
			goto recov_retry;
		} else if (e.error == 0 && res.status == NFS4ERR_BAD_STATEID &&
		    sid_types.cur_sid_type == DEL_SID) {
			/*
			 * Delegation stateid went bad: arrange for the
			 * delegation to be returned, then retry with an
			 * open stateid.
			 */
			nfs4_save_stateid(&rargs->stateid, &sid_types);
			mutex_enter(&rp->r_statev4_lock);
			rp->r_deleg_return_pending = TRUE;
			mutex_exit(&rp->r_statev4_lock);
			if (nfs4rdwr_check_osid(vp, &e, cr)) {
				nfs4_end_fop(mi, vp, NULL, OH_READ,
				    &recov_state, needrecov);
				(void) xdr_free(xdr_COMPOUND4res_clnt,
				    (caddr_t)&res);
				return (EIO);
			}
			nfs4_end_fop(mi, vp, NULL, OH_READ,
			    &recov_state, needrecov);
			/* hold needed for nfs4delegreturn_thread */
			VN_HOLD(vp);
			nfs4delegreturn_async(rp, (NFS4_DR_PUSH|NFS4_DR_REOPEN|
			    NFS4_DR_DISCARD), FALSE);
			(void) xdr_free(xdr_COMPOUND4res_clnt, (caddr_t)&res);
			goto recov_retry;
		}
		if (needrecov) {
			bool_t abort;

			NFS4_DEBUG(nfs4_client_recov_debug, (CE_NOTE,
			    "nfs4read: initiating recovery\n"));
			abort = nfs4_start_recovery(&e,
			    mi, vp, NULL, &rargs->stateid,
			    NULL, OP_READ, NULL, NULL, NULL);
			nfs4_end_fop(mi, vp, NULL, OH_READ,
			    &recov_state, needrecov);
			/*
			 * Do not retry if we got OLD_STATEID using a special
			 * stateid.  This avoids looping with a broken server.
			 */
			if (e.error == 0 && res.status == NFS4ERR_OLD_STATEID &&
			    sid_types.cur_sid_type == SPEC_SID)
				abort = TRUE;

			if (abort == FALSE) {
				/*
				 * Need to retry all possible stateids in
				 * case the recovery error wasn't stateid
				 * related or the stateids have become
				 * stale (server reboot).
				 */
				nfs4_init_stateid_types(&sid_types);
				(void) xdr_free(xdr_COMPOUND4res_clnt,
				    (caddr_t)&res);
				goto recov_retry;
			}

			if (!e.error) {
				e.error = geterrno4(res.status);
				(void) xdr_free(xdr_COMPOUND4res_clnt,
				    (caddr_t)&res);
			}
			return (e.error);
		}

		if (res.status) {
			/* Non-recoverable NFS-level failure. */
			e.error = geterrno4(res.status);
			nfs4_end_fop(mi, vp, NULL, OH_READ,
			    &recov_state, needrecov);
			(void) xdr_free(xdr_COMPOUND4res_clnt, (caddr_t)&res);
			return (e.error);
		}

		/* Account for what the server actually returned. */
		data_len = res.array[1].nfs_resop4_u.opread.data_len;
		count -= data_len;
		if (base)
			base += data_len;
		offset += data_len;
		if (mi->mi_io_kstats) {
			mutex_enter(&mi->mi_lock);
			KSTAT_IO_PTR(mi->mi_io_kstats)->reads++;
			KSTAT_IO_PTR(mi->mi_io_kstats)->nread += data_len;
			mutex_exit(&mi->mi_lock);
		}
		lwp_stat_update(LWP_STAT_INBLK, 1);
		is_eof = res.array[1].nfs_resop4_u.opread.eof;
		(void) xdr_free(xdr_COMPOUND4res_clnt, (caddr_t)&res);

	} while (count && !is_eof);

	/* Residual: bytes requested but not read (e.g. hit EOF). */
	*residp = count;

	nfs4_end_fop(mi, vp, NULL, OH_READ, &recov_state, needrecov);

	return (e.error);
}
3617 3617  
3618 3618  /* ARGSUSED */
3619 3619  static int
3620 3620  nfs4_ioctl(vnode_t *vp, int cmd, intptr_t arg, int flag, cred_t *cr, int *rvalp,
3621 3621      caller_context_t *ct)
3622 3622  {
3623 3623          if (nfs_zone() != VTOMI4(vp)->mi_zone)
3624 3624                  return (EIO);
3625 3625          switch (cmd) {
3626 3626                  case _FIODIRECTIO:
3627 3627                          return (nfs4_directio(vp, (int)arg, cr));
3628 3628                  default:
3629 3629                          return (ENOTTY);
3630 3630          }
3631 3631  }
3632 3632  
3633 3633  /* ARGSUSED */
3634 3634  int
3635 3635  nfs4_getattr(vnode_t *vp, struct vattr *vap, int flags, cred_t *cr,
3636 3636      caller_context_t *ct)
3637 3637  {
3638 3638          int error;
3639 3639          rnode4_t *rp = VTOR4(vp);
3640 3640  
3641 3641          if (nfs_zone() != VTOMI4(vp)->mi_zone)
3642 3642                  return (EIO);
3643 3643          /*
3644 3644           * If it has been specified that the return value will
3645 3645           * just be used as a hint, and we are only being asked
3646 3646           * for size, fsid or rdevid, then return the client's
3647 3647           * notion of these values without checking to make sure
3648 3648           * that the attribute cache is up to date.
3649 3649           * The whole point is to avoid an over the wire GETATTR
3650 3650           * call.
3651 3651           */
3652 3652          if (flags & ATTR_HINT) {
3653 3653                  if (!(vap->va_mask & ~(AT_SIZE | AT_FSID | AT_RDEV))) {
3654 3654                          mutex_enter(&rp->r_statelock);
3655 3655                          if (vap->va_mask & AT_SIZE)
3656 3656                                  vap->va_size = rp->r_size;
3657 3657                          if (vap->va_mask & AT_FSID)
3658 3658                                  vap->va_fsid = rp->r_attr.va_fsid;
3659 3659                          if (vap->va_mask & AT_RDEV)
3660 3660                                  vap->va_rdev = rp->r_attr.va_rdev;
3661 3661                          mutex_exit(&rp->r_statelock);
3662 3662                          return (0);
3663 3663                  }
3664 3664          }
3665 3665  
3666 3666          /*
3667 3667           * Only need to flush pages if asking for the mtime
3668 3668           * and if there any dirty pages or any outstanding
3669 3669           * asynchronous (write) requests for this file.
3670 3670           */
3671 3671          if (vap->va_mask & AT_MTIME) {
3672 3672                  rp = VTOR4(vp);
3673 3673                  if (nfs4_has_pages(vp)) {
3674 3674                          mutex_enter(&rp->r_statev4_lock);
3675 3675                          if (rp->r_deleg_type != OPEN_DELEGATE_WRITE) {
3676 3676                                  mutex_exit(&rp->r_statev4_lock);
3677 3677                                  if (rp->r_flags & R4DIRTY ||
3678 3678                                      rp->r_awcount > 0) {
3679 3679                                          mutex_enter(&rp->r_statelock);
3680 3680                                          rp->r_gcount++;
3681 3681                                          mutex_exit(&rp->r_statelock);
3682 3682                                          error =
3683 3683                                              nfs4_putpage(vp, (u_offset_t)0,
3684 3684                                              0, 0, cr, NULL);
3685 3685                                          mutex_enter(&rp->r_statelock);
3686 3686                                          if (error && (error == ENOSPC ||
3687 3687                                              error == EDQUOT)) {
3688 3688                                                  if (!rp->r_error)
3689 3689                                                          rp->r_error = error;
3690 3690                                          }
3691 3691                                          if (--rp->r_gcount == 0)
3692 3692                                                  cv_broadcast(&rp->r_cv);
3693 3693                                          mutex_exit(&rp->r_statelock);
3694 3694                                  }
3695 3695                          } else {
3696 3696                                  mutex_exit(&rp->r_statev4_lock);
3697 3697                          }
3698 3698                  }
3699 3699          }
3700 3700          return (nfs4getattr(vp, vap, cr));
3701 3701  }
3702 3702  
int
nfs4_compare_modes(mode_t from_server, mode_t on_client)
{
	/*
	 * The server is allowed to have cleared only the setuid and
	 * setgid bits relative to the client's cached mode.  Strip
	 * those two bits from the client's notion and compare:
	 * return 0 (OK) if the modes then agree, 1 (BAD) otherwise.
	 */
	return ((on_client & ~(S_ISUID|S_ISGID)) != from_server);
}
3717 3717  
3718 3718  /*ARGSUSED4*/
3719 3719  static int
3720 3720  nfs4_setattr(vnode_t *vp, struct vattr *vap, int flags, cred_t *cr,
3721 3721      caller_context_t *ct)
3722 3722  {
3723 3723          int error;
3724 3724  
3725 3725          if (vap->va_mask & AT_NOSET)
3726 3726                  return (EINVAL);
3727 3727  
3728 3728          if (nfs_zone() != VTOMI4(vp)->mi_zone)
3729 3729                  return (EIO);
3730 3730  
3731 3731          /*
  
    | 
      ↓ open down ↓ | 
    3684 lines elided | 
    
      ↑ open up ↑ | 
  
3732 3732           * Don't call secpolicy_vnode_setattr, the client cannot
3733 3733           * use its cached attributes to make security decisions
3734 3734           * as the server may be faking mode bits or mapping uid/gid.
3735 3735           * Always just let the server to the checking.
3736 3736           * If we provide the ability to remove basic priviledges
3737 3737           * to setattr (e.g. basic without chmod) then we will
3738 3738           * need to add a check here before calling the server.
3739 3739           */
3740 3740          error = nfs4setattr(vp, vap, flags, cr, NULL);
3741 3741  
3742      -        if (error == 0 && (vap->va_mask & AT_SIZE) && vap->va_size == 0)
3743      -                vnevent_truncate(vp, ct);
     3742 +        if (error == 0 && (vap->va_mask & AT_SIZE)) {
     3743 +                if (vap->va_size == 0) {
     3744 +                        vnevent_truncate(vp, ct);
     3745 +                } else {
     3746 +                        vnevent_resize(vp, ct);
     3747 +                }
     3748 +        }
3744 3749  
3745 3750          return (error);
3746 3751  }
3747 3752  
3748 3753  /*
3749 3754   * To replace the "guarded" version 3 setattr, we use two types of compound
3750 3755   * setattr requests:
3751 3756   * 1. The "normal" setattr, used when the size of the file isn't being
3752 3757   *    changed - { Putfh <fh>; Setattr; Getattr }.
3753 3758   * 2. If the size is changed, precede Setattr with: Getattr; Verify
3754 3759   *    with only ctime as the argument. If the server ctime differs from
3755 3760   *    what is cached on the client, the verify will fail, but we would
3756 3761   *    already have the ctime from the preceding getattr, so just set it
3757 3762   *    and retry. Thus the compound here is - { Putfh <fh>; Getattr; Verify;
3758 3763   *      Setattr; Getattr }.
3759 3764   *
3760 3765   * The vsecattr_t * input parameter will be non-NULL if ACLs are being set in
3761 3766   * this setattr and NULL if they are not.
3762 3767   */
3763 3768  static int
3764 3769  nfs4setattr(vnode_t *vp, struct vattr *vap, int flags, cred_t *cr,
3765 3770      vsecattr_t *vsap)
3766 3771  {
3767 3772          COMPOUND4args_clnt args;
3768 3773          COMPOUND4res_clnt res, *resp = NULL;
3769 3774          nfs4_ga_res_t *garp = NULL;
3770 3775          int numops = 3;                 /* { Putfh; Setattr; Getattr } */
3771 3776          nfs_argop4 argop[5];
3772 3777          int verify_argop = -1;
3773 3778          int setattr_argop = 1;
3774 3779          nfs_resop4 *resop;
3775 3780          vattr_t va;
3776 3781          rnode4_t *rp;
3777 3782          int doqueue = 1;
3778 3783          uint_t mask = vap->va_mask;
3779 3784          mode_t omode;
3780 3785          vsecattr_t *vsp;
3781 3786          timestruc_t ctime;
3782 3787          bool_t needrecov = FALSE;
3783 3788          nfs4_recov_state_t recov_state;
3784 3789          nfs4_stateid_types_t sid_types;
3785 3790          stateid4 stateid;
3786 3791          hrtime_t t;
3787 3792          nfs4_error_t e = { 0, NFS4_OK, RPC_SUCCESS };
3788 3793          servinfo4_t *svp;
3789 3794          bitmap4 supp_attrs;
3790 3795  
3791 3796          ASSERT(nfs_zone() == VTOMI4(vp)->mi_zone);
3792 3797          rp = VTOR4(vp);
3793 3798          nfs4_init_stateid_types(&sid_types);
3794 3799  
3795 3800          /*
3796 3801           * Only need to flush pages if there are any pages and
3797 3802           * if the file is marked as dirty in some fashion.  The
3798 3803           * file must be flushed so that we can accurately
3799 3804           * determine the size of the file and the cached data
3800 3805           * after the SETATTR returns.  A file is considered to
3801 3806           * be dirty if it is either marked with R4DIRTY, has
3802 3807           * outstanding i/o's active, or is mmap'd.  In this
3803 3808           * last case, we can't tell whether there are dirty
3804 3809           * pages, so we flush just to be sure.
3805 3810           */
3806 3811          if (nfs4_has_pages(vp) &&
3807 3812              ((rp->r_flags & R4DIRTY) ||
3808 3813              rp->r_count > 0 ||
3809 3814              rp->r_mapcnt > 0)) {
3810 3815                  ASSERT(vp->v_type != VCHR);
3811 3816                  e.error = nfs4_putpage(vp, (offset_t)0, 0, 0, cr, NULL);
3812 3817                  if (e.error && (e.error == ENOSPC || e.error == EDQUOT)) {
3813 3818                          mutex_enter(&rp->r_statelock);
3814 3819                          if (!rp->r_error)
3815 3820                                  rp->r_error = e.error;
3816 3821                          mutex_exit(&rp->r_statelock);
3817 3822                  }
3818 3823          }
3819 3824  
3820 3825          if (mask & AT_SIZE) {
3821 3826                  /*
3822 3827                   * Verification setattr compound for non-deleg AT_SIZE:
3823 3828                   *      { Putfh; Getattr; Verify; Setattr; Getattr }
3824 3829                   * Set ctime local here (outside the do_again label)
3825 3830                   * so that subsequent retries (after failed VERIFY)
3826 3831                   * will use ctime from GETATTR results (from failed
3827 3832                   * verify compound) as VERIFY arg.
3828 3833                   * If file has delegation, then VERIFY(time_metadata)
3829 3834                   * is of little added value, so don't bother.
3830 3835                   */
3831 3836                  mutex_enter(&rp->r_statev4_lock);
3832 3837                  if (rp->r_deleg_type == OPEN_DELEGATE_NONE ||
3833 3838                      rp->r_deleg_return_pending) {
3834 3839                          numops = 5;
3835 3840                          ctime = rp->r_attr.va_ctime;
3836 3841                  }
3837 3842                  mutex_exit(&rp->r_statev4_lock);
3838 3843          }
3839 3844  
3840 3845          recov_state.rs_flags = 0;
3841 3846          recov_state.rs_num_retry_despite_err = 0;
3842 3847  
3843 3848          args.ctag = TAG_SETATTR;
3844 3849  do_again:
3845 3850  recov_retry:
3846 3851          setattr_argop = numops - 2;
3847 3852  
3848 3853          args.array = argop;
3849 3854          args.array_len = numops;
3850 3855  
3851 3856          e.error = nfs4_start_op(VTOMI4(vp), vp, NULL, &recov_state);
3852 3857          if (e.error)
3853 3858                  return (e.error);
3854 3859  
3855 3860  
3856 3861          /* putfh target fh */
3857 3862          argop[0].argop = OP_CPUTFH;
3858 3863          argop[0].nfs_argop4_u.opcputfh.sfh = rp->r_fh;
3859 3864  
3860 3865          if (numops == 5) {
3861 3866                  /*
3862 3867                   * We only care about the ctime, but need to get mtime
3863 3868                   * and size for proper cache update.
3864 3869                   */
3865 3870                  /* getattr */
3866 3871                  argop[1].argop = OP_GETATTR;
3867 3872                  argop[1].nfs_argop4_u.opgetattr.attr_request = NFS4_VATTR_MASK;
3868 3873                  argop[1].nfs_argop4_u.opgetattr.mi = VTOMI4(vp);
3869 3874  
3870 3875                  /* verify - set later in loop */
3871 3876                  verify_argop = 2;
3872 3877          }
3873 3878  
3874 3879          /* setattr */
3875 3880          svp = rp->r_server;
3876 3881          (void) nfs_rw_enter_sig(&svp->sv_lock, RW_READER, 0);
3877 3882          supp_attrs = svp->sv_supp_attrs;
3878 3883          nfs_rw_exit(&svp->sv_lock);
3879 3884  
3880 3885          nfs4args_setattr(&argop[setattr_argop], vap, vsap, flags, rp, cr,
3881 3886              supp_attrs, &e.error, &sid_types);
3882 3887          stateid = argop[setattr_argop].nfs_argop4_u.opsetattr.stateid;
3883 3888          if (e.error) {
3884 3889                  /* req time field(s) overflow - return immediately */
3885 3890                  nfs4_end_op(VTOMI4(vp), vp, NULL, &recov_state, needrecov);
3886 3891                  nfs4_fattr4_free(&argop[setattr_argop].nfs_argop4_u.
3887 3892                      opsetattr.obj_attributes);
3888 3893                  return (e.error);
3889 3894          }
3890 3895          omode = rp->r_attr.va_mode;
3891 3896  
3892 3897          /* getattr */
3893 3898          argop[numops-1].argop = OP_GETATTR;
3894 3899          argop[numops-1].nfs_argop4_u.opgetattr.attr_request = NFS4_VATTR_MASK;
3895 3900          /*
3896 3901           * If we are setting the ACL (indicated only by vsap != NULL), request
3897 3902           * the ACL in this getattr.  The ACL returned from this getattr will be
3898 3903           * used in updating the ACL cache.
3899 3904           */
3900 3905          if (vsap != NULL)
3901 3906                  argop[numops-1].nfs_argop4_u.opgetattr.attr_request |=
3902 3907                      FATTR4_ACL_MASK;
3903 3908          argop[numops-1].nfs_argop4_u.opgetattr.mi = VTOMI4(vp);
3904 3909  
3905 3910          /*
3906 3911           * setattr iterates if the object size is set and the cached ctime
3907 3912           * does not match the file ctime. In that case, verify the ctime first.
3908 3913           */
3909 3914  
3910 3915          do {
3911 3916                  if (verify_argop != -1) {
3912 3917                          /*
3913 3918                           * Verify that the ctime match before doing setattr.
3914 3919                           */
3915 3920                          va.va_mask = AT_CTIME;
3916 3921                          va.va_ctime = ctime;
3917 3922                          svp = rp->r_server;
3918 3923                          (void) nfs_rw_enter_sig(&svp->sv_lock, RW_READER, 0);
3919 3924                          supp_attrs = svp->sv_supp_attrs;
3920 3925                          nfs_rw_exit(&svp->sv_lock);
3921 3926                          e.error = nfs4args_verify(&argop[verify_argop], &va,
3922 3927                              OP_VERIFY, supp_attrs);
3923 3928                          if (e.error) {
3924 3929                                  /* req time field(s) overflow - return */
3925 3930                                  nfs4_end_op(VTOMI4(vp), vp, NULL, &recov_state,
3926 3931                                      needrecov);
3927 3932                                  break;
3928 3933                          }
3929 3934                  }
3930 3935  
3931 3936                  doqueue = 1;
3932 3937  
3933 3938                  t = gethrtime();
3934 3939  
3935 3940                  rfs4call(VTOMI4(vp), &args, &res, cr, &doqueue, 0, &e);
3936 3941  
3937 3942                  /*
3938 3943                   * Purge the access cache and ACL cache if changing either the
3939 3944                   * owner of the file, the group owner, or the mode.  These may
3940 3945                   * change the access permissions of the file, so purge old
3941 3946                   * information and start over again.
3942 3947                   */
3943 3948                  if (mask & (AT_UID | AT_GID | AT_MODE)) {
3944 3949                          (void) nfs4_access_purge_rp(rp);
3945 3950                          if (rp->r_secattr != NULL) {
3946 3951                                  mutex_enter(&rp->r_statelock);
3947 3952                                  vsp = rp->r_secattr;
3948 3953                                  rp->r_secattr = NULL;
3949 3954                                  mutex_exit(&rp->r_statelock);
3950 3955                                  if (vsp != NULL)
3951 3956                                          nfs4_acl_free_cache(vsp);
3952 3957                          }
3953 3958                  }
3954 3959  
3955 3960                  /*
3956 3961                   * If res.array_len == numops, then everything succeeded,
3957 3962                   * except for possibly the final getattr.  If only the
3958 3963                   * last getattr failed, give up, and don't try recovery.
3959 3964                   */
3960 3965                  if (res.array_len == numops) {
3961 3966                          nfs4_end_op(VTOMI4(vp), vp, NULL, &recov_state,
3962 3967                              needrecov);
3963 3968                          if (! e.error)
3964 3969                                  resp = &res;
3965 3970                          break;
3966 3971                  }
3967 3972  
3968 3973                  /*
3969 3974                   * if either rpc call failed or completely succeeded - done
3970 3975                   */
3971 3976                  needrecov = nfs4_needs_recovery(&e, FALSE, vp->v_vfsp);
3972 3977                  if (e.error) {
3973 3978                          PURGE_ATTRCACHE4(vp);
3974 3979                          if (!needrecov) {
3975 3980                                  nfs4_end_op(VTOMI4(vp), vp, NULL, &recov_state,
3976 3981                                      needrecov);
3977 3982                                  break;
3978 3983                          }
3979 3984                  }
3980 3985  
3981 3986                  /*
3982 3987                   * Do proper retry for OLD_STATEID outside of the normal
3983 3988                   * recovery framework.
3984 3989                   */
3985 3990                  if (e.error == 0 && res.status == NFS4ERR_OLD_STATEID &&
3986 3991                      sid_types.cur_sid_type != SPEC_SID &&
3987 3992                      sid_types.cur_sid_type != NO_SID) {
3988 3993                          nfs4_end_op(VTOMI4(vp), vp, NULL, &recov_state,
3989 3994                              needrecov);
3990 3995                          nfs4_save_stateid(&stateid, &sid_types);
3991 3996                          nfs4_fattr4_free(&argop[setattr_argop].nfs_argop4_u.
3992 3997                              opsetattr.obj_attributes);
3993 3998                          if (verify_argop != -1) {
3994 3999                                  nfs4args_verify_free(&argop[verify_argop]);
3995 4000                                  verify_argop = -1;
3996 4001                          }
3997 4002                          (void) xdr_free(xdr_COMPOUND4res_clnt, (caddr_t)&res);
3998 4003                          goto recov_retry;
3999 4004                  }
4000 4005  
4001 4006                  if (needrecov) {
4002 4007                          bool_t abort;
4003 4008  
4004 4009                          abort = nfs4_start_recovery(&e,
4005 4010                              VTOMI4(vp), vp, NULL, NULL, NULL,
4006 4011                              OP_SETATTR, NULL, NULL, NULL);
4007 4012                          nfs4_end_op(VTOMI4(vp), vp, NULL, &recov_state,
4008 4013                              needrecov);
4009 4014                          /*
4010 4015                           * Do not retry if we failed with OLD_STATEID using
4011 4016                           * a special stateid.  This is done to avoid looping
4012 4017                           * with a broken server.
4013 4018                           */
4014 4019                          if (e.error == 0 && res.status == NFS4ERR_OLD_STATEID &&
4015 4020                              (sid_types.cur_sid_type == SPEC_SID ||
4016 4021                              sid_types.cur_sid_type == NO_SID))
4017 4022                                  abort = TRUE;
4018 4023                          if (!e.error) {
4019 4024                                  if (res.status == NFS4ERR_BADOWNER)
4020 4025                                          nfs4_log_badowner(VTOMI4(vp),
4021 4026                                              OP_SETATTR);
4022 4027  
4023 4028                                  e.error = geterrno4(res.status);
4024 4029                                  (void) xdr_free(xdr_COMPOUND4res_clnt,
4025 4030                                      (caddr_t)&res);
4026 4031                          }
4027 4032                          nfs4_fattr4_free(&argop[setattr_argop].nfs_argop4_u.
4028 4033                              opsetattr.obj_attributes);
4029 4034                          if (verify_argop != -1) {
4030 4035                                  nfs4args_verify_free(&argop[verify_argop]);
4031 4036                                  verify_argop = -1;
4032 4037                          }
4033 4038                          if (abort == FALSE) {
4034 4039                                  /*
4035 4040                                   * Need to retry all possible stateids in
4036 4041                                   * case the recovery error wasn't stateid
4037 4042                                   * related or the stateids have become
4038 4043                                   * stale (server reboot).
4039 4044                                   */
4040 4045                                  nfs4_init_stateid_types(&sid_types);
4041 4046                                  goto recov_retry;
4042 4047                          }
4043 4048                          return (e.error);
4044 4049                  }
4045 4050  
4046 4051                  /*
4047 4052                   * Need to call nfs4_end_op before nfs4getattr to
4048 4053                   * avoid potential nfs4_start_op deadlock. See RFE
4049 4054                   * 4777612.  Calls to nfs4_invalidate_pages() and
4050 4055                   * nfs4_purge_stale_fh() might also generate over the
4051 4056           * wire calls which may cause nfs4_start_op() deadlock.
4052 4057                   */
4053 4058                  nfs4_end_op(VTOMI4(vp), vp, NULL, &recov_state, needrecov);
4054 4059  
4055 4060                  /*
4056 4061                   * Check to update lease.
4057 4062                   */
4058 4063                  resp = &res;
4059 4064                  if (res.status == NFS4_OK) {
4060 4065                          break;
4061 4066                  }
4062 4067  
4063 4068                  /*
4064 4069                   * Check if verify failed to see if try again
4065 4070                   */
4066 4071                  if ((verify_argop == -1) || (res.array_len != 3)) {
4067 4072                          /*
4068 4073                           * can't continue...
4069 4074                           */
4070 4075                          if (res.status == NFS4ERR_BADOWNER)
4071 4076                                  nfs4_log_badowner(VTOMI4(vp), OP_SETATTR);
4072 4077  
4073 4078                          e.error = geterrno4(res.status);
4074 4079                  } else {
4075 4080                          /*
4076 4081                           * When the verify request fails, the client ctime is
4077 4082                           * not in sync with the server. This is the same as
4078 4083                           * the version 3 "not synchronized" error, and we
4079 4084                           * handle it in a similar manner (XXX do we need to???).
4080 4085                           * Use the ctime returned in the first getattr for
4081 4086                           * the input to the next verify.
4082 4087                           * If we couldn't get the attributes, then we give up
4083 4088                           * because we can't complete the operation as required.
4084 4089                           */
4085 4090                          garp = &res.array[1].nfs_resop4_u.opgetattr.ga_res;
4086 4091                  }
4087 4092                  if (e.error) {
4088 4093                          PURGE_ATTRCACHE4(vp);
4089 4094                          nfs4_purge_stale_fh(e.error, vp, cr);
4090 4095                  } else {
4091 4096                          /*
4092 4097                           * retry with a new verify value
4093 4098                           */
4094 4099                          ctime = garp->n4g_va.va_ctime;
4095 4100                          (void) xdr_free(xdr_COMPOUND4res_clnt, (caddr_t)&res);
4096 4101                          resp = NULL;
4097 4102                  }
4098 4103                  if (!e.error) {
4099 4104                          nfs4_fattr4_free(&argop[setattr_argop].nfs_argop4_u.
4100 4105                              opsetattr.obj_attributes);
4101 4106                          if (verify_argop != -1) {
4102 4107                                  nfs4args_verify_free(&argop[verify_argop]);
4103 4108                                  verify_argop = -1;
4104 4109                          }
4105 4110                          (void) xdr_free(xdr_COMPOUND4res_clnt, (caddr_t)&res);
4106 4111                          goto do_again;
4107 4112                  }
4108 4113          } while (!e.error);
4109 4114  
4110 4115          if (e.error) {
4111 4116                  /*
4112 4117                   * If we are here, rfs4call has an irrecoverable error - return
4113 4118                   */
4114 4119                  nfs4_fattr4_free(&argop[setattr_argop].nfs_argop4_u.
4115 4120                      opsetattr.obj_attributes);
4116 4121                  if (verify_argop != -1) {
4117 4122                          nfs4args_verify_free(&argop[verify_argop]);
4118 4123                          verify_argop = -1;
4119 4124                  }
4120 4125                  if (resp)
4121 4126                          (void) xdr_free(xdr_COMPOUND4res_clnt, (caddr_t)resp);
4122 4127                  return (e.error);
4123 4128          }
4124 4129  
4125 4130  
4126 4131  
4127 4132          /*
4128 4133           * If changing the size of the file, invalidate
4129 4134           * any local cached data which is no longer part
4130 4135           * of the file.  We also possibly invalidate the
4131 4136           * last page in the file.  We could use
4132 4137           * pvn_vpzero(), but this would mark the page as
4133 4138           * modified and require it to be written back to
4134 4139           * the server for no particularly good reason.
4135 4140           * This way, if we access it, then we bring it
4136 4141           * back in.  A read should be cheaper than a
4137 4142           * write.
4138 4143           */
4139 4144          if (mask & AT_SIZE) {
4140 4145                  nfs4_invalidate_pages(vp, (vap->va_size & PAGEMASK), cr);
4141 4146          }
4142 4147  
4143 4148          /* either no error or one of the postop getattr failed */
4144 4149  
4145 4150          /*
4146 4151           * XXX Perform a simplified version of wcc checking. Instead of
4147 4152           * have another getattr to get pre-op, just purge cache if
4148 4153           * any of the ops prior to and including the getattr failed.
4149 4154           * If the getattr succeeded then update the attrcache accordingly.
4150 4155           */
4151 4156  
4152 4157          garp = NULL;
4153 4158          if (res.status == NFS4_OK) {
4154 4159                  /*
4155 4160                   * Last getattr
4156 4161                   */
4157 4162                  resop = &res.array[numops - 1];
4158 4163                  garp = &resop->nfs_resop4_u.opgetattr.ga_res;
4159 4164          }
4160 4165          /*
4161 4166           * In certain cases, nfs4_update_attrcache() will purge the attrcache,
4162 4167           * rather than filling it.  See the function itself for details.
4163 4168           */
4164 4169          e.error = nfs4_update_attrcache(res.status, garp, t, vp, cr);
4165 4170          if (garp != NULL) {
4166 4171                  if (garp->n4g_resbmap & FATTR4_ACL_MASK) {
4167 4172                          nfs4_acl_fill_cache(rp, &garp->n4g_vsa);
4168 4173                          vs_ace4_destroy(&garp->n4g_vsa);
4169 4174                  } else {
4170 4175                          if (vsap != NULL) {
4171 4176                                  /*
4172 4177                                   * The ACL was supposed to be set and to be
4173 4178                                   * returned in the last getattr of this
4174 4179                                   * compound, but for some reason the getattr
4175 4180                                   * result doesn't contain the ACL.  In this
4176 4181                                   * case, purge the ACL cache.
4177 4182                                   */
4178 4183                                  if (rp->r_secattr != NULL) {
4179 4184                                          mutex_enter(&rp->r_statelock);
4180 4185                                          vsp = rp->r_secattr;
4181 4186                                          rp->r_secattr = NULL;
4182 4187                                          mutex_exit(&rp->r_statelock);
4183 4188                                          if (vsp != NULL)
4184 4189                                                  nfs4_acl_free_cache(vsp);
4185 4190                                  }
4186 4191                          }
4187 4192                  }
4188 4193          }
4189 4194  
4190 4195          if (res.status == NFS4_OK && (mask & AT_SIZE)) {
4191 4196                  /*
4192 4197                   * Set the size, rather than relying on getting it updated
4193 4198                   * via a GETATTR.  With delegations the client tries to
4194 4199                   * suppress GETATTR calls.
4195 4200                   */
4196 4201                  mutex_enter(&rp->r_statelock);
4197 4202                  rp->r_size = vap->va_size;
4198 4203                  mutex_exit(&rp->r_statelock);
4199 4204          }
4200 4205  
4201 4206          /*
4202 4207           * Can free up request args and res
4203 4208           */
4204 4209          nfs4_fattr4_free(&argop[setattr_argop].nfs_argop4_u.
4205 4210              opsetattr.obj_attributes);
4206 4211          if (verify_argop != -1) {
4207 4212                  nfs4args_verify_free(&argop[verify_argop]);
4208 4213                  verify_argop = -1;
4209 4214          }
4210 4215          (void) xdr_free(xdr_COMPOUND4res_clnt, (caddr_t)&res);
4211 4216  
4212 4217          /*
4213 4218           * Some servers will change the mode to clear the setuid
4214 4219           * and setgid bits when changing the uid or gid.  The
4215 4220           * client needs to compensate appropriately.
4216 4221           */
4217 4222          if (mask & (AT_UID | AT_GID)) {
4218 4223                  int terror, do_setattr;
4219 4224  
4220 4225                  do_setattr = 0;
4221 4226                  va.va_mask = AT_MODE;
4222 4227                  terror = nfs4getattr(vp, &va, cr);
4223 4228                  if (!terror &&
4224 4229                      (((mask & AT_MODE) && va.va_mode != vap->va_mode) ||
4225 4230                      (!(mask & AT_MODE) && va.va_mode != omode))) {
4226 4231                          va.va_mask = AT_MODE;
4227 4232                          if (mask & AT_MODE) {
4228 4233                                  /*
4229 4234                                   * We asked the mode to be changed and what
4230 4235                                   * we just got from the server in getattr is
4231 4236                                   * not what we wanted it to be, so set it now.
4232 4237                                   */
4233 4238                                  va.va_mode = vap->va_mode;
4234 4239                                  do_setattr = 1;
4235 4240                          } else {
4236 4241                                  /*
4237 4242                                   * We did not ask the mode to be changed,
4238 4243                                   * Check to see that the server just cleared
4239 4244                                   * S_ISUID and S_ISGID from it. If not then
4240 4245                                   * set mode to omode with UID/GID cleared.
4241 4246                                   */
4242 4247                                  if (nfs4_compare_modes(va.va_mode, omode)) {
4243 4248                                          omode &= ~(S_ISUID|S_ISGID);
4244 4249                                          va.va_mode = omode;
4245 4250                                          do_setattr = 1;
4246 4251                                  }
4247 4252                          }
4248 4253  
4249 4254                          if (do_setattr)
4250 4255                                  (void) nfs4setattr(vp, &va, 0, cr, NULL);
4251 4256                  }
4252 4257          }
4253 4258  
4254 4259          return (e.error);
4255 4260  }
4256 4261  
4257 4262  /* ARGSUSED */
           /*
            * nfs4_access: VOP_ACCESS entry point for the NFSv4 client.
            *
            * Decide whether `cr' permits the operations requested in `mode'
            * (VREAD, VWRITE, VEXEC) on `vp'.  The cached ACCESS results on
            * the rnode are consulted first; only on a cache miss is an
            * over-the-wire ACCESS compound sent (with a trailing GETATTR
            * when the client holds no delegation).  If access is denied for
            * `cr', the check is retried once with the crnetadjust()ed
            * credential, giving a setuid-root process at least the
            * permissions of the invoking user.  Returns 0 or an errno.
            */
4258 4263  static int
4259 4264  nfs4_access(vnode_t *vp, int mode, int flags, cred_t *cr, caller_context_t *ct)
4260 4265  {
4261 4266          COMPOUND4args_clnt args;
4262 4267          COMPOUND4res_clnt res;
4263 4268          int doqueue;
4264 4269          uint32_t acc, resacc, argacc;
4265 4270          rnode4_t *rp;
4266 4271          cred_t *cred, *ncr, *ncrfree = NULL;
4267 4272          nfs4_access_type_t cacc;
4268 4273          int num_ops;
4269 4274          nfs_argop4 argop[3];
4270 4275          nfs_resop4 *resop;
4271 4276          bool_t needrecov = FALSE, do_getattr;
4272 4277          nfs4_recov_state_t recov_state;
4273 4278          int rpc_error;
4274 4279          hrtime_t t;
4275 4280          nfs4_error_t e = { 0, NFS4_OK, RPC_SUCCESS };
4276 4281          mntinfo4_t *mi = VTOMI4(vp);
4277 4282  
                   /* Cross-zone access to an NFS mount is not permitted. */
4278 4283          if (nfs_zone() != mi->mi_zone)
4279 4284                  return (EIO);
4280 4285  
                   /* Translate the VREAD/VWRITE/VEXEC request into ACCESS4_* bits. */
4281 4286          acc = 0;
4282 4287          if (mode & VREAD)
4283 4288                  acc |= ACCESS4_READ;
4284 4289          if (mode & VWRITE) {
4285 4290                  if ((vp->v_vfsp->vfs_flag & VFS_RDONLY) && !ISVDEV(vp->v_type))
4286 4291                          return (EROFS);
4287 4292                  if (vp->v_type == VDIR)
4288 4293                          acc |= ACCESS4_DELETE;
4289 4294                  acc |= ACCESS4_MODIFY | ACCESS4_EXTEND;
4290 4295          }
4291 4296          if (mode & VEXEC) {
4292 4297                  if (vp->v_type == VDIR)
4293 4298                          acc |= ACCESS4_LOOKUP;
4294 4299                  else
4295 4300                          acc |= ACCESS4_EXECUTE;
4296 4301          }
4297 4302  
                   /* Revalidate stale cached access results before trusting them. */
4298 4303          if (VTOR4(vp)->r_acache != NULL) {
4299 4304                  e.error = nfs4_validate_caches(vp, cr);
4300 4305                  if (e.error)
4301 4306                          return (e.error);
4302 4307          }
4303 4308  
4304 4309          rp = VTOR4(vp);
                   /*
                    * Ask the server about every access bit meaningful for this
                    * vnode type, not just the bits being checked now, so the
                    * reply can fully prime the access cache.
                    */
4305 4310          if (vp->v_type == VDIR)
4306 4311                  argacc = ACCESS4_READ | ACCESS4_DELETE | ACCESS4_MODIFY |
4307 4312                      ACCESS4_EXTEND | ACCESS4_LOOKUP;
4308 4313          else
4309 4314                  argacc = ACCESS4_READ | ACCESS4_MODIFY | ACCESS4_EXTEND |
4310 4315                      ACCESS4_EXECUTE;
4311 4316          recov_state.rs_flags = 0;
4312 4317          recov_state.rs_num_retry_despite_err = 0;
4313 4318  
4314 4319          cred = cr;
4315 4320          /*
4316 4321           * ncr and ncrfree both initially
4317 4322           * point to the memory area returned
4318 4323           * by crnetadjust();
4319 4324           * ncrfree not NULL when exiting means
4320 4325           * that we need to release it
4321 4326           */
4322 4327          ncr = crnetadjust(cred);
4323 4328          ncrfree = ncr;
4324 4329  
           /* Consult the cached ACCESS results for the current credential first. */
4325 4330  tryagain:
4326 4331          cacc = nfs4_access_check(rp, acc, cred);
4327 4332          if (cacc == NFS4_ACCESS_ALLOWED) {
4328 4333                  if (ncrfree != NULL)
4329 4334                          crfree(ncrfree);
4330 4335                  return (0);
4331 4336          }
4332 4337          if (cacc == NFS4_ACCESS_DENIED) {
4333 4338                  /*
4334 4339                   * If the cred can be adjusted, try again
4335 4340                   * with the new cred.
4336 4341                   */
4337 4342                  if (ncr != NULL) {
4338 4343                          cred = ncr;
4339 4344                          ncr = NULL;
4340 4345                          goto tryagain;
4341 4346                  }
4342 4347                  if (ncrfree != NULL)
4343 4348                          crfree(ncrfree);
4344 4349                  return (EACCES);
4345 4350          }
4346 4351  
           /* Cache miss: go over the wire with { Putfh; Access [; Getattr] }. */
4347 4352  recov_retry:
4348 4353          /*
4349 4354           * Don't take with r_statev4_lock here. r_deleg_type could
4350 4355           * change as soon as lock is released.  Since it is an int,
4351 4356           * there is no atomicity issue.
4352 4357           */
4353 4358          do_getattr = (rp->r_deleg_type == OPEN_DELEGATE_NONE);
4354 4359          num_ops = do_getattr ? 3 : 2;
4355 4360  
4356 4361          args.ctag = TAG_ACCESS;
4357 4362  
4358 4363          args.array_len = num_ops;
4359 4364          args.array = argop;
4360 4365  
4361 4366          if (e.error = nfs4_start_fop(mi, vp, NULL, OH_ACCESS,
4362 4367              &recov_state, NULL)) {
4363 4368                  if (ncrfree != NULL)
4364 4369                          crfree(ncrfree);
4365 4370                  return (e.error);
4366 4371          }
4367 4372  
4368 4373          /* putfh target fh */
4369 4374          argop[0].argop = OP_CPUTFH;
4370 4375          argop[0].nfs_argop4_u.opcputfh.sfh = VTOR4(vp)->r_fh;
4371 4376  
4372 4377          /* access */
4373 4378          argop[1].argop = OP_ACCESS;
4374 4379          argop[1].nfs_argop4_u.opaccess.access = argacc;
4375 4380  
4376 4381          /* getattr */
4377 4382          if (do_getattr) {
4378 4383                  argop[2].argop = OP_GETATTR;
4379 4384                  argop[2].nfs_argop4_u.opgetattr.attr_request = NFS4_VATTR_MASK;
4380 4385                  argop[2].nfs_argop4_u.opgetattr.mi = mi;
4381 4386          }
4382 4387  
4383 4388          NFS4_DEBUG(nfs4_client_call_debug, (CE_NOTE,
4384 4389              "nfs4_access: %s call, rp %s", needrecov ? "recov" : "first",
4385 4390              rnode4info(VTOR4(vp))));
4386 4391  
4387 4392          doqueue = 1;
4388 4393          t = gethrtime();
4389 4394          rfs4call(VTOMI4(vp), &args, &res, cred, &doqueue, 0, &e);
                   /*
                    * Remember whether the RPC itself failed; this is used at
                    * `out' to decide whether `res' holds a decoded reply that
                    * needs to be xdr_free()d.
                    */
4390 4395          rpc_error = e.error;
4391 4396  
4392 4397          needrecov = nfs4_needs_recovery(&e, FALSE, vp->v_vfsp);
4393 4398          if (needrecov) {
4394 4399                  NFS4_DEBUG(nfs4_client_recov_debug, (CE_NOTE,
4395 4400                      "nfs4_access: initiating recovery\n"));
4396 4401  
                           /* If recovery was started (and not aborted), retry the call. */
4397 4402                  if (nfs4_start_recovery(&e, VTOMI4(vp), vp, NULL, NULL,
4398 4403                      NULL, OP_ACCESS, NULL, NULL, NULL) == FALSE) {
4399 4404                          nfs4_end_fop(VTOMI4(vp), vp, NULL, OH_ACCESS,
4400 4405                              &recov_state, needrecov);
4401 4406                          if (!e.error)
4402 4407                                  (void) xdr_free(xdr_COMPOUND4res_clnt,
4403 4408                                      (caddr_t)&res);
4404 4409                          goto recov_retry;
4405 4410                  }
4406 4411          }
4407 4412          nfs4_end_fop(mi, vp, NULL, OH_ACCESS, &recov_state, needrecov);
4408 4413  
4409 4414          if (e.error)
4410 4415                  goto out;
4411 4416  
4412 4417          if (res.status) {
4413 4418                  e.error = geterrno4(res.status);
4414 4419                  /*
4415 4420                   * This might generate over the wire calls through
4416 4421                   * nfs4_invalidate_pages. Hence we need to call nfs4_end_op()
4417 4422                   * here to avoid a deadlock.
4418 4423                   */
4419 4424                  nfs4_purge_stale_fh(e.error, vp, cr);
4420 4425                  goto out;
4421 4426          }
4422 4427          resop = &res.array[1];  /* access res */
4423 4428  
4424 4429          resacc = resop->nfs_resop4_u.opaccess.access;
4425 4430  
4426 4431          if (do_getattr) {
4427 4432                  resop++;        /* getattr res */
4428 4433                  nfs4_attr_cache(vp, &resop->nfs_resop4_u.opgetattr.ga_res,
4429 4434                      t, cr, FALSE, NULL);
4430 4435          }
4431 4436  
4432 4437          if (!e.error) {
4433 4438                  nfs4_access_cache(rp, argacc, resacc, cred);
4434 4439                  /*
4435 4440                   * we just cached results with cred; if cred is the
4436 4441                   * adjusted credentials from crnetadjust, we do not want
4437 4442                   * to release them before exiting: hence setting ncrfree
4438 4443                   * to NULL
4439 4444                   */
4440 4445                  if (cred != cr)
4441 4446                          ncrfree = NULL;
4442 4447                  /* XXX check the supported bits too? */
4443 4448                  if ((acc & resacc) != acc) {
4444 4449                          /*
4445 4450                           * The following code implements the semantic
4446 4451                           * that a setuid root program has *at least* the
4447 4452                           * permissions of the user that is running the
4448 4453                           * program.  See rfs3call() for more portions
4449 4454                           * of the implementation of this functionality.
4450 4455                           */
4451 4456                          /* XXX-LP */
4452 4457                          if (ncr != NULL) {
4453 4458                                  (void) xdr_free(xdr_COMPOUND4res_clnt,
4454 4459                                      (caddr_t)&res);
4455 4460                                  cred = ncr;
4456 4461                                  ncr = NULL;
4457 4462                                  goto tryagain;
4458 4463                          }
4459 4464                          e.error = EACCES;
4460 4465                  }
4461 4466          }
4462 4467  
4463 4468  out:
                   /* Only free the reply if the RPC actually produced one. */
4464 4469          if (!rpc_error)
4465 4470                  (void) xdr_free(xdr_COMPOUND4res_clnt, (caddr_t)&res);
4466 4471  
4467 4472          if (ncrfree != NULL)
4468 4473                  crfree(ncrfree);
4469 4474  
4470 4475          return (e.error);
4471 4476  }
4472 4477  
/*
 * VOP_READLINK for NFSv4: return the contents of a symbolic link.
 * Serves the request from the cached symlink text when possible;
 * otherwise issues a PUTFH/READLINK/GETATTR compound and (optionally)
 * caches the result.  Returns 0 or an errno; the over-the-wire ENXIO
 * for "not a symlink" is mapped to EINVAL for the caller.
 */
/* ARGSUSED */
static int
nfs4_readlink(vnode_t *vp, struct uio *uiop, cred_t *cr, caller_context_t *ct)
{
        COMPOUND4args_clnt args;
        COMPOUND4res_clnt res;
        int doqueue;
        rnode4_t *rp;
        nfs_argop4 argop[3];
        nfs_resop4 *resop;
        READLINK4res *lr_res;
        nfs4_ga_res_t *garp;
        uint_t len;
        char *linkdata;
        bool_t needrecov = FALSE;
        nfs4_recov_state_t recov_state;
        hrtime_t t;
        nfs4_error_t e = { 0, NFS4_OK, RPC_SUCCESS };

        if (nfs_zone() != VTOMI4(vp)->mi_zone)
                return (EIO);
        /*
         * Can't readlink anything other than a symbolic link.
         */
        if (vp->v_type != VLNK)
                return (EINVAL);

        rp = VTOR4(vp);
        if (nfs4_do_symlink_cache && rp->r_symlink.contents != NULL) {
                e.error = nfs4_validate_caches(vp, cr);
                if (e.error)
                        return (e.error);
                mutex_enter(&rp->r_statelock);
                /* re-check under the lock: validation may have purged it */
                if (rp->r_symlink.contents != NULL) {
                        e.error = uiomove(rp->r_symlink.contents,
                            rp->r_symlink.len, UIO_READ, uiop);
                        mutex_exit(&rp->r_statelock);
                        return (e.error);
                }
                mutex_exit(&rp->r_statelock);
        }
        recov_state.rs_flags = 0;
        recov_state.rs_num_retry_despite_err = 0;

recov_retry:
        /* Compound: 0 PUTFH (symlink fh); 1 READLINK; 2 GETATTR */
        args.array_len = 3;
        args.array = argop;
        args.ctag = TAG_READLINK;

        e.error = nfs4_start_op(VTOMI4(vp), vp, NULL, &recov_state);
        if (e.error) {
                return (e.error);
        }

        /* 0. putfh symlink fh */
        argop[0].argop = OP_CPUTFH;
        argop[0].nfs_argop4_u.opcputfh.sfh = VTOR4(vp)->r_fh;

        /* 1. readlink */
        argop[1].argop = OP_READLINK;

        /* 2. getattr */
        argop[2].argop = OP_GETATTR;
        argop[2].nfs_argop4_u.opgetattr.attr_request = NFS4_VATTR_MASK;
        argop[2].nfs_argop4_u.opgetattr.mi = VTOMI4(vp);

        doqueue = 1;

        NFS4_DEBUG(nfs4_client_call_debug, (CE_NOTE,
            "nfs4_readlink: %s call, rp %s", needrecov ? "recov" : "first",
            rnode4info(VTOR4(vp))));

        t = gethrtime();

        rfs4call(VTOMI4(vp), &args, &res, cr, &doqueue, 0, &e);

        needrecov = nfs4_needs_recovery(&e, FALSE, vp->v_vfsp);
        if (needrecov) {
                NFS4_DEBUG(nfs4_client_recov_debug, (CE_NOTE,
                    "nfs4_readlink: initiating recovery\n"));

                if (nfs4_start_recovery(&e, VTOMI4(vp), vp, NULL, NULL,
                    NULL, OP_READLINK, NULL, NULL, NULL) == FALSE) {
                        if (!e.error)
                                (void) xdr_free(xdr_COMPOUND4res_clnt,
                                    (caddr_t)&res);

                        nfs4_end_op(VTOMI4(vp), vp, NULL, &recov_state,
                            needrecov);
                        goto recov_retry;
                }
        }

        nfs4_end_op(VTOMI4(vp), vp, NULL, &recov_state, needrecov);

        if (e.error)
                return (e.error);

        /*
         * There is a path in the code below which calls
         * nfs4_purge_stale_fh(), which may generate otw calls through
         * nfs4_invalidate_pages. Hence we need to call nfs4_end_op()
         * here to avoid nfs4_start_op() deadlock.
         */

        if (res.status && (res.array_len < args.array_len)) {
                /*
                 * either Putfh or Readlink failed
                 */
                e.error = geterrno4(res.status);
                nfs4_purge_stale_fh(e.error, vp, cr);
                (void) xdr_free(xdr_COMPOUND4res_clnt, (caddr_t)&res);
                return (e.error);
        }

        resop = &res.array[1];  /* readlink res */
        lr_res = &resop->nfs_resop4_u.opreadlink;

        /*
         * treat symlink names as data
         */
        linkdata = utf8_to_str((utf8string *)&lr_res->link, &len, NULL);
        if (linkdata != NULL) {
                int uio_len = len - 1;
                /* len includes null byte, which we won't uiomove */
                e.error = uiomove(linkdata, uio_len, UIO_READ, uiop);
                if (nfs4_do_symlink_cache && rp->r_symlink.contents == NULL) {
                        mutex_enter(&rp->r_statelock);
                        if (rp->r_symlink.contents == NULL) {
                                /* cache takes ownership of linkdata */
                                rp->r_symlink.contents = linkdata;
                                rp->r_symlink.len = uio_len;
                                rp->r_symlink.size = len;
                                mutex_exit(&rp->r_statelock);
                        } else {
                                /* another thread cached it first */
                                mutex_exit(&rp->r_statelock);
                                kmem_free(linkdata, len);
                        }
                } else {
                        kmem_free(linkdata, len);
                }
        }
        if (res.status == NFS4_OK) {
                resop++;        /* getattr res */
                garp = &resop->nfs_resop4_u.opgetattr.ga_res;
        }
        /*
         * NOTE(review): garp is uninitialized when res.status != NFS4_OK;
         * presumably nfs4_update_attrcache() ignores it on failure -- confirm.
         */
        e.error = nfs4_update_attrcache(res.status, garp, t, vp, cr);

        (void) xdr_free(xdr_COMPOUND4res_clnt, (caddr_t)&res);

        /*
         * The over the wire error for attempting to readlink something
         * other than a symbolic link is ENXIO.  However, we need to
         * return EINVAL instead of ENXIO, so we map it here.
         */
        return (e.error == ENXIO ? EINVAL : e.error);
}
4629 4634  
4630 4635  /*
4631 4636   * Flush local dirty pages to stable storage on the server.
4632 4637   *
4633 4638   * If FNODSYNC is specified, then there is nothing to do because
4634 4639   * metadata changes are not cached on the client before being
4635 4640   * sent to the server.
4636 4641   */
4637 4642  /* ARGSUSED */
4638 4643  static int
4639 4644  nfs4_fsync(vnode_t *vp, int syncflag, cred_t *cr, caller_context_t *ct)
4640 4645  {
4641 4646          int error;
4642 4647  
4643 4648          if ((syncflag & FNODSYNC) || IS_SWAPVP(vp))
4644 4649                  return (0);
4645 4650          if (nfs_zone() != VTOMI4(vp)->mi_zone)
4646 4651                  return (EIO);
4647 4652          error = nfs4_putpage_commit(vp, (offset_t)0, 0, cr);
4648 4653          if (!error)
4649 4654                  error = VTOR4(vp)->r_error;
4650 4655          return (error);
4651 4656  }
4652 4657  
4653 4658  /*
4654 4659   * Weirdness: if the file was removed or the target of a rename
4655 4660   * operation while it was open, it got renamed instead.  Here we
4656 4661   * remove the renamed file.
4657 4662   */
4658 4663  /* ARGSUSED */
4659 4664  void
4660 4665  nfs4_inactive(vnode_t *vp, cred_t *cr, caller_context_t *ct)
4661 4666  {
4662 4667          rnode4_t *rp;
4663 4668  
4664 4669          ASSERT(vp != DNLC_NO_VNODE);
4665 4670  
4666 4671          rp = VTOR4(vp);
4667 4672  
4668 4673          if (IS_SHADOW(vp, rp)) {
4669 4674                  sv_inactive(vp);
4670 4675                  return;
4671 4676          }
4672 4677  
4673 4678          /*
4674 4679           * If this is coming from the wrong zone, we let someone in the right
4675 4680           * zone take care of it asynchronously.  We can get here due to
4676 4681           * VN_RELE() being called from pageout() or fsflush().  This call may
4677 4682           * potentially turn into an expensive no-op if, for instance, v_count
4678 4683           * gets incremented in the meantime, but it's still correct.
4679 4684           */
4680 4685          if (nfs_zone() != VTOMI4(vp)->mi_zone) {
4681 4686                  nfs4_async_inactive(vp, cr);
4682 4687                  return;
4683 4688          }
4684 4689  
4685 4690          /*
4686 4691           * Some of the cleanup steps might require over-the-wire
4687 4692           * operations.  Since VOP_INACTIVE can get called as a result of
4688 4693           * other over-the-wire operations (e.g., an attribute cache update
4689 4694           * can lead to a DNLC purge), doing those steps now would lead to a
4690 4695           * nested call to the recovery framework, which can deadlock.  So
4691 4696           * do any over-the-wire cleanups asynchronously, in a separate
4692 4697           * thread.
4693 4698           */
4694 4699  
4695 4700          mutex_enter(&rp->r_os_lock);
4696 4701          mutex_enter(&rp->r_statelock);
4697 4702          mutex_enter(&rp->r_statev4_lock);
4698 4703  
4699 4704          if (vp->v_type == VREG && list_head(&rp->r_open_streams) != NULL) {
4700 4705                  mutex_exit(&rp->r_statev4_lock);
4701 4706                  mutex_exit(&rp->r_statelock);
4702 4707                  mutex_exit(&rp->r_os_lock);
4703 4708                  nfs4_async_inactive(vp, cr);
4704 4709                  return;
4705 4710          }
4706 4711  
4707 4712          if (rp->r_deleg_type == OPEN_DELEGATE_READ ||
4708 4713              rp->r_deleg_type == OPEN_DELEGATE_WRITE) {
4709 4714                  mutex_exit(&rp->r_statev4_lock);
4710 4715                  mutex_exit(&rp->r_statelock);
4711 4716                  mutex_exit(&rp->r_os_lock);
4712 4717                  nfs4_async_inactive(vp, cr);
4713 4718                  return;
4714 4719          }
4715 4720  
4716 4721          if (rp->r_unldvp != NULL) {
4717 4722                  mutex_exit(&rp->r_statev4_lock);
4718 4723                  mutex_exit(&rp->r_statelock);
4719 4724                  mutex_exit(&rp->r_os_lock);
4720 4725                  nfs4_async_inactive(vp, cr);
4721 4726                  return;
4722 4727          }
4723 4728          mutex_exit(&rp->r_statev4_lock);
4724 4729          mutex_exit(&rp->r_statelock);
4725 4730          mutex_exit(&rp->r_os_lock);
4726 4731  
4727 4732          rp4_addfree(rp, cr);
4728 4733  }
4729 4734  
4730 4735  /*
4731 4736   * nfs4_inactive_otw - nfs4_inactive, plus over-the-wire calls to free up
4732 4737   * various bits of state.  The caller must not refer to vp after this call.
4733 4738   */
4734 4739  
void
nfs4_inactive_otw(vnode_t *vp, cred_t *cr)
{
        rnode4_t *rp = VTOR4(vp);
        nfs4_recov_state_t recov_state;
        nfs4_error_t e = { 0, NFS4_OK, RPC_SUCCESS };
        vnode_t *unldvp;
        char *unlname;
        cred_t *unlcred;
        COMPOUND4args_clnt args;
        COMPOUND4res_clnt res, *resp;
        nfs_argop4 argop[2];
        int doqueue;
#ifdef DEBUG
        char *name;
#endif

        ASSERT(nfs_zone() == VTOMI4(vp)->mi_zone);
        ASSERT(!IS_SHADOW(vp, rp));

#ifdef DEBUG
        name = fn_name(VTOSV(vp)->sv_name);
        NFS4_DEBUG(nfs4_client_inactive_debug, (CE_NOTE, "nfs4_inactive_otw: "
            "release vnode %s", name));
        kmem_free(name, MAXNAMELEN);
#endif

        /*
         * For regular files, first close all open streams.  A close
         * failure is only logged when it is due to a recovery failure
         * (either mount-wide MI4_RECOV_FAIL or per-rnode R4RECOVERR).
         */
        if (vp->v_type == VREG) {
                bool_t recov_failed = FALSE;

                e.error = nfs4close_all(vp, cr);
                if (e.error) {
                        /* Check to see if recovery failed */
                        mutex_enter(&(VTOMI4(vp)->mi_lock));
                        if (VTOMI4(vp)->mi_flags & MI4_RECOV_FAIL)
                                recov_failed = TRUE;
                        mutex_exit(&(VTOMI4(vp)->mi_lock));
                        if (!recov_failed) {
                                mutex_enter(&rp->r_statelock);
                                if (rp->r_flags & R4RECOVERR)
                                        recov_failed = TRUE;
                                mutex_exit(&rp->r_statelock);
                        }
                        if (recov_failed) {
                                NFS4_DEBUG(nfs4_client_recov_debug,
                                    (CE_NOTE, "nfs4_inactive_otw: "
                                    "close failed (recovery failure)"));
                        }
                }
        }

redo:
        if (rp->r_unldvp == NULL) {
                /* No (more) pending unlinked-open removal; free the rnode. */
                rp4_addfree(rp, cr);
                return;
        }

        /*
         * Save the vnode pointer for the directory where the
         * unlinked-open file got renamed, then set it to NULL
         * to prevent another thread from getting here before
         * we're done with the remove.  While we have the
         * statelock, make local copies of the pertinent rnode
         * fields.  If we weren't to do this in an atomic way, the
         * unl* fields could become inconsistent with respect
         * to each other due to a race condition between this
         * code and nfs_remove().  See bug report 1034328.
         */
        mutex_enter(&rp->r_statelock);
        if (rp->r_unldvp == NULL) {
                /* lost the race: someone else is handling the remove */
                mutex_exit(&rp->r_statelock);
                rp4_addfree(rp, cr);
                return;
        }

        unldvp = rp->r_unldvp;
        rp->r_unldvp = NULL;
        unlname = rp->r_unlname;
        rp->r_unlname = NULL;
        unlcred = rp->r_unlcred;
        rp->r_unlcred = NULL;
        mutex_exit(&rp->r_statelock);

        /*
         * If there are any dirty pages left, then flush
         * them.  This is unfortunate because they just
         * may get thrown away during the remove operation,
         * but we have to do this for correctness.
         */
        if (nfs4_has_pages(vp) &&
            ((rp->r_flags & R4DIRTY) || rp->r_count > 0)) {
                ASSERT(vp->v_type != VCHR);
                e.error = nfs4_putpage(vp, (u_offset_t)0, 0, 0, cr, NULL);
                if (e.error) {
                        /* record first error only */
                        mutex_enter(&rp->r_statelock);
                        if (!rp->r_error)
                                rp->r_error = e.error;
                        mutex_exit(&rp->r_statelock);
                }
        }

        recov_state.rs_flags = 0;
        recov_state.rs_num_retry_despite_err = 0;
recov_retry_remove:
        /*
         * Do the remove operation on the renamed file
         */
        args.ctag = TAG_INACTIVE;

        /*
         * Remove ops: putfh dir; remove
         */
        args.array_len = 2;
        args.array = argop;

        e.error = nfs4_start_op(VTOMI4(unldvp), unldvp, NULL, &recov_state);
        if (e.error) {
                /* release the resources held for the remove */
                kmem_free(unlname, MAXNAMELEN);
                crfree(unlcred);
                VN_RELE(unldvp);
                /*
                 * Try again; this time around r_unldvp will be NULL, so we'll
                 * just call rp4_addfree() and return.
                 */
                goto redo;
        }

        /* putfh directory */
        argop[0].argop = OP_CPUTFH;
        argop[0].nfs_argop4_u.opcputfh.sfh = VTOR4(unldvp)->r_fh;

        /* remove */
        argop[1].argop = OP_CREMOVE;
        argop[1].nfs_argop4_u.opcremove.ctarget = unlname;

        doqueue = 1;
        resp = &res;

#if 0 /* notyet */
        /*
         * Can't do this yet.  We may be being called from
         * dnlc_purge_XXX while that routine is holding a
         * mutex lock to the nc_rele list.  The calls to
         * nfs3_cache_wcc_data may result in calls to
         * dnlc_purge_XXX.  This will result in a deadlock.
         */
        rfs4call(VTOMI4(unldvp), &args, &res, unlcred, &doqueue, 0, &e);
        if (e.error) {
                PURGE_ATTRCACHE4(unldvp);
                resp = NULL;
        } else if (res.status) {
                e.error = geterrno4(res.status);
                PURGE_ATTRCACHE4(unldvp);
                /*
                 * This code is inactive right now
                 * but if made active there should
                 * be a nfs4_end_op() call before
                 * nfs4_purge_stale_fh to avoid start_op()
                 * deadlock. See BugId: 4948726
                 */
                /*
                 * NOTE(review): `error' is not declared in this function;
                 * if this block is ever enabled it presumably should be
                 * e.error -- confirm before activating.
                 */
                nfs4_purge_stale_fh(error, unldvp, cr);
        } else {
                nfs_resop4 *resop;
                REMOVE4res *rm_res;

                resop = &res.array[1];
                rm_res = &resop->nfs_resop4_u.opremove;
                /*
                 * Update directory cache attribute,
                 * readdir and dnlc caches.
                 */
                nfs4_update_dircaches(&rm_res->cinfo, unldvp, NULL, NULL, NULL);
        }
#else
        rfs4call(VTOMI4(unldvp), &args, &res, unlcred, &doqueue, 0, &e);

        PURGE_ATTRCACHE4(unldvp);
#endif

        if (nfs4_needs_recovery(&e, FALSE, unldvp->v_vfsp)) {
                if (nfs4_start_recovery(&e, VTOMI4(unldvp), unldvp, NULL,
                    NULL, NULL, OP_REMOVE, NULL, NULL, NULL) == FALSE) {
                        if (!e.error)
                                (void) xdr_free(xdr_COMPOUND4res_clnt,
                                    (caddr_t)&res);
                        nfs4_end_op(VTOMI4(unldvp), unldvp, NULL,
                            &recov_state, TRUE);
                        goto recov_retry_remove;
                }
        }
        nfs4_end_op(VTOMI4(unldvp), unldvp, NULL, &recov_state, FALSE);

        /*
         * Release stuff held for the remove
         */
        VN_RELE(unldvp);
        if (!e.error && resp)
                (void) xdr_free(xdr_COMPOUND4res_clnt, (caddr_t)resp);

        kmem_free(unlname, MAXNAMELEN);
        crfree(unlcred);
        /* loop in case another unlinked-open rename raced in */
        goto redo;
}
4938 4943  
4939 4944  /*
4940 4945   * Remote file system operations having to do with directory manipulation.
4941 4946   */
4942 4947  /* ARGSUSED3 */
4943 4948  int
4944 4949  nfs4_lookup(vnode_t *dvp, char *nm, vnode_t **vpp, struct pathname *pnp,
4945 4950      int flags, vnode_t *rdir, cred_t *cr, caller_context_t *ct,
4946 4951      int *direntflags, pathname_t *realpnp)
4947 4952  {
4948 4953          int error;
4949 4954          vnode_t *vp, *avp = NULL;
4950 4955          rnode4_t *drp;
4951 4956  
4952 4957          *vpp = NULL;
4953 4958          if (nfs_zone() != VTOMI4(dvp)->mi_zone)
4954 4959                  return (EPERM);
4955 4960          /*
4956 4961           * if LOOKUP_XATTR, must replace dvp (object) with
4957 4962           * object's attrdir before continuing with lookup
4958 4963           */
4959 4964          if (flags & LOOKUP_XATTR) {
4960 4965                  error = nfs4lookup_xattr(dvp, nm, &avp, flags, cr);
4961 4966                  if (error)
4962 4967                          return (error);
4963 4968  
4964 4969                  dvp = avp;
4965 4970  
4966 4971                  /*
4967 4972                   * If lookup is for "", just return dvp now.  The attrdir
4968 4973                   * has already been activated (from nfs4lookup_xattr), and
4969 4974                   * the caller will RELE the original dvp -- not
4970 4975                   * the attrdir.  So, set vpp and return.
4971 4976                   * Currently, when the LOOKUP_XATTR flag is
4972 4977                   * passed to VOP_LOOKUP, the name is always empty, and
4973 4978                   * shortcircuiting here avoids 3 unneeded lock/unlock
4974 4979                   * pairs.
4975 4980                   *
4976 4981                   * If a non-empty name was provided, then it is the
4977 4982                   * attribute name, and it will be looked up below.
4978 4983                   */
4979 4984                  if (*nm == '\0') {
4980 4985                          *vpp = dvp;
4981 4986                          return (0);
4982 4987                  }
4983 4988  
4984 4989                  /*
4985 4990                   * The vfs layer never sends a name when asking for the
4986 4991                   * attrdir, so we should never get here (unless of course
4987 4992                   * name is passed at some time in future -- at which time
4988 4993                   * we'll blow up here).
4989 4994                   */
4990 4995                  ASSERT(0);
4991 4996          }
4992 4997  
4993 4998          drp = VTOR4(dvp);
4994 4999          if (nfs_rw_enter_sig(&drp->r_rwlock, RW_READER, INTR4(dvp)))
4995 5000                  return (EINTR);
4996 5001  
4997 5002          error = nfs4lookup(dvp, nm, vpp, cr, 0);
4998 5003          nfs_rw_exit(&drp->r_rwlock);
4999 5004  
5000 5005          /*
5001 5006           * If vnode is a device, create special vnode.
5002 5007           */
5003 5008          if (!error && ISVDEV((*vpp)->v_type)) {
5004 5009                  vp = *vpp;
5005 5010                  *vpp = specvp(vp, vp->v_rdev, vp->v_type, cr);
5006 5011                  VN_RELE(vp);
5007 5012          }
5008 5013  
5009 5014          return (error);
5010 5015  }
5011 5016  
/*
 * Look up (and possibly create, if CREATE_XATTR_DIR is set in flags)
 * the extended-attribute directory for dvp.  Returns the attrdir
 * vnode in *vpp with a hold, or an errno.  EINVAL means the
 * filesystem or server does not support extended attributes.
 */
/* ARGSUSED */
static int
nfs4lookup_xattr(vnode_t *dvp, char *nm, vnode_t **vpp, int flags, cred_t *cr)
{
        int error;
        rnode4_t *drp;
        int cflag = ((flags & CREATE_XATTR_DIR) != 0);
        mntinfo4_t *mi;

        mi = VTOMI4(dvp);
        if (!(mi->mi_vfsp->vfs_flag & VFS_XATTR) &&
            !vfs_has_feature(mi->mi_vfsp, VFSFT_SYSATTR_VIEWS))
                return (EINVAL);

        drp = VTOR4(dvp);
        if (nfs_rw_enter_sig(&drp->r_rwlock, RW_READER, INTR4(dvp)))
                return (EINTR);

        mutex_enter(&drp->r_statelock);
        /*
         * If the server doesn't support xattrs just return EINVAL
         */
        if (drp->r_xattr_dir == NFS4_XATTR_DIR_NOTSUPP) {
                mutex_exit(&drp->r_statelock);
                nfs_rw_exit(&drp->r_rwlock);
                return (EINVAL);
        }

        /*
         * If there is a cached xattr directory entry,
         * use it as long as the attributes are valid. If the
         * attributes are not valid, take the simple approach and
         * free the cached value and re-fetch a new value.
         *
         * We don't cache negative entries for now; if we did we
         * would need to check if the file has changed on every
         * lookup. But xattrs don't exist very often and failing
         * an openattr is not much more expensive than an NVERIFY or GETATTR
         * so do an openattr over the wire for now.
         */
        if (drp->r_xattr_dir != NULL) {
                if (ATTRCACHE4_VALID(dvp)) {
                        VN_HOLD(drp->r_xattr_dir);
                        *vpp = drp->r_xattr_dir;
                        mutex_exit(&drp->r_statelock);
                        nfs_rw_exit(&drp->r_rwlock);
                        return (0);
                }
                /* stale attrs: drop the cached attrdir and refetch */
                VN_RELE(drp->r_xattr_dir);
                drp->r_xattr_dir = NULL;
        }
        mutex_exit(&drp->r_statelock);

        error = nfs4openattr(dvp, vpp, cflag, cr);

        nfs_rw_exit(&drp->r_rwlock);

        return (error);
}
5071 5076  
5072 5077  static int
5073 5078  nfs4lookup(vnode_t *dvp, char *nm, vnode_t **vpp, cred_t *cr, int skipdnlc)
5074 5079  {
5075 5080          int error;
5076 5081          rnode4_t *drp;
5077 5082  
5078 5083          ASSERT(nfs_zone() == VTOMI4(dvp)->mi_zone);
5079 5084  
5080 5085          /*
5081 5086           * If lookup is for "", just return dvp.  Don't need
5082 5087           * to send it over the wire, look it up in the dnlc,
5083 5088           * or perform any access checks.
5084 5089           */
5085 5090          if (*nm == '\0') {
5086 5091                  VN_HOLD(dvp);
5087 5092                  *vpp = dvp;
5088 5093                  return (0);
5089 5094          }
5090 5095  
5091 5096          /*
5092 5097           * Can't do lookups in non-directories.
5093 5098           */
5094 5099          if (dvp->v_type != VDIR)
5095 5100                  return (ENOTDIR);
5096 5101  
5097 5102          /*
5098 5103           * If lookup is for ".", just return dvp.  Don't need
5099 5104           * to send it over the wire or look it up in the dnlc,
5100 5105           * just need to check access.
5101 5106           */
5102 5107          if (nm[0] == '.' && nm[1] == '\0') {
5103 5108                  error = nfs4_access(dvp, VEXEC, 0, cr, NULL);
5104 5109                  if (error)
5105 5110                          return (error);
5106 5111                  VN_HOLD(dvp);
5107 5112                  *vpp = dvp;
5108 5113                  return (0);
5109 5114          }
5110 5115  
5111 5116          drp = VTOR4(dvp);
5112 5117          if (!(drp->r_flags & R4LOOKUP)) {
5113 5118                  mutex_enter(&drp->r_statelock);
5114 5119                  drp->r_flags |= R4LOOKUP;
5115 5120                  mutex_exit(&drp->r_statelock);
5116 5121          }
5117 5122  
5118 5123          *vpp = NULL;
5119 5124          /*
5120 5125           * Lookup this name in the DNLC.  If there is no entry
5121 5126           * lookup over the wire.
5122 5127           */
5123 5128          if (!skipdnlc)
5124 5129                  *vpp = dnlc_lookup(dvp, nm);
5125 5130          if (*vpp == NULL) {
5126 5131                  /*
5127 5132                   * We need to go over the wire to lookup the name.
5128 5133                   */
5129 5134                  return (nfs4lookupnew_otw(dvp, nm, vpp, cr));
5130 5135          }
5131 5136  
5132 5137          /*
5133 5138           * We hit on the dnlc
5134 5139           */
5135 5140          if (*vpp != DNLC_NO_VNODE ||
5136 5141              (dvp->v_vfsp->vfs_flag & VFS_RDONLY)) {
5137 5142                  /*
5138 5143                   * But our attrs may not be valid.
5139 5144                   */
5140 5145                  if (ATTRCACHE4_VALID(dvp)) {
5141 5146                          error = nfs4_waitfor_purge_complete(dvp);
5142 5147                          if (error) {
5143 5148                                  VN_RELE(*vpp);
5144 5149                                  *vpp = NULL;
5145 5150                                  return (error);
5146 5151                          }
5147 5152  
5148 5153                          /*
5149 5154                           * If after the purge completes, check to make sure
5150 5155                           * our attrs are still valid.
5151 5156                           */
5152 5157                          if (ATTRCACHE4_VALID(dvp)) {
5153 5158                                  /*
5154 5159                                   * If we waited for a purge we may have
5155 5160                                   * lost our vnode so look it up again.
5156 5161                                   */
5157 5162                                  VN_RELE(*vpp);
5158 5163                                  *vpp = dnlc_lookup(dvp, nm);
5159 5164                                  if (*vpp == NULL)
5160 5165                                          return (nfs4lookupnew_otw(dvp,
5161 5166                                              nm, vpp, cr));
5162 5167  
5163 5168                                  /*
5164 5169                                   * The access cache should almost always hit
5165 5170                                   */
5166 5171                                  error = nfs4_access(dvp, VEXEC, 0, cr, NULL);
5167 5172  
5168 5173                                  if (error) {
5169 5174                                          VN_RELE(*vpp);
5170 5175                                          *vpp = NULL;
5171 5176                                          return (error);
5172 5177                                  }
5173 5178                                  if (*vpp == DNLC_NO_VNODE) {
5174 5179                                          VN_RELE(*vpp);
5175 5180                                          *vpp = NULL;
5176 5181                                          return (ENOENT);
5177 5182                                  }
5178 5183                                  return (0);
5179 5184                          }
5180 5185                  }
5181 5186          }
5182 5187  
5183 5188          ASSERT(*vpp != NULL);
5184 5189  
5185 5190          /*
5186 5191           * We may have gotten here because we have one of the following cases:
5187 5192           *      1) vpp != DNLC_NO_VNODE, our attrs have timed out so we
5188 5193           *              need to validate them.
5189 5194           *      2) vpp == DNLC_NO_VNODE, a negative entry that we always
5190 5195           *              must validate.
5191 5196           *
5192 5197           * Go to the server and check if the directory has changed, if
5193 5198           * it hasn't we are done and can use the dnlc entry.
5194 5199           */
5195 5200          return (nfs4lookupvalidate_otw(dvp, nm, vpp, cr));
5196 5201  }
5197 5202  
5198 5203  /*
5199 5204   * Go to the server and check if the directory has changed, if
5200 5205   * it hasn't we are done and can use the dnlc entry.  If it
5201 5206   * has changed we get a new copy of its attributes and check
5202 5207   * the access for VEXEC, then relookup the filename and
5203 5208   * get its filehandle and attributes.
5204 5209   *
5205 5210   * PUTFH dfh NVERIFY GETATTR ACCESS LOOKUP GETFH GETATTR
5206 5211   *      if the NVERIFY failed we must
5207 5212   *              purge the caches
5208 5213   *              cache new attributes (will set r_time_attr_inval)
5209 5214   *              cache new access
5210 5215   *              recheck VEXEC access
5211 5216   *              add name to dnlc, possibly negative
5212 5217   *              if LOOKUP succeeded
5213 5218   *                      cache new attributes
5214 5219   *      else
5215 5220   *              set a new r_time_attr_inval for dvp
5216 5221   *              check to make sure we have access
5217 5222   *
5218 5223   * The vpp returned is the vnode passed in if the directory is valid,
5219 5224   * a new vnode if successful lookup, or NULL on error.
5220 5225   */
5221 5226  static int
5222 5227  nfs4lookupvalidate_otw(vnode_t *dvp, char *nm, vnode_t **vpp, cred_t *cr)
5223 5228  {
5224 5229          COMPOUND4args_clnt args;
5225 5230          COMPOUND4res_clnt res;
5226 5231          fattr4 *ver_fattr;
5227 5232          fattr4_change dchange;
5228 5233          int32_t *ptr;
5229 5234          int argoplist_size  = 7 * sizeof (nfs_argop4);
5230 5235          nfs_argop4 *argop;
5231 5236          int doqueue;
5232 5237          mntinfo4_t *mi;
5233 5238          nfs4_recov_state_t recov_state;
5234 5239          hrtime_t t;
5235 5240          int isdotdot;
5236 5241          vnode_t *nvp;
5237 5242          nfs_fh4 *fhp;
5238 5243          nfs4_sharedfh_t *sfhp;
5239 5244          nfs4_access_type_t cacc;
5240 5245          rnode4_t *nrp;
5241 5246          rnode4_t *drp = VTOR4(dvp);
5242 5247          nfs4_ga_res_t *garp = NULL;
5243 5248          nfs4_error_t e = { 0, NFS4_OK, RPC_SUCCESS };
5244 5249  
5245 5250          ASSERT(nfs_zone() == VTOMI4(dvp)->mi_zone);
5246 5251          ASSERT(nm != NULL);
5247 5252          ASSERT(nm[0] != '\0');
5248 5253          ASSERT(dvp->v_type == VDIR);
5249 5254          ASSERT(nm[0] != '.' || nm[1] != '\0');
5250 5255          ASSERT(*vpp != NULL);
5251 5256  
5252 5257          if (nm[0] == '.' && nm[1] == '.' && nm[2] == '\0') {
5253 5258                  isdotdot = 1;
5254 5259                  args.ctag = TAG_LOOKUP_VPARENT;
5255 5260          } else {
5256 5261                  /*
5257 5262                   * If dvp were a stub, it should have triggered and caused
5258 5263                   * a mount for us to get this far.
5259 5264                   */
5260 5265                  ASSERT(!RP_ISSTUB(VTOR4(dvp)));
5261 5266  
5262 5267                  isdotdot = 0;
5263 5268                  args.ctag = TAG_LOOKUP_VALID;
5264 5269          }
5265 5270  
5266 5271          mi = VTOMI4(dvp);
5267 5272          recov_state.rs_flags = 0;
5268 5273          recov_state.rs_num_retry_despite_err = 0;
5269 5274  
5270 5275          nvp = NULL;
5271 5276  
5272 5277          /* Save the original mount point security information */
5273 5278          (void) save_mnt_secinfo(mi->mi_curr_serv);
5274 5279  
5275 5280  recov_retry:
5276 5281          e.error = nfs4_start_fop(mi, dvp, NULL, OH_LOOKUP,
5277 5282              &recov_state, NULL);
5278 5283          if (e.error) {
5279 5284                  (void) check_mnt_secinfo(mi->mi_curr_serv, nvp);
5280 5285                  VN_RELE(*vpp);
5281 5286                  *vpp = NULL;
5282 5287                  return (e.error);
5283 5288          }
5284 5289  
5285 5290          argop = kmem_alloc(argoplist_size, KM_SLEEP);
5286 5291  
5287 5292          /* PUTFH dfh NVERIFY GETATTR ACCESS LOOKUP GETFH GETATTR */
5288 5293          args.array_len = 7;
5289 5294          args.array = argop;
5290 5295  
5291 5296          /* 0. putfh file */
5292 5297          argop[0].argop = OP_CPUTFH;
5293 5298          argop[0].nfs_argop4_u.opcputfh.sfh = VTOR4(dvp)->r_fh;
5294 5299  
5295 5300          /* 1. nverify the change info */
5296 5301          argop[1].argop = OP_NVERIFY;
5297 5302          ver_fattr = &argop[1].nfs_argop4_u.opnverify.obj_attributes;
5298 5303          ver_fattr->attrmask = FATTR4_CHANGE_MASK;
5299 5304          ver_fattr->attrlist4 = (char *)&dchange;
5300 5305          ptr = (int32_t *)&dchange;
5301 5306          IXDR_PUT_HYPER(ptr, VTOR4(dvp)->r_change);
5302 5307          ver_fattr->attrlist4_len = sizeof (fattr4_change);
5303 5308  
5304 5309          /* 2. getattr directory */
5305 5310          argop[2].argop = OP_GETATTR;
5306 5311          argop[2].nfs_argop4_u.opgetattr.attr_request = NFS4_VATTR_MASK;
5307 5312          argop[2].nfs_argop4_u.opgetattr.mi = VTOMI4(dvp);
5308 5313  
5309 5314          /* 3. access directory */
5310 5315          argop[3].argop = OP_ACCESS;
5311 5316          argop[3].nfs_argop4_u.opaccess.access = ACCESS4_READ | ACCESS4_DELETE |
5312 5317              ACCESS4_MODIFY | ACCESS4_EXTEND | ACCESS4_LOOKUP;
5313 5318  
5314 5319          /* 4. lookup name */
5315 5320          if (isdotdot) {
5316 5321                  argop[4].argop = OP_LOOKUPP;
5317 5322          } else {
5318 5323                  argop[4].argop = OP_CLOOKUP;
5319 5324                  argop[4].nfs_argop4_u.opclookup.cname = nm;
5320 5325          }
5321 5326  
5322 5327          /* 5. resulting file handle */
5323 5328          argop[5].argop = OP_GETFH;
5324 5329  
5325 5330          /* 6. resulting file attributes */
5326 5331          argop[6].argop = OP_GETATTR;
5327 5332          argop[6].nfs_argop4_u.opgetattr.attr_request = NFS4_VATTR_MASK;
5328 5333          argop[6].nfs_argop4_u.opgetattr.mi = VTOMI4(dvp);
5329 5334  
5330 5335          doqueue = 1;
5331 5336          t = gethrtime();
5332 5337  
5333 5338          rfs4call(VTOMI4(dvp), &args, &res, cr, &doqueue, 0, &e);
5334 5339  
5335 5340          if (!isdotdot && res.status == NFS4ERR_MOVED) {
5336 5341                  e.error = nfs4_setup_referral(dvp, nm, vpp, cr);
5337 5342                  if (e.error != 0 && *vpp != NULL)
5338 5343                          VN_RELE(*vpp);
5339 5344                  nfs4_end_fop(mi, dvp, NULL, OH_LOOKUP,
5340 5345                      &recov_state, FALSE);
5341 5346                  (void) xdr_free(xdr_COMPOUND4res_clnt, (caddr_t)&res);
5342 5347                  kmem_free(argop, argoplist_size);
5343 5348                  return (e.error);
5344 5349          }
5345 5350  
5346 5351          if (nfs4_needs_recovery(&e, FALSE, dvp->v_vfsp)) {
5347 5352                  /*
5348 5353                   * For WRONGSEC of a non-dotdot case, send secinfo directly
5349 5354                   * from this thread, do not go thru the recovery thread since
5350 5355                   * we need the nm information.
5351 5356                   *
5352 5357                   * Not doing dotdot case because there is no specification
5353 5358                   * for (PUTFH, SECINFO "..") yet.
5354 5359                   */
5355 5360                  if (!isdotdot && res.status == NFS4ERR_WRONGSEC) {
5356 5361                          if ((e.error = nfs4_secinfo_vnode_otw(dvp, nm, cr)))
5357 5362                                  nfs4_end_fop(mi, dvp, NULL, OH_LOOKUP,
5358 5363                                      &recov_state, FALSE);
5359 5364                          else
5360 5365                                  nfs4_end_fop(mi, dvp, NULL, OH_LOOKUP,
5361 5366                                      &recov_state, TRUE);
5362 5367                          (void) xdr_free(xdr_COMPOUND4res_clnt, (caddr_t)&res);
5363 5368                          kmem_free(argop, argoplist_size);
5364 5369                          if (!e.error)
5365 5370                                  goto recov_retry;
5366 5371                          (void) check_mnt_secinfo(mi->mi_curr_serv, nvp);
5367 5372                          VN_RELE(*vpp);
5368 5373                          *vpp = NULL;
5369 5374                          return (e.error);
5370 5375                  }
5371 5376  
5372 5377                  if (nfs4_start_recovery(&e, mi, dvp, NULL, NULL, NULL,
5373 5378                      OP_LOOKUP, NULL, NULL, NULL) == FALSE) {
5374 5379                          nfs4_end_fop(mi, dvp, NULL, OH_LOOKUP,
5375 5380                              &recov_state, TRUE);
5376 5381  
5377 5382                          (void) xdr_free(xdr_COMPOUND4res_clnt, (caddr_t)&res);
5378 5383                          kmem_free(argop, argoplist_size);
5379 5384                          goto recov_retry;
5380 5385                  }
5381 5386          }
5382 5387  
5383 5388          nfs4_end_fop(mi, dvp, NULL, OH_LOOKUP, &recov_state, FALSE);
5384 5389  
5385 5390          if (e.error || res.array_len == 0) {
5386 5391                  /*
5387 5392                   * If e.error isn't set, then reply has no ops (or we couldn't
5388 5393                   * be here).  The only legal way to reply without an op array
5389 5394                   * is via NFS4ERR_MINOR_VERS_MISMATCH.  An ops array should
5390 5395                   * be in the reply for all other status values.
5391 5396                   *
5392 5397                   * For valid replies without an ops array, return ENOTSUP
5393 5398                   * (geterrno4 xlation of VERS_MISMATCH).  For illegal replies,
5394 5399                   * return EIO -- don't trust status.
5395 5400                   */
5396 5401                  if (e.error == 0)
5397 5402                          e.error = (res.status == NFS4ERR_MINOR_VERS_MISMATCH) ?
5398 5403                              ENOTSUP : EIO;
5399 5404                  VN_RELE(*vpp);
5400 5405                  *vpp = NULL;
5401 5406                  kmem_free(argop, argoplist_size);
5402 5407                  (void) check_mnt_secinfo(mi->mi_curr_serv, nvp);
5403 5408                  return (e.error);
5404 5409          }
5405 5410  
5406 5411          if (res.status != NFS4ERR_SAME) {
5407 5412                  e.error = geterrno4(res.status);
5408 5413  
5409 5414                  /*
5410 5415                   * The NVERIFY "failed", so the directory has changed.
5411 5416                   * First make sure PUTFH succeeded and NVERIFY "failed"
5412 5417                   * cleanly.
5413 5418                   */
5414 5419                  if ((res.array[0].nfs_resop4_u.opputfh.status != NFS4_OK) ||
5415 5420                      (res.array[1].nfs_resop4_u.opnverify.status != NFS4_OK)) {
5416 5421                          nfs4_purge_stale_fh(e.error, dvp, cr);
5417 5422                          VN_RELE(*vpp);
5418 5423                          *vpp = NULL;
5419 5424                          goto exit;
5420 5425                  }
5421 5426  
5422 5427                  /*
5423 5428                   * We know the NVERIFY "failed" so we must:
5424 5429                   *      purge the caches (access and indirectly dnlc if needed)
5425 5430                   */
5426 5431                  nfs4_purge_caches(dvp, NFS4_NOPURGE_DNLC, cr, TRUE);
5427 5432  
5428 5433                  if (res.array[2].nfs_resop4_u.opgetattr.status != NFS4_OK) {
5429 5434                          nfs4_purge_stale_fh(e.error, dvp, cr);
5430 5435                          VN_RELE(*vpp);
5431 5436                          *vpp = NULL;
5432 5437                          goto exit;
5433 5438                  }
5434 5439  
5435 5440                  /*
5436 5441                   * Install new cached attributes for the directory
5437 5442                   */
5438 5443                  nfs4_attr_cache(dvp,
5439 5444                      &res.array[2].nfs_resop4_u.opgetattr.ga_res,
5440 5445                      t, cr, FALSE, NULL);
5441 5446  
5442 5447                  if (res.array[3].nfs_resop4_u.opaccess.status != NFS4_OK) {
5443 5448                          nfs4_purge_stale_fh(e.error, dvp, cr);
5444 5449                          VN_RELE(*vpp);
5445 5450                          *vpp = NULL;
5446 5451                          e.error = geterrno4(res.status);
5447 5452                          goto exit;
5448 5453                  }
5449 5454  
5450 5455                  /*
5451 5456                   * Now we know the directory is valid,
5452 5457                   * cache new directory access
5453 5458                   */
5454 5459                  nfs4_access_cache(drp,
5455 5460                      args.array[3].nfs_argop4_u.opaccess.access,
5456 5461                      res.array[3].nfs_resop4_u.opaccess.access, cr);
5457 5462  
5458 5463                  /*
5459 5464                   * recheck VEXEC access
5460 5465                   */
5461 5466                  cacc = nfs4_access_check(drp, ACCESS4_LOOKUP, cr);
5462 5467                  if (cacc != NFS4_ACCESS_ALLOWED) {
5463 5468                          /*
5464 5469                           * Directory permissions might have been revoked
5465 5470                           */
5466 5471                          if (cacc == NFS4_ACCESS_DENIED) {
5467 5472                                  e.error = EACCES;
5468 5473                                  VN_RELE(*vpp);
5469 5474                                  *vpp = NULL;
5470 5475                                  goto exit;
5471 5476                          }
5472 5477  
5473 5478                          /*
5474 5479                           * Somehow we must not have asked for enough
5475 5480                           * so try a singleton ACCESS, should never happen.
5476 5481                           */
5477 5482                          e.error = nfs4_access(dvp, VEXEC, 0, cr, NULL);
5478 5483                          if (e.error) {
5479 5484                                  VN_RELE(*vpp);
5480 5485                                  *vpp = NULL;
5481 5486                                  goto exit;
5482 5487                          }
5483 5488                  }
5484 5489  
5485 5490                  e.error = geterrno4(res.status);
5486 5491                  if (res.array[4].nfs_resop4_u.oplookup.status != NFS4_OK) {
5487 5492                          /*
5488 5493                           * The lookup failed, probably no entry
5489 5494                           */
5490 5495                          if (e.error == ENOENT && nfs4_lookup_neg_cache) {
5491 5496                                  dnlc_update(dvp, nm, DNLC_NO_VNODE);
5492 5497                          } else {
5493 5498                                  /*
5494 5499                                   * Might be some other error, so remove
5495 5500                                   * the dnlc entry to make sure we start all
5496 5501                                   * over again next time.
5497 5502                                   */
5498 5503                                  dnlc_remove(dvp, nm);
5499 5504                          }
5500 5505                          VN_RELE(*vpp);
5501 5506                          *vpp = NULL;
5502 5507                          goto exit;
5503 5508                  }
5504 5509  
5505 5510                  if (res.array[5].nfs_resop4_u.opgetfh.status != NFS4_OK) {
5506 5511                          /*
5507 5512                           * The file exists but we can't get its fh for
5508 5513                           * some unknown reason.  Remove it from the dnlc
5509 5514                           * and error out to be safe.
5510 5515                           */
5511 5516                          dnlc_remove(dvp, nm);
5512 5517                          VN_RELE(*vpp);
5513 5518                          *vpp = NULL;
5514 5519                          goto exit;
5515 5520                  }
5516 5521                  fhp = &res.array[5].nfs_resop4_u.opgetfh.object;
5517 5522                  if (fhp->nfs_fh4_len == 0) {
5518 5523                          /*
5519 5524                           * The file exists but has a bogus (zero-length) fh
5520 5525                           * for some unknown reason.  Remove it from the dnlc
5521 5526                           * and error out to be safe.
5522 5527                           */
5523 5528                          e.error = ENOENT;
5524 5529                          dnlc_remove(dvp, nm);
5525 5530                          VN_RELE(*vpp);
5526 5531                          *vpp = NULL;
5527 5532                          goto exit;
5528 5533                  }
5529 5534                  sfhp = sfh4_get(fhp, mi);
5530 5535  
5531 5536                  if (res.array[6].nfs_resop4_u.opgetattr.status == NFS4_OK)
5532 5537                          garp = &res.array[6].nfs_resop4_u.opgetattr.ga_res;
5533 5538  
5534 5539                  /*
5535 5540                   * Make the new rnode
5536 5541                   */
5537 5542                  if (isdotdot) {
5538 5543                          e.error = nfs4_make_dotdot(sfhp, t, dvp, cr, &nvp, 1);
5539 5544                          if (e.error) {
5540 5545                                  sfh4_rele(&sfhp);
5541 5546                                  VN_RELE(*vpp);
5542 5547                                  *vpp = NULL;
5543 5548                                  goto exit;
5544 5549                          }
5545 5550                          /*
5546 5551                           * XXX if nfs4_make_dotdot uses an existing rnode
5547 5552                           * XXX it doesn't update the attributes.
5548 5553                           * XXX for now just save them again to save an OTW
5549 5554                           */
5550 5555                          nfs4_attr_cache(nvp, garp, t, cr, FALSE, NULL);
5551 5556                  } else {
5552 5557                          nvp = makenfs4node(sfhp, garp, dvp->v_vfsp, t, cr,
5553 5558                              dvp, fn_get(VTOSV(dvp)->sv_name, nm, sfhp));
5554 5559                          /*
5555 5560                           * If v_type == VNON, then garp was NULL because
5556 5561                           * the last op in the compound failed and makenfs4node
5557 5562                           * could not find the vnode for sfhp. It created
5558 5563                           * a new vnode, so we have nothing to purge here.
5559 5564                           */
5560 5565                          if (nvp->v_type == VNON) {
5561 5566                                  vattr_t vattr;
5562 5567  
5563 5568                                  vattr.va_mask = AT_TYPE;
5564 5569                                  /*
5565 5570                                   * N.B. We've already called nfs4_end_fop above.
5566 5571                                   */
5567 5572                                  e.error = nfs4getattr(nvp, &vattr, cr);
5568 5573                                  if (e.error) {
5569 5574                                          sfh4_rele(&sfhp);
5570 5575                                          VN_RELE(*vpp);
5571 5576                                          *vpp = NULL;
5572 5577                                          VN_RELE(nvp);
5573 5578                                          goto exit;
5574 5579                                  }
5575 5580                                  nvp->v_type = vattr.va_type;
5576 5581                          }
5577 5582                  }
5578 5583                  sfh4_rele(&sfhp);
5579 5584  
5580 5585                  nrp = VTOR4(nvp);
5581 5586                  mutex_enter(&nrp->r_statev4_lock);
5582 5587                  if (!nrp->created_v4) {       /* NOTE(review): created_v4 nodes skip dnlc_update — confirm why */
5583 5588                          mutex_exit(&nrp->r_statev4_lock);
5584 5589                          dnlc_update(dvp, nm, nvp);
5585 5590                  } else
5586 5591                          mutex_exit(&nrp->r_statev4_lock);
5587 5592  
5588 5593                  VN_RELE(*vpp);
5589 5594                  *vpp = nvp;
5590 5595          } else {
5591 5596                  hrtime_t now;
5592 5597                  hrtime_t delta = 0;
5593 5598  
5594 5599                  e.error = 0;
5595 5600  
5596 5601                  /*
5597 5602                   * Because the NVERIFY "succeeded" we know that the
5598 5603                   * directory attributes are still valid
5599 5604                   * so update r_time_attr_inval
5600 5605                   */
5601 5606                  now = gethrtime();
5602 5607                  mutex_enter(&drp->r_statelock);
5603 5608                  if (!(mi->mi_flags & MI4_NOAC) && !(dvp->v_flag & VNOCACHE)) {
5604 5609                          delta = now - drp->r_time_attr_saved;
5605 5610                          if (delta < mi->mi_acdirmin)
5606 5611                                  delta = mi->mi_acdirmin;
5607 5612                          else if (delta > mi->mi_acdirmax)
5608 5613                                  delta = mi->mi_acdirmax;
5609 5614                  }
5610 5615                  drp->r_time_attr_inval = now + delta;
5611 5616                  mutex_exit(&drp->r_statelock);
5612 5617                  dnlc_update(dvp, nm, *vpp);
5613 5618  
5614 5619                  /*
5615 5620                   * Even though we have a valid directory attr cache
5616 5621                   * and dnlc entry, we may not have access.
5617 5622                   * This should almost always hit the cache.
5618 5623                   */
5619 5624                  e.error = nfs4_access(dvp, VEXEC, 0, cr, NULL);
5620 5625                  if (e.error) {
5621 5626                          VN_RELE(*vpp);
5622 5627                          *vpp = NULL;
5623 5628                  }
5624 5629  
5625 5630                  if (*vpp == DNLC_NO_VNODE) {
5626 5631                          VN_RELE(*vpp);
5627 5632                          *vpp = NULL;
5628 5633                          e.error = ENOENT;
5629 5634                  }
5630 5635          }
5631 5636  
5632 5637  exit:
5633 5638          (void) xdr_free(xdr_COMPOUND4res_clnt, (caddr_t)&res);
5634 5639          kmem_free(argop, argoplist_size);
5635 5640          (void) check_mnt_secinfo(mi->mi_curr_serv, nvp);
5636 5641          return (e.error);
5637 5642  }
5638 5643  
5639 5644  /*
5640 5645   * We need to go over the wire to lookup the name, but
5641 5646   * while we are there verify the directory has not
5642 5647   * changed but if it has, get new attributes and check access
5643 5648   *
5644 5649   * PUTFH dfh SAVEFH LOOKUP nm GETFH GETATTR RESTOREFH
5645 5650   *                                      NVERIFY GETATTR ACCESS
5646 5651   *
5647 5652   * With the results:
5648 5653   *      if the NVERIFY failed we must purge the caches, add new attributes,
5649 5654   *              and cache new access.
5650 5655   *      set a new r_time_attr_inval
5651 5656   *      add name to dnlc, possibly negative
5652 5657   *      if LOOKUP succeeded
5653 5658   *              cache new attributes
5654 5659   */
5655 5660  static int
5656 5661  nfs4lookupnew_otw(vnode_t *dvp, char *nm, vnode_t **vpp, cred_t *cr)
5657 5662  {
5658 5663          COMPOUND4args_clnt args;
5659 5664          COMPOUND4res_clnt res;
5660 5665          fattr4 *ver_fattr;
5661 5666          fattr4_change dchange;
5662 5667          int32_t *ptr;
5663 5668          nfs4_ga_res_t *garp = NULL;
5664 5669          int argoplist_size  = 9 * sizeof (nfs_argop4);
5665 5670          nfs_argop4 *argop;
5666 5671          int doqueue;
5667 5672          mntinfo4_t *mi;
5668 5673          nfs4_recov_state_t recov_state;
5669 5674          hrtime_t t;
5670 5675          int isdotdot;
5671 5676          vnode_t *nvp;
5672 5677          nfs_fh4 *fhp;
5673 5678          nfs4_sharedfh_t *sfhp;
5674 5679          nfs4_access_type_t cacc;
5675 5680          rnode4_t *nrp;
5676 5681          rnode4_t *drp = VTOR4(dvp);
5677 5682          nfs4_error_t e = { 0, NFS4_OK, RPC_SUCCESS };
5678 5683  
5679 5684          ASSERT(nfs_zone() == VTOMI4(dvp)->mi_zone);
5680 5685          ASSERT(nm != NULL);
5681 5686          ASSERT(nm[0] != '\0');
5682 5687          ASSERT(dvp->v_type == VDIR);
5683 5688          ASSERT(nm[0] != '.' || nm[1] != '\0');
5684 5689          ASSERT(*vpp == NULL);
5685 5690  
5686 5691          if (nm[0] == '.' && nm[1] == '.' && nm[2] == '\0') {
5687 5692                  isdotdot = 1;
5688 5693                  args.ctag = TAG_LOOKUP_PARENT;
5689 5694          } else {
5690 5695                  /*
5691 5696                   * If dvp were a stub, it should have triggered and caused
5692 5697                   * a mount for us to get this far.
5693 5698                   */
5694 5699                  ASSERT(!RP_ISSTUB(VTOR4(dvp)));
5695 5700  
5696 5701                  isdotdot = 0;
5697 5702                  args.ctag = TAG_LOOKUP;
5698 5703          }
5699 5704  
5700 5705          mi = VTOMI4(dvp);
5701 5706          recov_state.rs_flags = 0;
5702 5707          recov_state.rs_num_retry_despite_err = 0;
5703 5708  
5704 5709          nvp = NULL;
5705 5710  
5706 5711          /* Save the original mount point security information */
5707 5712          (void) save_mnt_secinfo(mi->mi_curr_serv);
5708 5713  
5709 5714  recov_retry:
5710 5715          e.error = nfs4_start_fop(mi, dvp, NULL, OH_LOOKUP,
5711 5716              &recov_state, NULL);
5712 5717          if (e.error) {
5713 5718                  (void) check_mnt_secinfo(mi->mi_curr_serv, nvp);
5714 5719                  return (e.error);
5715 5720          }
5716 5721  
5717 5722          argop = kmem_alloc(argoplist_size, KM_SLEEP);
5718 5723  
5719 5724          /* PUTFH SAVEFH LOOKUP GETFH GETATTR RESTOREFH NVERIFY GETATTR ACCESS */
5720 5725          args.array_len = 9;
5721 5726          args.array = argop;
5722 5727  
5723 5728          /* 0. putfh file */
5724 5729          argop[0].argop = OP_CPUTFH;
5725 5730          argop[0].nfs_argop4_u.opcputfh.sfh = VTOR4(dvp)->r_fh;
5726 5731  
5727 5732          /* 1. savefh for the nverify */
5728 5733          argop[1].argop = OP_SAVEFH;
5729 5734  
5730 5735          /* 2. lookup name */
5731 5736          if (isdotdot) {
5732 5737                  argop[2].argop = OP_LOOKUPP;
5733 5738          } else {
5734 5739                  argop[2].argop = OP_CLOOKUP;
5735 5740                  argop[2].nfs_argop4_u.opclookup.cname = nm;
5736 5741          }
5737 5742  
5738 5743          /* 3. resulting file handle */
5739 5744          argop[3].argop = OP_GETFH;
5740 5745  
5741 5746          /* 4. resulting file attributes */
5742 5747          argop[4].argop = OP_GETATTR;
5743 5748          argop[4].nfs_argop4_u.opgetattr.attr_request = NFS4_VATTR_MASK;
5744 5749          argop[4].nfs_argop4_u.opgetattr.mi = VTOMI4(dvp);
5745 5750  
5746 5751          /* 5. restorefh back the directory for the nverify */
5747 5752          argop[5].argop = OP_RESTOREFH;
5748 5753  
5749 5754          /* 6. nverify the change info */
5750 5755          argop[6].argop = OP_NVERIFY;
5751 5756          ver_fattr = &argop[6].nfs_argop4_u.opnverify.obj_attributes;
5752 5757          ver_fattr->attrmask = FATTR4_CHANGE_MASK;
5753 5758          ver_fattr->attrlist4 = (char *)&dchange;
5754 5759          ptr = (int32_t *)&dchange;
5755 5760          IXDR_PUT_HYPER(ptr, VTOR4(dvp)->r_change);
5756 5761          ver_fattr->attrlist4_len = sizeof (fattr4_change);
5757 5762  
5758 5763          /* 7. getattr directory */
5759 5764          argop[7].argop = OP_GETATTR;
5760 5765          argop[7].nfs_argop4_u.opgetattr.attr_request = NFS4_VATTR_MASK;
5761 5766          argop[7].nfs_argop4_u.opgetattr.mi = VTOMI4(dvp);
5762 5767  
5763 5768          /* 8. access directory */
5764 5769          argop[8].argop = OP_ACCESS;
5765 5770          argop[8].nfs_argop4_u.opaccess.access = ACCESS4_READ | ACCESS4_DELETE |
5766 5771              ACCESS4_MODIFY | ACCESS4_EXTEND | ACCESS4_LOOKUP;
5767 5772  
5768 5773          doqueue = 1;
5769 5774          t = gethrtime();
5770 5775  
5771 5776          rfs4call(VTOMI4(dvp), &args, &res, cr, &doqueue, 0, &e);
5772 5777  
5773 5778          if (!isdotdot && res.status == NFS4ERR_MOVED) {
5774 5779                  e.error = nfs4_setup_referral(dvp, nm, vpp, cr);
5775 5780                  if (e.error != 0 && *vpp != NULL)
5776 5781                          VN_RELE(*vpp);
5777 5782                  nfs4_end_fop(mi, dvp, NULL, OH_LOOKUP,
5778 5783                      &recov_state, FALSE);
5779 5784                  (void) xdr_free(xdr_COMPOUND4res_clnt, (caddr_t)&res);
5780 5785                  kmem_free(argop, argoplist_size);
5781 5786                  return (e.error);
5782 5787          }
5783 5788  
5784 5789          if (nfs4_needs_recovery(&e, FALSE, dvp->v_vfsp)) {
5785 5790                  /*
5786 5791                   * For WRONGSEC of a non-dotdot case, send secinfo directly
5787 5792                   * from this thread, do not go thru the recovery thread since
5788 5793                   * we need the nm information.
5789 5794                   *
5790 5795                   * Not doing dotdot case because there is no specification
5791 5796                   * for (PUTFH, SECINFO "..") yet.
5792 5797                   */
5793 5798                  if (!isdotdot && res.status == NFS4ERR_WRONGSEC) {
5794 5799                          if ((e.error = nfs4_secinfo_vnode_otw(dvp, nm, cr)))
5795 5800                                  nfs4_end_fop(mi, dvp, NULL, OH_LOOKUP,
5796 5801                                      &recov_state, FALSE);
5797 5802                          else
5798 5803                                  nfs4_end_fop(mi, dvp, NULL, OH_LOOKUP,
5799 5804                                      &recov_state, TRUE);
5800 5805                          (void) xdr_free(xdr_COMPOUND4res_clnt, (caddr_t)&res);
5801 5806                          kmem_free(argop, argoplist_size);
5802 5807                          if (!e.error)
5803 5808                                  goto recov_retry;
5804 5809                          (void) check_mnt_secinfo(mi->mi_curr_serv, nvp);
5805 5810                          return (e.error);
5806 5811                  }
5807 5812  
5808 5813                  if (nfs4_start_recovery(&e, mi, dvp, NULL, NULL, NULL,
5809 5814                      OP_LOOKUP, NULL, NULL, NULL) == FALSE) {
5810 5815                          nfs4_end_fop(mi, dvp, NULL, OH_LOOKUP,
5811 5816                              &recov_state, TRUE);
5812 5817  
5813 5818                          (void) xdr_free(xdr_COMPOUND4res_clnt, (caddr_t)&res);
5814 5819                          kmem_free(argop, argoplist_size);
5815 5820                          goto recov_retry;
5816 5821                  }
5817 5822          }
5818 5823  
5819 5824          nfs4_end_fop(mi, dvp, NULL, OH_LOOKUP, &recov_state, FALSE);
5820 5825  
5821 5826          if (e.error || res.array_len == 0) {
5822 5827                  /*
5823 5828                   * If e.error isn't set, then reply has no ops (or we couldn't
5824 5829                   * be here).  The only legal way to reply without an op array
5825 5830                   * is via NFS4ERR_MINOR_VERS_MISMATCH.  An ops array should
5826 5831                   * be in the reply for all other status values.
5827 5832                   *
5828 5833                   * For valid replies without an ops array, return ENOTSUP
5829 5834                   * (geterrno4 xlation of VERS_MISMATCH).  For illegal replies,
5830 5835                   * return EIO -- don't trust status.
5831 5836                   */
5832 5837                  if (e.error == 0)
5833 5838                          e.error = (res.status == NFS4ERR_MINOR_VERS_MISMATCH) ?
5834 5839                              ENOTSUP : EIO;
5835 5840  
5836 5841                  kmem_free(argop, argoplist_size);
5837 5842                  (void) check_mnt_secinfo(mi->mi_curr_serv, nvp);
5838 5843                  return (e.error);
5839 5844          }
5840 5845  
5841 5846          e.error = geterrno4(res.status);
5842 5847  
5843 5848          /*
5844 5849           * The PUTFH and SAVEFH may have failed.
5845 5850           */
5846 5851          if ((res.array[0].nfs_resop4_u.opputfh.status != NFS4_OK) ||
5847 5852              (res.array[1].nfs_resop4_u.opsavefh.status != NFS4_OK)) {
5848 5853                  nfs4_purge_stale_fh(e.error, dvp, cr);
5849 5854                  goto exit;
5850 5855          }
5851 5856  
5852 5857          /*
5853 5858           * Check if the file exists, if it does delay entering
5854 5859           * into the dnlc until after we update the directory
5855 5860           * attributes so we don't cause it to get purged immediately.
5856 5861           */
5857 5862          if (res.array[2].nfs_resop4_u.oplookup.status != NFS4_OK) {
5858 5863                  /*
5859 5864                   * The lookup failed, probably no entry
5860 5865                   */
5861 5866                  if (e.error == ENOENT && nfs4_lookup_neg_cache)
5862 5867                          dnlc_update(dvp, nm, DNLC_NO_VNODE);
5863 5868                  goto exit;
5864 5869          }
5865 5870  
5866 5871          if (res.array[3].nfs_resop4_u.opgetfh.status != NFS4_OK) {
5867 5872                  /*
5868 5873                   * The file exists but we can't get its fh for
5869 5874                   * some unknown reason. Error out to be safe.
5870 5875                   */
5871 5876                  goto exit;
5872 5877          }
5873 5878  
5874 5879          fhp = &res.array[3].nfs_resop4_u.opgetfh.object;
5875 5880          if (fhp->nfs_fh4_len == 0) {
5876 5881                  /*
5877 5882                   * The file exists but has a bogus fh for
5878 5883                   * some unknown reason.  Error out to be safe.
5879 5884                   */
5880 5885                  e.error = EIO;
5881 5886                  goto exit;
5882 5887          }
5883 5888          sfhp = sfh4_get(fhp, mi);
5884 5889  
5885 5890          if (res.array[4].nfs_resop4_u.opgetattr.status != NFS4_OK) {
5886 5891                  sfh4_rele(&sfhp);
5887 5892                  goto exit;
5888 5893          }
5889 5894          garp = &res.array[4].nfs_resop4_u.opgetattr.ga_res;
5890 5895  
5891 5896          /*
5892 5897           * The RESTOREFH may have failed
5893 5898           */
5894 5899          if (res.array[5].nfs_resop4_u.oprestorefh.status != NFS4_OK) {
5895 5900                  sfh4_rele(&sfhp);
5896 5901                  e.error = EIO;
5897 5902                  goto exit;
5898 5903          }
5899 5904  
5900 5905          if (res.array[6].nfs_resop4_u.opnverify.status != NFS4ERR_SAME) {
5901 5906                  /*
5902 5907                   * First make sure the NVERIFY failed as we expected,
5903 5908                   * if it didn't then be conservative and error out
5904 5909                   * as we can't trust the directory.
5905 5910                   */
5906 5911                  if (res.array[6].nfs_resop4_u.opnverify.status != NFS4_OK) {
5907 5912                          sfh4_rele(&sfhp);
5908 5913                          e.error = EIO;
5909 5914                          goto exit;
5910 5915                  }
5911 5916  
5912 5917                  /*
5913 5918                   * We know the NVERIFY "failed" so the directory has changed,
5914 5919                   * so we must:
5915 5920                   *      purge the caches (access and indirectly dnlc if needed)
5916 5921                   */
5917 5922                  nfs4_purge_caches(dvp, NFS4_NOPURGE_DNLC, cr, TRUE);
5918 5923  
5919 5924                  if (res.array[7].nfs_resop4_u.opgetattr.status != NFS4_OK) {
5920 5925                          sfh4_rele(&sfhp);
5921 5926                          goto exit;
5922 5927                  }
5923 5928                  nfs4_attr_cache(dvp,
5924 5929                      &res.array[7].nfs_resop4_u.opgetattr.ga_res,
5925 5930                      t, cr, FALSE, NULL);
5926 5931  
5927 5932                  if (res.array[8].nfs_resop4_u.opaccess.status != NFS4_OK) {
5928 5933                          nfs4_purge_stale_fh(e.error, dvp, cr);
5929 5934                          sfh4_rele(&sfhp);
5930 5935                          e.error = geterrno4(res.status);
5931 5936                          goto exit;
5932 5937                  }
5933 5938  
5934 5939                  /*
5935 5940                   * Now we know the directory is valid,
5936 5941                   * cache new directory access
5937 5942                   */
5938 5943                  nfs4_access_cache(drp,
5939 5944                      args.array[8].nfs_argop4_u.opaccess.access,
5940 5945                      res.array[8].nfs_resop4_u.opaccess.access, cr);
5941 5946  
5942 5947                  /*
5943 5948                   * recheck VEXEC access
5944 5949                   */
5945 5950                  cacc = nfs4_access_check(drp, ACCESS4_LOOKUP, cr);
5946 5951                  if (cacc != NFS4_ACCESS_ALLOWED) {
5947 5952                          /*
5948 5953                           * Directory permissions might have been revoked
5949 5954                           */
5950 5955                          if (cacc == NFS4_ACCESS_DENIED) {
5951 5956                                  sfh4_rele(&sfhp);
5952 5957                                  e.error = EACCES;
5953 5958                                  goto exit;
5954 5959                          }
5955 5960  
5956 5961                          /*
5957 5962                           * Somehow we must not have asked for enough
5958 5963                   * so try a singleton ACCESS; this should never happen
5959 5964                           */
5960 5965                          e.error = nfs4_access(dvp, VEXEC, 0, cr, NULL);
5961 5966                          if (e.error) {
5962 5967                                  sfh4_rele(&sfhp);
5963 5968                                  goto exit;
5964 5969                          }
5965 5970                  }
5966 5971  
5967 5972                  e.error = geterrno4(res.status);
5968 5973          } else {
5969 5974                  hrtime_t now;
5970 5975                  hrtime_t delta = 0;
5971 5976  
5972 5977                  e.error = 0;
5973 5978  
5974 5979                  /*
5975 5980                   * Because the NVERIFY "succeeded" we know that the
5976 5981                   * directory attributes are still valid
5977 5982                   * so update r_time_attr_inval
5978 5983                   */
5979 5984                  now = gethrtime();
5980 5985                  mutex_enter(&drp->r_statelock);
5981 5986                  if (!(mi->mi_flags & MI4_NOAC) && !(dvp->v_flag & VNOCACHE)) {
5982 5987                          delta = now - drp->r_time_attr_saved;
5983 5988                          if (delta < mi->mi_acdirmin)
5984 5989                                  delta = mi->mi_acdirmin;
5985 5990                          else if (delta > mi->mi_acdirmax)
5986 5991                                  delta = mi->mi_acdirmax;
5987 5992                  }
5988 5993                  drp->r_time_attr_inval = now + delta;
5989 5994                  mutex_exit(&drp->r_statelock);
5990 5995  
5991 5996                  /*
5992 5997                   * Even though we have a valid directory attr cache,
5993 5998                   * we may not have access.
5994 5999                   * This should almost always hit the cache.
5995 6000                   */
5996 6001                  e.error = nfs4_access(dvp, VEXEC, 0, cr, NULL);
5997 6002                  if (e.error) {
5998 6003                          sfh4_rele(&sfhp);
5999 6004                          goto exit;
6000 6005                  }
6001 6006          }
6002 6007  
6003 6008          /*
6004 6009           * Now we have successfully completed the lookup, if the
6005 6010           * directory has changed we now have the valid attributes.
6006 6011           * We also know we have directory access.
6007 6012           * Create the new rnode and insert it in the dnlc.
6008 6013           */
6009 6014          if (isdotdot) {
6010 6015                  e.error = nfs4_make_dotdot(sfhp, t, dvp, cr, &nvp, 1);
6011 6016                  if (e.error) {
6012 6017                          sfh4_rele(&sfhp);
6013 6018                          goto exit;
6014 6019                  }
6015 6020                  /*
6016 6021                   * XXX if nfs4_make_dotdot uses an existing rnode
6017 6022                   * XXX it doesn't update the attributes.
6018 6023                   * XXX for now just save them again to save an OTW
6019 6024                   */
6020 6025                  nfs4_attr_cache(nvp, garp, t, cr, FALSE, NULL);
6021 6026          } else {
6022 6027                  nvp = makenfs4node(sfhp, garp, dvp->v_vfsp, t, cr,
6023 6028                      dvp, fn_get(VTOSV(dvp)->sv_name, nm, sfhp));
6024 6029          }
6025 6030          sfh4_rele(&sfhp);
6026 6031  
6027 6032          nrp = VTOR4(nvp);
6028 6033          mutex_enter(&nrp->r_statev4_lock);
6029 6034          if (!nrp->created_v4) {
6030 6035                  mutex_exit(&nrp->r_statev4_lock);
6031 6036                  dnlc_update(dvp, nm, nvp);
6032 6037          } else
6033 6038                  mutex_exit(&nrp->r_statev4_lock);
6034 6039  
6035 6040          *vpp = nvp;
6036 6041  
6037 6042  exit:
6038 6043          (void) xdr_free(xdr_COMPOUND4res_clnt, (caddr_t)&res);
6039 6044          kmem_free(argop, argoplist_size);
6040 6045          (void) check_mnt_secinfo(mi->mi_curr_serv, nvp);
6041 6046          return (e.error);
6042 6047  }
6043 6048  
#ifdef DEBUG
/*
 * Debug-only helper: log a one-line, human-readable description of each
 * operation in a lookup COMPOUND request via zcmn_err(), tagged with the
 * calling zone.  "where" identifies the call site in the log output.
 */
void
nfs4lookup_dump_compound(char *where, nfs_argop4 *argbase, int argcnt)
{
	zoneid_t zoneid = getzoneid();
	uint_t i;

	zcmn_err(zoneid, CE_NOTE, "%s: dumping cmpd", where);

	for (i = 0; i < argcnt; i++) {
		nfs_argop4 *argp = &argbase[i];
		char *name;
		uint_t len;

		switch (argp->argop) {
		case OP_CPUTFH:
		case OP_PUTFH:
			zcmn_err(zoneid, CE_NOTE, "\t op %d, putfh", i);
			break;
		case OP_PUTROOTFH:
			zcmn_err(zoneid, CE_NOTE, "\t op %d, putrootfh", i);
			break;
		case OP_CLOOKUP:
			/* cname is a plain C string; no conversion needed */
			name = argp->nfs_argop4_u.opclookup.cname;
			zcmn_err(zoneid, CE_NOTE, "\t op %d, lookup %s",
			    i, name);
			break;
		case OP_LOOKUP:
			/* objname is UTF-8; convert, log, then free */
			name = utf8_to_str(&argp->nfs_argop4_u.oplookup.objname,
			    &len, NULL);
			zcmn_err(zoneid, CE_NOTE, "\t op %d, lookup %s",
			    i, name);
			kmem_free(name, len);
			break;
		case OP_LOOKUPP:
			zcmn_err(zoneid, CE_NOTE, "\t op %d, lookupp ..", i);
			break;
		case OP_GETFH:
			zcmn_err(zoneid, CE_NOTE, "\t op %d, getfh", i);
			break;
		case OP_GETATTR:
			zcmn_err(zoneid, CE_NOTE, "\t op %d, getattr", i);
			break;
		case OP_OPENATTR:
			zcmn_err(zoneid, CE_NOTE, "\t op %d, openattr", i);
			break;
		default:
			/* unrecognized op: log its raw opcode number */
			zcmn_err(zoneid, CE_NOTE, "\t op %d, opcode %d", i,
			    argp->argop);
			break;
		}
	}
}
#endif
6093 6098  
6094 6099  /*
6095 6100   * nfs4lookup_setup - constructs a multi-lookup compound request.
6096 6101   *
6097 6102   * Given the path "nm1/nm2/.../nmn", the following compound requests
6098 6103   * may be created:
6099 6104   *
6100 6105   * Note: Getfh is not needed because filehandle attr is mandatory, but it
6101 6106   * is faster, for now.
6102 6107   *
6103 6108   * l4_getattrs indicates the type of compound requested.
6104 6109   *
6105 6110   * LKP4_NO_ATTRIBUTE - no attributes (used by secinfo):
6106 6111   *
6107 6112   *      compound { Put*fh; Lookup {nm1}; Lookup {nm2}; ...  Lookup {nmn} }
6108 6113   *
6109 6114   *   total number of ops is n + 1.
6110 6115   *
6111 6116   * LKP4_LAST_NAMED_ATTR - multi-component path for a named
6112 6117   *      attribute: create lookups plus one OPENATTR/GETFH/GETATTR
6113 6118   *      before the last component, and only get attributes
6114 6119   *      for the last component.  Note that the second-to-last
6115 6120   *      pathname component is XATTR_RPATH, which does NOT go
6116 6121   *      over-the-wire as a lookup.
6117 6122   *
6118 6123   *      compound { Put*fh; Lookup {nm1}; Lookup {nm2}; ... Lookup {nmn-2};
6119 6124   *              Openattr; Getfh; Getattr; Lookup {nmn}; Getfh; Getattr }
6120 6125   *
6121 6126   *   and total number of ops is n + 5.
6122 6127   *
6123 6128   * LKP4_LAST_ATTRDIR - multi-component path for the hidden named
6124 6129   *      attribute directory: create lookups plus an OPENATTR
6125 6130   *      replacing the last lookup.  Note that the last pathname
6126 6131   *      component is XATTR_RPATH, which does NOT go over-the-wire
6127 6132   *      as a lookup.
6128 6133   *
6129 6134   *      compound { Put*fh; Lookup {nm1}; Lookup {nm2}; ... Getfh; Getattr;
6130 6135   *              Openattr; Getfh; Getattr }
6131 6136   *
6132 6137   *   and total number of ops is n + 5.
6133 6138   *
6134 6139   * LKP4_ALL_ATTRIBUTES - create lookups and get attributes for intermediate
6135 6140   *      nodes too.
6136 6141   *
6137 6142   *      compound { Put*fh; Lookup {nm1}; Getfh; Getattr;
6138 6143   *              Lookup {nm2}; ...  Lookup {nmn}; Getfh; Getattr }
6139 6144   *
6140 6145   *   and total number of ops is 3*n + 1.
6141 6146   *
6142 6147   * All cases: returns the index in the arg array of the final LOOKUP op, or
6143 6148   * -1 if no LOOKUPs were used.
6144 6149   */
/*
 * Build a multi-component lookup COMPOUND in lookupargp->argsp from the
 * path "nm" (see the block comment above for the exact op layouts per
 * l4_getattrs mode).  The caller has reserved header_len ops at the front
 * and trailer_len ops at the back of the array; this routine fills in the
 * middle.  Returns the arg-array index of the final LOOKUP/LOOKUPP op,
 * or -1 if none was emitted.  The array is kmem_alloc'd here; the caller
 * owns it and must free arglen * sizeof (nfs_argop4) bytes.
 */
int
nfs4lookup_setup(char *nm, lookup4_param_t *lookupargp, int needgetfh)
{
	enum lkp4_attr_setup l4_getattrs = lookupargp->l4_getattrs;
	nfs_argop4 *argbase, *argop;
	int arglen, argcnt;
	int n = 1;	/* number of components */
	int nga = 1;	/* number of Getattr's in request */
	char c = '\0', *s, *p;	/* c saves the separator clobbered below */
	int lookup_idx = -1;	/* arg index of the last LOOKUP/LOOKUPP op */
	int argoplist_size;

	/* set lookuparg response result to 0 */
	lookupargp->resp->status = NFS4_OK;

	/* skip leading "/" or "." e.g. ".//./" if there is */
	for (; ; nm++) {
		if (*nm != '/' && *nm != '.')
			break;

		/* ".." is counted as 1 component */
		if (*nm == '.' && *(nm + 1) != '/')
			break;
	}

	/*
	 * Find n = number of components - nm must be null terminated
	 * Skip "." components.
	 */
	if (*nm != '\0')
		for (n = 1, s = nm; *s != '\0'; s++) {
			/*
			 * Count a new component at each '/' that is not
			 * followed by another '/', end-of-string, or a
			 * lone "." component.
			 */
			if ((*s == '/') && (*(s + 1) != '/') &&
			    (*(s + 1) != '\0') &&
			    !(*(s + 1) == '.' && (*(s + 2) == '/' ||
			    *(s + 2) == '\0')))
				n++;
		}
	else
		n = 0;

	/*
	 * nga is number of components that need Getfh+Getattr
	 */
	switch (l4_getattrs) {
	case LKP4_NO_ATTRIBUTES:
		nga = 0;
		break;
	case LKP4_ALL_ATTRIBUTES:
		nga = n;
		/*
		 * Always have at least 1 getfh, getattr pair
		 */
		if (nga == 0)
			nga++;
		break;
	case LKP4_LAST_ATTRDIR:
	case LKP4_LAST_NAMED_ATTR:
		/* extra pair for the OPENATTR-produced attrdir/attr node */
		nga = n+1;
		break;
	}

	/*
	 * If change to use the filehandle attr instead of getfh
	 * the following line can be deleted.
	 */
	nga *= 2;	/* each "pair" is one GETFH plus one GETATTR op */

	/*
	 * calculate number of ops in request as
	 * header + trailer + lookups + getattrs
	 *
	 * Note: arglen may exceed the actual op count (argcnt) computed
	 * below, since "." components are skipped; the allocation is an
	 * upper bound.
	 */
	arglen = lookupargp->header_len + lookupargp->trailer_len + n + nga;

	argoplist_size = arglen * sizeof (nfs_argop4);
	argop = argbase = kmem_alloc(argoplist_size, KM_SLEEP);
	lookupargp->argsp->array = argop;

	/* leave room for the caller's header ops */
	argcnt = lookupargp->header_len;
	argop += argcnt;

	/*
	 * loop and create a lookup op and possibly getattr/getfh for
	 * each component. Skip "." components.
	 */
	for (s = nm; *s != '\0'; s = p) {
		/*
		 * Set up a pathname struct for each component if needed
		 */
		while (*s == '/')
			s++;
		if (*s == '\0')
			break;

		/*
		 * Temporarily NUL-terminate this component in place;
		 * the saved character c is restored before continuing.
		 */
		for (p = s; (*p != '/') && (*p != '\0'); p++)
			;
		c = *p;
		*p = '\0';

		if (s[0] == '.' && s[1] == '\0') {
			*p = c;
			continue;
		}
		if (l4_getattrs == LKP4_LAST_ATTRDIR &&
		    strcmp(s, XATTR_RPATH) == 0) {
			/* getfh XXX may not be needed in future */
			argop->argop = OP_GETFH;
			argop++;
			argcnt++;

			/* getattr */
			argop->argop = OP_GETATTR;
			argop->nfs_argop4_u.opgetattr.attr_request =
			    lookupargp->ga_bits;
			argop->nfs_argop4_u.opgetattr.mi =
			    lookupargp->mi;
			argop++;
			argcnt++;

			/* openattr */
			argop->argop = OP_OPENATTR;
		} else if (l4_getattrs == LKP4_LAST_NAMED_ATTR &&
		    strcmp(s, XATTR_RPATH) == 0) {
			/* openattr */
			argop->argop = OP_OPENATTR;
			argop++;
			argcnt++;

			/* getfh XXX may not be needed in future */
			argop->argop = OP_GETFH;
			argop++;
			argcnt++;

			/* getattr */
			argop->argop = OP_GETATTR;
			argop->nfs_argop4_u.opgetattr.attr_request =
			    lookupargp->ga_bits;
			argop->nfs_argop4_u.opgetattr.mi =
			    lookupargp->mi;
			argop++;
			argcnt++;
			*p = c;
			continue;
		} else if (s[0] == '.' && s[1] == '.' && s[2] == '\0') {
			/* lookupp */
			argop->argop = OP_LOOKUPP;
		} else {
			/* lookup */
			argop->argop = OP_LOOKUP;
			(void) str_to_utf8(s,
			    &argop->nfs_argop4_u.oplookup.objname);
		}
		/* remember where the most recent lookup-style op landed */
		lookup_idx = argcnt;
		argop++;
		argcnt++;

		*p = c;

		if (l4_getattrs == LKP4_ALL_ATTRIBUTES) {
			/* getfh XXX may not be needed in future */
			argop->argop = OP_GETFH;
			argop++;
			argcnt++;

			/* getattr */
			argop->argop = OP_GETATTR;
			argop->nfs_argop4_u.opgetattr.attr_request =
			    lookupargp->ga_bits;
			argop->nfs_argop4_u.opgetattr.mi =
			    lookupargp->mi;
			argop++;
			argcnt++;
		}
	}

	/*
	 * Modes other than ALL_ATTRIBUTES (or an empty path, where no
	 * lookup op was emitted) still need one trailing getfh/getattr
	 * for the final object.
	 */
	if ((l4_getattrs != LKP4_NO_ATTRIBUTES) &&
	    ((l4_getattrs != LKP4_ALL_ATTRIBUTES) || (lookup_idx < 0))) {
		if (needgetfh) {
			/* stick in a post-lookup getfh */
			argop->argop = OP_GETFH;
			argcnt++;
			argop++;
		}
		/* post-lookup getattr */
		argop->argop = OP_GETATTR;
		argop->nfs_argop4_u.opgetattr.attr_request =
		    lookupargp->ga_bits;
		argop->nfs_argop4_u.opgetattr.mi = lookupargp->mi;
		argcnt++;
	}
	argcnt += lookupargp->trailer_len;	/* actual op count */
	lookupargp->argsp->array_len = argcnt;
	lookupargp->arglen = arglen;

#ifdef DEBUG
	if (nfs4_client_lookup_debug)
		nfs4lookup_dump_compound("nfs4lookup_setup", argbase, argcnt);
#endif

	return (lookup_idx);
}
6345 6350  
/*
 * Obtain (and optionally create, when cflag is set) the hidden extended
 * attribute directory for dvp via an OTW COMPOUND of
 * putfh/openattr/getfh/getattr.  On success, returns 0 and stores a held
 * vnode for the attrdir in *avp; it is also cached in drp->r_xattr_dir.
 * Handles NFSv4 client recovery by retrying the compound.
 */
static int
nfs4openattr(vnode_t *dvp, vnode_t **avp, int cflag, cred_t *cr)
{
	COMPOUND4args_clnt	args;
	COMPOUND4res_clnt	res;
	GETFH4res	*gf_res = NULL;
	nfs_argop4	argop[4];
	nfs_resop4	*resop = NULL;
	nfs4_sharedfh_t *sfhp;
	hrtime_t t;
	nfs4_error_t	e;

	rnode4_t	*drp;
	int		doqueue = 1;
	vnode_t		*vp;
	int		needrecov = 0;
	nfs4_recov_state_t recov_state;

	ASSERT(nfs_zone() == VTOMI4(dvp)->mi_zone);

	*avp = NULL;
	recov_state.rs_flags = 0;
	recov_state.rs_num_retry_despite_err = 0;

recov_retry:
	/* COMPOUND: putfh, openattr, getfh, getattr */
	args.array_len = 4;
	args.array = argop;
	args.ctag = TAG_OPENATTR;

	/* every return path below must balance this with nfs4_end_op() */
	e.error = nfs4_start_op(VTOMI4(dvp), dvp, NULL, &recov_state);
	if (e.error)
		return (e.error);

	drp = VTOR4(dvp);

	/* putfh */
	argop[0].argop = OP_CPUTFH;
	argop[0].nfs_argop4_u.opcputfh.sfh = drp->r_fh;

	/* openattr: cflag requests server-side creation of the attrdir */
	argop[1].argop = OP_OPENATTR;
	argop[1].nfs_argop4_u.opopenattr.createdir = (cflag ? TRUE : FALSE);

	/* getfh */
	argop[2].argop = OP_GETFH;

	/* getattr */
	argop[3].argop = OP_GETATTR;
	argop[3].nfs_argop4_u.opgetattr.attr_request = NFS4_VATTR_MASK;
	argop[3].nfs_argop4_u.opgetattr.mi = VTOMI4(dvp);

	NFS4_DEBUG(nfs4_client_call_debug, (CE_NOTE,
	    "nfs4openattr: %s call, drp %s", needrecov ? "recov" : "first",
	    rnode4info(drp)));

	/* timestamp the call for attribute-cache freshness below */
	t = gethrtime();

	rfs4call(VTOMI4(dvp), &args, &res, cr, &doqueue, 0, &e);

	needrecov = nfs4_needs_recovery(&e, FALSE, dvp->v_vfsp);
	if (needrecov) {
		bool_t abort;

		NFS4_DEBUG(nfs4_client_recov_debug, (CE_NOTE,
		    "nfs4openattr: initiating recovery\n"));

		abort = nfs4_start_recovery(&e,
		    VTOMI4(dvp), dvp, NULL, NULL, NULL,
		    OP_OPENATTR, NULL, NULL, NULL);
		nfs4_end_op(VTOMI4(dvp), dvp, NULL, &recov_state, needrecov);
		/* only free the reply if the RPC itself succeeded */
		if (!e.error) {
			e.error = geterrno4(res.status);
			(void) xdr_free(xdr_COMPOUND4res_clnt, (caddr_t)&res);
		}
		if (abort == FALSE)
			goto recov_retry;
		return (e.error);
	}

	if (e.error) {
		nfs4_end_op(VTOMI4(dvp), dvp, NULL, &recov_state, needrecov);
		return (e.error);
	}

	if (res.status) {
		/*
		 * If OTW error is NOTSUPP, then it should be
		 * translated to EINVAL.  All Solaris file system
		 * implementations return EINVAL to the syscall layer
		 * when the attrdir cannot be created due to an
		 * implementation restriction or noxattr mount option.
		 */
		if (res.status == NFS4ERR_NOTSUPP) {
			/*
			 * Cache the "not supported" sentinel so later
			 * callers can fail fast without going OTW.
			 */
			mutex_enter(&drp->r_statelock);
			if (drp->r_xattr_dir)
				VN_RELE(drp->r_xattr_dir);
			VN_HOLD(NFS4_XATTR_DIR_NOTSUPP);
			drp->r_xattr_dir = NFS4_XATTR_DIR_NOTSUPP;
			mutex_exit(&drp->r_statelock);

			e.error = EINVAL;
		} else {
			e.error = geterrno4(res.status);
		}

		if (e.error) {
			(void) xdr_free(xdr_COMPOUND4res_clnt, (caddr_t)&res);
			nfs4_end_op(VTOMI4(dvp), dvp, NULL, &recov_state,
			    needrecov);
			return (e.error);
		}
	}

	/*
	 * res.status == NFS4_OK here, so every op succeeded.  The
	 * ASSERTs below read the union's status field, which occupies
	 * the same position in all result arms.
	 */
	resop = &res.array[0];	/* putfh res */
	ASSERT(resop->nfs_resop4_u.opgetfh.status == NFS4_OK);

	resop = &res.array[1];	/* openattr res */
	ASSERT(resop->nfs_resop4_u.opopenattr.status == NFS4_OK);

	resop = &res.array[2];	/* getfh res */
	gf_res = &resop->nfs_resop4_u.opgetfh;
	if (gf_res->object.nfs_fh4_len == 0) {
		/* server returned an empty filehandle; treat as ENOENT */
		*avp = NULL;
		(void) xdr_free(xdr_COMPOUND4res_clnt, (caddr_t)&res);
		nfs4_end_op(VTOMI4(dvp), dvp, NULL, &recov_state, needrecov);
		return (ENOENT);
	}

	sfhp = sfh4_get(&gf_res->object, VTOMI4(dvp));
	vp = makenfs4node(sfhp, &res.array[3].nfs_resop4_u.opgetattr.ga_res,
	    dvp->v_vfsp, t, cr, dvp,
	    fn_get(VTOSV(dvp)->sv_name, XATTR_RPATH, sfhp));
	sfh4_rele(&sfhp);

	if (e.error)
		PURGE_ATTRCACHE4(vp);

	/* mark the new node as an extended attribute directory */
	mutex_enter(&vp->v_lock);
	vp->v_flag |= V_XATTRDIR;
	mutex_exit(&vp->v_lock);

	*avp = vp;

	/* cache the attrdir on the parent rnode, dropping any old one */
	mutex_enter(&drp->r_statelock);
	if (drp->r_xattr_dir)
		VN_RELE(drp->r_xattr_dir);
	VN_HOLD(vp);
	drp->r_xattr_dir = vp;

	/*
	 * Invalidate pathconf4 cache because r_xattr_dir is no longer
	 * NULL.  xattrs could be created at any time, and we have no
	 * way to update pc4_xattr_exists in the base object if/when
	 * it happens.
	 */
	drp->r_pathconf.pc4_xattr_valid = 0;

	mutex_exit(&drp->r_statelock);

	nfs4_end_op(VTOMI4(dvp), dvp, NULL, &recov_state, needrecov);

	(void) xdr_free(xdr_COMPOUND4res_clnt, (caddr_t)&res);

	return (0);
}
6512 6517  
/*
 * VOP_CREATE for NFSv4.  Create (or truncate-on-open) the regular file or
 * device node "nm" in directory dvp.  On success *vpp holds a held vnode
 * for the new/existing object.
 *
 * Regular-file creation is driven through nfs4open_otw() (OPEN w/ CREATE);
 * non-regular types are handed off to nfs4mknod().  When an existing file
 * is truncated — either explicitly (must_trunc) or as a side effect of the
 * over-the-wire open (truncating) — a vnevent_create() is emitted so that
 * file-event watchers (e.g. inotify) see the truncation; see OS-5148.
 */
/* ARGSUSED */
static int
nfs4_create(vnode_t *dvp, char *nm, struct vattr *va, enum vcexcl exclusive,
    int mode, vnode_t **vpp, cred_t *cr, int flags, caller_context_t *ct,
    vsecattr_t *vsecp)
{
	int error;
	vnode_t *vp = NULL;
	rnode4_t *rp;
	struct vattr vattr;
	rnode4_t *drp;
	vnode_t *tempvp;
	enum createmode4 createmode;
	bool_t must_trunc = FALSE;	/* truncate via explicit SETATTR */
	int	truncating = 0;		/* truncate via OPEN w/ AT_SIZE */

	if (nfs_zone() != VTOMI4(dvp)->mi_zone)
		return (EPERM);
	if (exclusive == EXCL && (dvp->v_flag & V_XATTRDIR)) {
		return (EINVAL);
	}

	/* . and .. have special meaning in the protocol, reject them. */

	if (nm[0] == '.' && (nm[1] == '\0' || (nm[1] == '.' && nm[2] == '\0')))
		return (EISDIR);

	drp = VTOR4(dvp);

	/* Serialize directory modifications; held until return. */
	if (nfs_rw_enter_sig(&drp->r_rwlock, RW_WRITER, INTR4(dvp)))
		return (EINTR);

top:
	/*
	 * We make a copy of the attributes because the caller does not
	 * expect us to change what va points to.
	 */
	vattr = *va;

	/*
	 * If the pathname is "", then dvp is the root vnode of
	 * a remote file mounted over a local directory.
	 * All that needs to be done is access
	 * checking and truncation.  Note that we avoid doing
	 * open w/ create because the parent directory might
	 * be in pseudo-fs and the open would fail.
	 */
	if (*nm == '\0') {
		error = 0;
		VN_HOLD(dvp);
		vp = dvp;
		must_trunc = TRUE;
	} else {
		/*
		 * We need to go over the wire, just to be sure whether the
		 * file exists or not.  Using the DNLC can be dangerous in
		 * this case when making a decision regarding existence.
		 */
		error = nfs4lookup(dvp, nm, &vp, cr, 1);
	}

	if (exclusive)
		createmode = EXCLUSIVE4;
	else
		createmode = GUARDED4;

	/*
	 * error would be set if the file does not exist on the
	 * server, so lets go create it.
	 */
	if (error) {
		goto create_otw;
	}

	/*
	 * File does exist on the server
	 */
	if (exclusive == EXCL)
		error = EEXIST;
	else if (vp->v_type == VDIR && (mode & VWRITE))
		error = EISDIR;
	else {
		/*
		 * If vnode is a device, create special vnode.
		 */
		if (ISVDEV(vp->v_type)) {
			tempvp = vp;
			vp = specvp(vp, vp->v_rdev, vp->v_type, cr);
			VN_RELE(tempvp);
		}
		if (!(error = VOP_ACCESS(vp, mode, 0, cr, ct))) {
			if ((vattr.va_mask & AT_SIZE) &&
			    vp->v_type == VREG) {
				rp = VTOR4(vp);
				/*
				 * Check here for large file handled
				 * by LF-unaware process (as
				 * ufs_create() does)
				 */
				if (!(flags & FOFFMAX)) {
					mutex_enter(&rp->r_statelock);
					if (rp->r_size > MAXOFF32_T)
						error = EOVERFLOW;
					mutex_exit(&rp->r_statelock);
				}

				/* if error is set then we need to return */
				if (error) {
					nfs_rw_exit(&drp->r_rwlock);
					VN_RELE(vp);
					return (error);
				}

				if (must_trunc) {
					vattr.va_mask = AT_SIZE;
					error = nfs4setattr(vp, &vattr, 0, cr,
					    NULL);
				} else {
				/*
				 * we know we have a regular file that already
				 * exists and we may end up truncating the file
				 * as a result of the open_otw, so flush out
				 * any dirty pages for this file first.
				 */
					if (nfs4_has_pages(vp) &&
					    ((rp->r_flags & R4DIRTY) ||
					    rp->r_count > 0 ||
					    rp->r_mapcnt > 0)) {
						error = nfs4_putpage(vp,
						    (offset_t)0, 0, 0, cr, ct);
						if (error && (error == ENOSPC ||
						    error == EDQUOT)) {
							mutex_enter(
							    &rp->r_statelock);
							if (!rp->r_error)
								rp->r_error =
								    error;
							mutex_exit(
							    &rp->r_statelock);
						}
					}
					vattr.va_mask = (AT_SIZE |
					    AT_TYPE | AT_MODE);
					vattr.va_type = VREG;
					createmode = UNCHECKED4;
					/* OPEN below truncates; note for event */
					truncating = 1;
					goto create_otw;
				}
			}
		}
	}
	nfs_rw_exit(&drp->r_rwlock);
	if (error) {
		VN_RELE(vp);
	} else {
		vnode_t *tvp;
		rnode4_t *trp;
		/*
		 * Deliver the event against the "real" vnode, not a
		 * shadow rnode, so watchers on the file see it.
		 */
		tvp = vp;
		if (vp->v_type == VREG) {
			trp = VTOR4(vp);
			if (IS_SHADOW(vp, trp))
				tvp = RTOV4(trp);
		}

		if (must_trunc) {
			/*
			 * existing file got truncated, notify.
			 */
			vnevent_create(tvp, ct);
		}

		*vpp = vp;
	}
	return (error);

create_otw:
	dnlc_remove(dvp, nm);

	ASSERT(vattr.va_mask & AT_TYPE);

	/*
	 * If not a regular file let nfs4mknod() handle it.
	 */
	if (vattr.va_type != VREG) {
		error = nfs4mknod(dvp, nm, &vattr, exclusive, mode, vpp, cr);
		nfs_rw_exit(&drp->r_rwlock);
		return (error);
	}

	/*
	 * It _is_ a regular file.
	 */
	ASSERT(vattr.va_mask & AT_MODE);
	if (MANDMODE(vattr.va_mode)) {
		nfs_rw_exit(&drp->r_rwlock);
		return (EACCES);
	}

	/*
	 * If this happens to be a mknod of a regular file, then flags will
	 * have neither FREAD or FWRITE.  However, we must set at least one
	 * for the call to nfs4open_otw.  If it's open(O_CREAT) driving
	 * nfs4_create, then either FREAD, FWRITE, or FRDWR has already been
	 * set (based on openmode specified by app).
	 */
	if ((flags & (FREAD|FWRITE)) == 0)
		flags |= (FREAD|FWRITE);

	error = nfs4open_otw(dvp, nm, &vattr, vpp, cr, 1, flags, createmode, 0);

	if (vp != NULL) {
		/* if create was successful, throw away the file's pages */
		if (!error && (vattr.va_mask & AT_SIZE))
			nfs4_invalidate_pages(vp, (vattr.va_size & PAGEMASK),
			    cr);
		/* release the lookup hold */
		VN_RELE(vp);
		vp = NULL;
	}

	/*
	 * validate that we opened a regular file. This handles a misbehaving
	 * server that returns an incorrect FH.
	 */
	if ((error == 0) && *vpp && (*vpp)->v_type != VREG) {
		error = EISDIR;
		VN_RELE(*vpp);
	}

	/*
	 * If this is not an exclusive create, then the CREATE
	 * request will be made with the GUARDED mode set.  This
	 * means that the server will return EEXIST if the file
	 * exists.  The file could exist because of a retransmitted
	 * request.  In this case, we recover by starting over and
	 * checking to see whether the file exists.  This second
	 * time through it should and a CREATE request will not be
	 * sent.
	 *
	 * This handles the problem of a dangling CREATE request
	 * which contains attributes which indicate that the file
	 * should be truncated.  This retransmitted request could
	 * possibly truncate valid data in the file if not caught
	 * by the duplicate request mechanism on the server or if
	 * not caught by other means.  The scenario is:
	 *
	 * Client transmits CREATE request with size = 0
	 * Client times out, retransmits request.
	 * Response to the first request arrives from the server
	 *  and the client proceeds on.
	 * Client writes data to the file.
	 * The server now processes retransmitted CREATE request
	 *  and truncates file.
	 *
	 * The use of the GUARDED CREATE request prevents this from
	 * happening because the retransmitted CREATE would fail
	 * with EEXIST and would not truncate the file.
	 */
	if (error == EEXIST && exclusive == NONEXCL) {
#ifdef DEBUG
		nfs4_create_misses++;
#endif
		goto top;
	}
	nfs_rw_exit(&drp->r_rwlock);
	if (truncating && !error && *vpp) {
		vnode_t *tvp;
		rnode4_t *trp;
		/*
		 * existing file got truncated, notify.
		 */
		tvp = *vpp;
		trp = VTOR4(tvp);
		if (IS_SHADOW(tvp, trp))
			tvp = RTOV4(trp);
		vnevent_create(tvp, ct);
	}
	return (error);
}
6792 6797  
/*
 * Create compound (for mkdir, mknod, symlink):
 * { Putfh <dfh>; Create; Getfh; Getattr }
 * It's okay if setattr failed to set gid - this is not considered
 * an error, but purge attrs in that case.
 *
 * dvp/nm name the parent directory and the new entry; "data" carries the
 * type-specific payload: the link text for NF4LNK, a specdata4 pointer for
 * NF4BLK/NF4CHR, unused otherwise.  On success a held vnode for the new
 * object is returned through *vpp.  If the parent directory's setgid bit
 * (or MI4_GRPID) is in effect for a new directory, the compound is extended
 * with NVERIFY/SETATTR ops to propagate the group over the wire.
 */
static int
call_nfs4_create_req(vnode_t *dvp, char *nm, void *data, struct vattr *va,
    vnode_t **vpp, cred_t *cr, nfs_ftype4 type)
{
	int need_end_op = FALSE;
	COMPOUND4args_clnt args;
	COMPOUND4res_clnt res, *resp = NULL;
	nfs_argop4 *argop;
	nfs_resop4 *resop;
	int doqueue;
	mntinfo4_t *mi;
	rnode4_t *drp = VTOR4(dvp);
	change_info4 *cinfo;
	GETFH4res *gf_res;
	struct vattr vattr;
	vnode_t *vp;
	fattr4 *crattr;
	bool_t needrecov = FALSE;
	nfs4_recov_state_t recov_state;
	nfs4_sharedfh_t *sfhp = NULL;
	hrtime_t t;
	nfs4_error_t e = { 0, NFS4_OK, RPC_SUCCESS };
	int numops, argoplist_size, setgid_flag, idx_create, idx_fattr;
	dirattr_info_t dinfo, *dinfop;
	servinfo4_t *svp;
	bitmap4 supp_attrs;

	ASSERT(type == NF4DIR || type == NF4LNK || type == NF4BLK ||
	    type == NF4CHR || type == NF4SOCK || type == NF4FIFO);

	mi = VTOMI4(dvp);

	/*
	 * Make sure we properly deal with setting the right gid
	 * on a new directory to reflect the parent's setgid bit
	 */
	setgid_flag = 0;
	if (type == NF4DIR) {
		struct vattr dva;

		va->va_mode &= ~VSGID;
		dva.va_mask = AT_MODE | AT_GID;
		if (VOP_GETATTR(dvp, &dva, 0, cr, NULL) == 0) {

			/*
			 * If the parent's directory has the setgid bit set
			 * _and_ the client was able to get a valid mapping
			 * for the parent dir's owner_group, we want to
			 * append NVERIFY(owner_group == dva.va_gid) and
			 * SETTATTR to the CREATE compound.
			 */
			if (mi->mi_flags & MI4_GRPID || dva.va_mode & VSGID) {
				setgid_flag = 1;
				va->va_mode |= VSGID;
				if (dva.va_gid != GID_NOBODY) {
					va->va_mask |= AT_GID;
					va->va_gid = dva.va_gid;
				}
			}
		}
	}

	/*
	 * Create ops:
	 *	0:putfh(dir) 1:savefh(dir) 2:create 3:getfh(new) 4:getattr(new)
	 *	5:restorefh(dir) 6:getattr(dir)
	 *
	 * if (setgid)
	 *	0:putfh(dir) 1:create 2:getfh(new) 3:getattr(new)
	 *	4:savefh(new) 5:putfh(dir) 6:getattr(dir) 7:restorefh(new)
	 *	8:nverify 9:setattr
	 */
	if (setgid_flag) {
		numops = 10;
		idx_create = 1;
		idx_fattr = 3;
	} else {
		numops = 7;
		idx_create = 2;
		idx_fattr = 4;
	}

	ASSERT(nfs_zone() == mi->mi_zone);
	if (nfs_rw_enter_sig(&drp->r_rwlock, RW_WRITER, INTR4(dvp))) {
		return (EINTR);
	}
	recov_state.rs_flags = 0;
	recov_state.rs_num_retry_despite_err = 0;

	argoplist_size = numops * sizeof (nfs_argop4);
	argop = kmem_alloc(argoplist_size, KM_SLEEP);

recov_retry:
	if (type == NF4LNK)
		args.ctag = TAG_SYMLINK;
	else if (type == NF4DIR)
		args.ctag = TAG_MKDIR;
	else
		args.ctag = TAG_MKNOD;

	args.array_len = numops;
	args.array = argop;

	/* Assignment-in-condition is the prevailing style in this file. */
	if (e.error = nfs4_start_op(mi, dvp, NULL, &recov_state)) {
		nfs_rw_exit(&drp->r_rwlock);
		kmem_free(argop, argoplist_size);
		return (e.error);
	}
	need_end_op = TRUE;


	/* 0: putfh directory */
	argop[0].argop = OP_CPUTFH;
	argop[0].nfs_argop4_u.opcputfh.sfh = drp->r_fh;

	/* 1/2: Create object */
	argop[idx_create].argop = OP_CCREATE;
	argop[idx_create].nfs_argop4_u.opccreate.cname = nm;
	argop[idx_create].nfs_argop4_u.opccreate.type = type;
	if (type == NF4LNK) {
		/*
		 * symlink, treat name as data
		 */
		ASSERT(data != NULL);
		argop[idx_create].nfs_argop4_u.opccreate.ftype4_u.clinkdata =
		    (char *)data;
	}
	if (type == NF4BLK || type == NF4CHR) {
		ASSERT(data != NULL);
		argop[idx_create].nfs_argop4_u.opccreate.ftype4_u.devdata =
		    *((specdata4 *)data);
	}

	crattr = &argop[idx_create].nfs_argop4_u.opccreate.createattrs;

	/* Snapshot the server's supported-attribute mask under sv_lock. */
	svp = drp->r_server;
	(void) nfs_rw_enter_sig(&svp->sv_lock, RW_READER, 0);
	supp_attrs = svp->sv_supp_attrs;
	nfs_rw_exit(&svp->sv_lock);

	if (vattr_to_fattr4(va, NULL, crattr, 0, OP_CREATE, supp_attrs)) {
		nfs_rw_exit(&drp->r_rwlock);
		nfs4_end_op(mi, dvp, NULL, &recov_state, needrecov);
		e.error = EINVAL;
		kmem_free(argop, argoplist_size);
		return (e.error);
	}

	/* 2/3: getfh fh of created object */
	ASSERT(idx_create + 1 == idx_fattr - 1);
	argop[idx_create + 1].argop = OP_GETFH;

	/* 3/4: getattr of new object */
	argop[idx_fattr].argop = OP_GETATTR;
	argop[idx_fattr].nfs_argop4_u.opgetattr.attr_request = NFS4_VATTR_MASK;
	argop[idx_fattr].nfs_argop4_u.opgetattr.mi = mi;

	if (setgid_flag) {
		vattr_t _v;

		argop[4].argop = OP_SAVEFH;

		argop[5].argop = OP_CPUTFH;
		argop[5].nfs_argop4_u.opcputfh.sfh = drp->r_fh;

		argop[6].argop = OP_GETATTR;
		argop[6].nfs_argop4_u.opgetattr.attr_request = NFS4_VATTR_MASK;
		argop[6].nfs_argop4_u.opgetattr.mi = mi;

		argop[7].argop = OP_RESTOREFH;

		/*
		 * nverify
		 *
		 * XXX - Revisit the last argument to nfs4_end_op()
		 *	 once 5020486 is fixed.
		 */
		_v.va_mask = AT_GID;
		_v.va_gid = va->va_gid;
		if (e.error = nfs4args_verify(&argop[8], &_v, OP_NVERIFY,
		    supp_attrs)) {
			nfs4_end_op(mi, dvp, *vpp, &recov_state, TRUE);
			nfs_rw_exit(&drp->r_rwlock);
			nfs4_fattr4_free(crattr);
			kmem_free(argop, argoplist_size);
			return (e.error);
		}

		/*
		 * setattr
		 *
		 * We _know_ we're not messing with AT_SIZE or AT_XTIME,
		 * so no need for stateid or flags. Also we specify NULL
		 * rp since we're only interested in setting owner_group
		 * attributes.
		 */
		nfs4args_setattr(&argop[9], &_v, NULL, 0, NULL, cr, supp_attrs,
		    &e.error, 0);

		if (e.error) {
			nfs4_end_op(mi, dvp, *vpp, &recov_state, TRUE);
			nfs_rw_exit(&drp->r_rwlock);
			nfs4_fattr4_free(crattr);
			nfs4args_verify_free(&argop[8]);
			kmem_free(argop, argoplist_size);
			return (e.error);
		}
	} else {
		argop[1].argop = OP_SAVEFH;

		argop[5].argop = OP_RESTOREFH;

		argop[6].argop = OP_GETATTR;
		argop[6].nfs_argop4_u.opgetattr.attr_request = NFS4_VATTR_MASK;
		argop[6].nfs_argop4_u.opgetattr.mi = mi;
	}

	dnlc_remove(dvp, nm);

	doqueue = 1;
	t = gethrtime();
	rfs4call(mi, &args, &res, cr, &doqueue, 0, &e);

	needrecov = nfs4_needs_recovery(&e, FALSE, mi->mi_vfsp);
	if (e.error) {
		PURGE_ATTRCACHE4(dvp);
		if (!needrecov)
			goto out;
	}

	if (needrecov) {
		if (nfs4_start_recovery(&e, mi, dvp, NULL, NULL, NULL,
		    OP_CREATE, NULL, NULL, NULL) == FALSE) {
			/* Recovery was started; tear down and retry the op. */
			nfs4_end_op(mi, dvp, NULL, &recov_state,
			    needrecov);
			need_end_op = FALSE;
			nfs4_fattr4_free(crattr);
			if (setgid_flag) {
				nfs4args_verify_free(&argop[8]);
				nfs4args_setattr_free(&argop[9]);
			}
			(void) xdr_free(xdr_COMPOUND4res_clnt, (caddr_t)&res);
			goto recov_retry;
		}
	}

	resp = &res;

	if (res.status != NFS4_OK && res.array_len <= idx_fattr + 1) {

		if (res.status == NFS4ERR_BADOWNER)
			nfs4_log_badowner(mi, OP_CREATE);

		e.error = geterrno4(res.status);

		/*
		 * This check is left over from when create was implemented
		 * using a setattr op (instead of createattrs).  If the
		 * putfh/create/getfh failed, the error was returned.  If
		 * setattr/getattr failed, we keep going.
		 *
		 * It might be better to get rid of the GETFH also, and just
		 * do PUTFH/CREATE/GETATTR since the FH attr is mandatory.
		 * Then if any of the operations failed, we could return the
		 * error now, and remove much of the error code below.
		 */
		if (res.array_len <= idx_fattr) {
			/*
			 * Either Putfh, Create or Getfh failed.
			 */
			PURGE_ATTRCACHE4(dvp);
			/*
			 * nfs4_purge_stale_fh() may generate otw calls through
			 * nfs4_invalidate_pages. Hence the need to call
			 * nfs4_end_op() here to avoid nfs4_start_op() deadlock.
			 */
			nfs4_end_op(mi, dvp, NULL, &recov_state,
			    needrecov);
			need_end_op = FALSE;
			nfs4_purge_stale_fh(e.error, dvp, cr);
			goto out;
		}
	}

	resop = &res.array[idx_create];	/* create res */
	cinfo = &resop->nfs_resop4_u.opcreate.cinfo;

	resop = &res.array[idx_create + 1]; /* getfh res */
	gf_res = &resop->nfs_resop4_u.opgetfh;

	sfhp = sfh4_get(&gf_res->object, mi);
	if (e.error) {
		/*
		 * Create/Getfh succeeded but a later op failed: build the
		 * node without attrs and fetch the type separately.
		 */
		*vpp = vp = makenfs4node(sfhp, NULL, dvp->v_vfsp, t, cr, dvp,
		    fn_get(VTOSV(dvp)->sv_name, nm, sfhp));
		if (vp->v_type == VNON) {
			vattr.va_mask = AT_TYPE;
			/*
			 * Need to call nfs4_end_op before nfs4getattr to avoid
			 * potential nfs4_start_op deadlock. See RFE 4777612.
			 */
			nfs4_end_op(mi, dvp, NULL, &recov_state,
			    needrecov);
			need_end_op = FALSE;
			e.error = nfs4getattr(vp, &vattr, cr);
			if (e.error) {
				VN_RELE(vp);
				*vpp = NULL;
				goto out;
			}
			vp->v_type = vattr.va_type;
		}
		e.error = 0;
	} else {
		*vpp = vp = makenfs4node(sfhp,
		    &res.array[idx_fattr].nfs_resop4_u.opgetattr.ga_res,
		    dvp->v_vfsp, t, cr,
		    dvp, fn_get(VTOSV(dvp)->sv_name, nm, sfhp));
	}

	/*
	 * If compound succeeded, then update dir attrs
	 */
	if (res.status == NFS4_OK) {
		dinfo.di_garp = &res.array[6].nfs_resop4_u.opgetattr.ga_res;
		dinfo.di_cred = cr;
		dinfo.di_time_call = t;
		dinfop = &dinfo;
	} else
		dinfop = NULL;

	/* Update directory cache attribute, readdir and dnlc caches */
	nfs4_update_dircaches(cinfo, dvp, vp, nm, dinfop);

out:
	if (sfhp != NULL)
		sfh4_rele(&sfhp);
	nfs_rw_exit(&drp->r_rwlock);
	nfs4_fattr4_free(crattr);
	if (setgid_flag) {
		nfs4args_verify_free(&argop[8]);
		nfs4args_setattr_free(&argop[9]);
	}
	if (resp)
		(void) xdr_free(xdr_COMPOUND4res_clnt, (caddr_t)resp);
	if (need_end_op)
		nfs4_end_op(mi, dvp, NULL, &recov_state, needrecov);

	kmem_free(argop, argoplist_size);
	return (e.error);
}
7149 7154  
7150 7155  /* ARGSUSED */
7151 7156  static int
7152 7157  nfs4mknod(vnode_t *dvp, char *nm, struct vattr *va, enum vcexcl exclusive,
7153 7158      int mode, vnode_t **vpp, cred_t *cr)
7154 7159  {
7155 7160          int error;
7156 7161          vnode_t *vp;
7157 7162          nfs_ftype4 type;
7158 7163          specdata4 spec, *specp = NULL;
7159 7164  
7160 7165          ASSERT(nfs_zone() == VTOMI4(dvp)->mi_zone);
7161 7166  
7162 7167          switch (va->va_type) {
7163 7168          case VCHR:
7164 7169          case VBLK:
7165 7170                  type = (va->va_type == VCHR) ? NF4CHR : NF4BLK;
7166 7171                  spec.specdata1 = getmajor(va->va_rdev);
7167 7172                  spec.specdata2 = getminor(va->va_rdev);
7168 7173                  specp = &spec;
7169 7174                  break;
7170 7175  
7171 7176          case VFIFO:
7172 7177                  type = NF4FIFO;
7173 7178                  break;
7174 7179          case VSOCK:
7175 7180                  type = NF4SOCK;
7176 7181                  break;
7177 7182  
7178 7183          default:
7179 7184                  return (EINVAL);
7180 7185          }
7181 7186  
7182 7187          error = call_nfs4_create_req(dvp, nm, specp, va, &vp, cr, type);
7183 7188          if (error) {
7184 7189                  return (error);
7185 7190          }
7186 7191  
7187 7192          /*
7188 7193           * This might not be needed any more; special case to deal
7189 7194           * with problematic v2/v3 servers.  Since create was unable
7190 7195           * to set group correctly, not sure what hope setattr has.
7191 7196           */
7192 7197          if (va->va_gid != VTOR4(vp)->r_attr.va_gid) {
7193 7198                  va->va_mask = AT_GID;
7194 7199                  (void) nfs4setattr(vp, va, 0, cr, NULL);
7195 7200          }
7196 7201  
7197 7202          /*
7198 7203           * If vnode is a device create special vnode
7199 7204           */
7200 7205          if (ISVDEV(vp->v_type)) {
7201 7206                  *vpp = specvp(vp, vp->v_rdev, vp->v_type, cr);
7202 7207                  VN_RELE(vp);
7203 7208          } else {
7204 7209                  *vpp = vp;
7205 7210          }
7206 7211          return (error);
7207 7212  }
7208 7213  
7209 7214  /*
7210 7215   * Remove requires that the current fh be the target directory.
7211 7216   * After the operation, the current fh is unchanged.
7212 7217   * The compound op structure is:
7213 7218   *      PUTFH(targetdir), REMOVE
7214 7219   *
7215 7220   * Weirdness: if the vnode to be removed is open
7216 7221   * we rename it instead of removing it and nfs_inactive
7217 7222   * will remove the new name.
7218 7223   */
/* ARGSUSED */
static int
nfs4_remove(vnode_t *dvp, char *nm, cred_t *cr, caller_context_t *ct, int flags)
{
	COMPOUND4args_clnt args;
	COMPOUND4res_clnt res, *resp = NULL;
	REMOVE4res *rm_res;
	nfs_argop4 argop[3];
	nfs_resop4 *resop;
	vnode_t *vp;
	char *tmpname;
	int doqueue;
	mntinfo4_t *mi;
	rnode4_t *rp;
	rnode4_t *drp;
	int needrecov = 0;
	nfs4_recov_state_t recov_state;
	int isopen;
	nfs4_error_t e = { 0, NFS4_OK, RPC_SUCCESS };
	dirattr_info_t dinfo;

	/* OTW activity must originate in the zone that mounted this fs. */
	if (nfs_zone() != VTOMI4(dvp)->mi_zone)
		return (EPERM);
	drp = VTOR4(dvp);
	/* Serialize modifications of the parent directory. */
	if (nfs_rw_enter_sig(&drp->r_rwlock, RW_WRITER, INTR4(dvp)))
		return (EINTR);

	e.error = nfs4lookup(dvp, nm, &vp, cr, 0);
	if (e.error) {
		nfs_rw_exit(&drp->r_rwlock);
		return (e.error);
	}

	/* Directories must be removed via VOP_RMDIR, not VOP_REMOVE. */
	if (vp->v_type == VDIR) {
		VN_RELE(vp);
		nfs_rw_exit(&drp->r_rwlock);
		return (EISDIR);
	}

	/*
	 * First just remove the entry from the name cache, as it
	 * is most likely the only entry for this vp.
	 */
	dnlc_remove(dvp, nm);

	rp = VTOR4(vp);

	/*
	 * For regular file types, check to see if the file is open by looking
	 * at the open streams.
	 * For all other types, check the reference count on the vnode.  Since
	 * they are not opened OTW they never have an open stream.
	 *
	 * If the file is open, rename it to .nfsXXXX.
	 */
	if (vp->v_type != VREG) {
		/*
		 * If the file has a v_count > 1 then there may be more than
		 * one entry in the name cache due to multiple links or an
		 * open file, but we don't have the real reference count so
		 * flush all possible entries.
		 */
		if (vp->v_count > 1)
			dnlc_purge_vp(vp);

		/*
		 * Now we have the real reference count.
		 */
		isopen = vp->v_count > 1;
	} else {
		mutex_enter(&rp->r_os_lock);
		isopen = list_head(&rp->r_open_streams) != NULL;
		mutex_exit(&rp->r_os_lock);
	}

	/*
	 * If the file is open (and not already silly-renamed under the
	 * name being removed), implement "unlink-open-file" semantics:
	 * rename it to a fresh .nfsXXXX name instead of removing it.
	 * The stashed r_unl* state lets the last-close path remove the
	 * temporary name later.
	 */
	mutex_enter(&rp->r_statelock);
	if (isopen &&
	    (rp->r_unldvp == NULL || strcmp(nm, rp->r_unlname) == 0)) {
		mutex_exit(&rp->r_statelock);
		tmpname = newname();
		e.error = nfs4rename(dvp, nm, dvp, tmpname, cr, ct);
		if (e.error)
			kmem_free(tmpname, MAXNAMELEN);
		else {
			mutex_enter(&rp->r_statelock);
			if (rp->r_unldvp == NULL) {
				/* First silly rename: record dir/cred/name. */
				VN_HOLD(dvp);
				rp->r_unldvp = dvp;
				if (rp->r_unlcred != NULL)
					crfree(rp->r_unlcred);
				crhold(cr);
				rp->r_unlcred = cr;
				rp->r_unlname = tmpname;
			} else {
				/* Already recorded; swap in the new name. */
				kmem_free(rp->r_unlname, MAXNAMELEN);
				rp->r_unlname = tmpname;
			}
			mutex_exit(&rp->r_statelock);
		}
		VN_RELE(vp);
		nfs_rw_exit(&drp->r_rwlock);
		return (e.error);
	}
	/*
	 * Actually remove the file/dir
	 */
	mutex_exit(&rp->r_statelock);

	/*
	 * We need to flush any dirty pages which happen to
	 * be hanging around before removing the file.
	 * This shouldn't happen very often since in NFSv4
	 * we should be close to open consistent.
	 */
	if (nfs4_has_pages(vp) &&
	    ((rp->r_flags & R4DIRTY) || rp->r_count > 0)) {
		e.error = nfs4_putpage(vp, (u_offset_t)0, 0, 0, cr, ct);
		if (e.error && (e.error == ENOSPC || e.error == EDQUOT)) {
			/* Remember hard write errors on the rnode. */
			mutex_enter(&rp->r_statelock);
			if (!rp->r_error)
				rp->r_error = e.error;
			mutex_exit(&rp->r_statelock);
		}
	}

	mi = VTOMI4(dvp);

	/* Return any delegation before the server processes the REMOVE. */
	(void) nfs4delegreturn(rp, NFS4_DR_REOPEN);
	recov_state.rs_flags = 0;
	recov_state.rs_num_retry_despite_err = 0;

recov_retry:
	/*
	 * Remove ops: putfh dir; remove; getattr dir
	 */
	args.ctag = TAG_REMOVE;
	args.array_len = 3;
	args.array = argop;

	e.error = nfs4_start_op(VTOMI4(dvp), dvp, NULL, &recov_state);
	if (e.error) {
		nfs_rw_exit(&drp->r_rwlock);
		VN_RELE(vp);
		return (e.error);
	}

	/* putfh directory */
	argop[0].argop = OP_CPUTFH;
	argop[0].nfs_argop4_u.opcputfh.sfh = drp->r_fh;

	/* remove */
	argop[1].argop = OP_CREMOVE;
	argop[1].nfs_argop4_u.opcremove.ctarget = nm;

	/* getattr dir */
	argop[2].argop = OP_GETATTR;
	argop[2].nfs_argop4_u.opgetattr.attr_request = NFS4_VATTR_MASK;
	argop[2].nfs_argop4_u.opgetattr.mi = mi;

	doqueue = 1;
	dinfo.di_time_call = gethrtime();
	rfs4call(mi, &args, &res, cr, &doqueue, 0, &e);

	/* The target's cached attrs (e.g. link count) are now stale. */
	PURGE_ATTRCACHE4(vp);

	needrecov = nfs4_needs_recovery(&e, FALSE, mi->mi_vfsp);
	if (e.error)
		PURGE_ATTRCACHE4(dvp);

	if (needrecov) {
		/* If recovery succeeds, retry the whole compound. */
		if (nfs4_start_recovery(&e, VTOMI4(dvp), dvp,
		    NULL, NULL, NULL, OP_REMOVE, NULL, NULL, NULL) == FALSE) {
			if (!e.error)
				(void) xdr_free(xdr_COMPOUND4res_clnt,
				    (caddr_t)&res);
			nfs4_end_op(VTOMI4(dvp), dvp, NULL, &recov_state,
			    needrecov);
			goto recov_retry;
		}
	}

	/*
	 * Matching nfs4_end_op() for start_op() above.
	 * There is a path in the code below which calls
	 * nfs4_purge_stale_fh(), which may generate otw calls through
	 * nfs4_invalidate_pages. Hence we need to call nfs4_end_op()
	 * here to avoid nfs4_start_op() deadlock.
	 */
	nfs4_end_op(VTOMI4(dvp), dvp, NULL, &recov_state, needrecov);

	if (!e.error) {
		resp = &res;

		if (res.status) {
			e.error = geterrno4(res.status);
			PURGE_ATTRCACHE4(dvp);
			nfs4_purge_stale_fh(e.error, dvp, cr);
		} else {
			resop = &res.array[1];	/* remove res */
			rm_res = &resop->nfs_resop4_u.opremove;

			dinfo.di_garp =
			    &res.array[2].nfs_resop4_u.opgetattr.ga_res;
			dinfo.di_cred = cr;

			/* Update directory attr, readdir and dnlc caches */
			nfs4_update_dircaches(&rm_res->cinfo, dvp, NULL, NULL,
			    &dinfo);
		}
	}
	nfs_rw_exit(&drp->r_rwlock);
	if (resp)
		(void) xdr_free(xdr_COMPOUND4res_clnt, (caddr_t)resp);

	/*
	 * On success, fire the remove vnode event against the master
	 * vnode rather than a shadow vnode, so file-event observers
	 * see the removal.
	 */
	if (e.error == 0) {
		vnode_t *tvp;
		rnode4_t *trp;
		trp = VTOR4(vp);
		tvp = vp;
		if (IS_SHADOW(vp, trp))
			tvp = RTOV4(trp);
		vnevent_remove(tvp, dvp, nm, ct);
	}
	VN_RELE(vp);
	return (e.error);
}
7445 7450  
7446 7451  /*
7447 7452   * Link requires that the current fh be the target directory and the
7448 7453   * saved fh be the source fh. After the operation, the current fh is unchanged.
7449 7454   * Thus the compound op structure is:
7450 7455   *      PUTFH(file), SAVEFH, PUTFH(targetdir), LINK, RESTOREFH,
7451 7456   *      GETATTR(file)
7452 7457   */
/* ARGSUSED */
static int
nfs4_link(vnode_t *tdvp, vnode_t *svp, char *tnm, cred_t *cr,
    caller_context_t *ct, int flags)
{
	COMPOUND4args_clnt args;
	COMPOUND4res_clnt res, *resp = NULL;
	LINK4res *ln_res;
	int argoplist_size  = 7 * sizeof (nfs_argop4);
	nfs_argop4 *argop;
	nfs_resop4 *resop;
	vnode_t *realvp, *nvp;
	int doqueue;
	mntinfo4_t *mi;
	rnode4_t *tdrp;
	bool_t needrecov = FALSE;
	nfs4_recov_state_t recov_state;
	hrtime_t t;
	nfs4_error_t e = { 0, NFS4_OK, RPC_SUCCESS };
	dirattr_info_t dinfo;

	ASSERT(*tnm != '\0');
	ASSERT(tdvp->v_type == VDIR);
	ASSERT(nfs4_consistent_type(tdvp));
	ASSERT(nfs4_consistent_type(svp));

	/* OTW activity must originate in the zone that mounted this fs. */
	if (nfs_zone() != VTOMI4(tdvp)->mi_zone)
		return (EPERM);
	/* If svp is a layered vnode, operate on the real one beneath it. */
	if (VOP_REALVP(svp, &realvp, ct) == 0) {
		svp = realvp;
		ASSERT(nfs4_consistent_type(svp));
	}

	tdrp = VTOR4(tdvp);
	mi = VTOMI4(svp);

	/* Fail fast if the server previously told us it can't LINK. */
	if (!(mi->mi_flags & MI4_LINK)) {
		return (EOPNOTSUPP);
	}
	recov_state.rs_flags = 0;
	recov_state.rs_num_retry_despite_err = 0;

	/* Serialize modifications of the target directory. */
	if (nfs_rw_enter_sig(&tdrp->r_rwlock, RW_WRITER, INTR4(tdvp)))
		return (EINTR);

recov_retry:
	argop = kmem_alloc(argoplist_size, KM_SLEEP);

	args.ctag = TAG_LINK;

	/*
	 * Link ops: putfh fl; savefh; putfh tdir; link; getattr(dir);
	 * restorefh; getattr(fl)
	 */
	args.array_len = 7;
	args.array = argop;

	e.error = nfs4_start_op(VTOMI4(svp), svp, tdvp, &recov_state);
	if (e.error) {
		kmem_free(argop, argoplist_size);
		nfs_rw_exit(&tdrp->r_rwlock);
		return (e.error);
	}

	/* 0. putfh file */
	argop[0].argop = OP_CPUTFH;
	argop[0].nfs_argop4_u.opcputfh.sfh = VTOR4(svp)->r_fh;

	/* 1. save current fh to free up the space for the dir */
	argop[1].argop = OP_SAVEFH;

	/* 2. putfh targetdir */
	argop[2].argop = OP_CPUTFH;
	argop[2].nfs_argop4_u.opcputfh.sfh = tdrp->r_fh;

	/* 3. link: current_fh is targetdir, saved_fh is source */
	argop[3].argop = OP_CLINK;
	argop[3].nfs_argop4_u.opclink.cnewname = tnm;

	/* 4. Get attributes of dir */
	argop[4].argop = OP_GETATTR;
	argop[4].nfs_argop4_u.opgetattr.attr_request = NFS4_VATTR_MASK;
	argop[4].nfs_argop4_u.opgetattr.mi = mi;

	/* 5. If link was successful, restore current vp to file */
	argop[5].argop = OP_RESTOREFH;

	/* 6. Get attributes of linked object */
	argop[6].argop = OP_GETATTR;
	argop[6].nfs_argop4_u.opgetattr.attr_request = NFS4_VATTR_MASK;
	argop[6].nfs_argop4_u.opgetattr.mi = mi;

	dnlc_remove(tdvp, tnm);

	doqueue = 1;
	t = gethrtime();

	rfs4call(VTOMI4(svp), &args, &res, cr, &doqueue, 0, &e);

	needrecov = nfs4_needs_recovery(&e, FALSE, svp->v_vfsp);
	if (e.error != 0 && !needrecov) {
		/* Hard failure: attrs of both objects may be stale. */
		PURGE_ATTRCACHE4(tdvp);
		PURGE_ATTRCACHE4(svp);
		nfs4_end_op(VTOMI4(svp), svp, tdvp, &recov_state, needrecov);
		goto out;
	}

	if (needrecov) {
		bool_t abort;

		abort = nfs4_start_recovery(&e, VTOMI4(svp), svp, tdvp,
		    NULL, NULL, OP_LINK, NULL, NULL, NULL);
		if (abort == FALSE) {
			/* Recovery succeeded; retry the whole compound. */
			nfs4_end_op(VTOMI4(svp), svp, tdvp, &recov_state,
			    needrecov);
			kmem_free(argop, argoplist_size);
			if (!e.error)
				(void) xdr_free(xdr_COMPOUND4res_clnt,
				    (caddr_t)&res);
			goto recov_retry;
		} else {
			if (e.error != 0) {
				PURGE_ATTRCACHE4(tdvp);
				PURGE_ATTRCACHE4(svp);
				nfs4_end_op(VTOMI4(svp), svp, tdvp,
				    &recov_state, needrecov);
				goto out;
			}
			/* fall through for res.status case */
		}
	}

	nfs4_end_op(VTOMI4(svp), svp, tdvp, &recov_state, needrecov);

	resp = &res;
	if (res.status) {
		/* If link succeeded, then don't return error */
		e.error = geterrno4(res.status);
		if (res.array_len <= 4) {
			/*
			 * Either Putfh, Savefh, Putfh dir, or Link failed
			 */
			PURGE_ATTRCACHE4(svp);
			PURGE_ATTRCACHE4(tdvp);
			if (e.error == EOPNOTSUPP) {
				/* Remember that this server can't LINK. */
				mutex_enter(&mi->mi_lock);
				mi->mi_flags &= ~MI4_LINK;
				mutex_exit(&mi->mi_lock);
			}
			/* Remap EISDIR to EPERM for non-root user for SVVS */
			/* XXX-LP */
			if (e.error == EISDIR && crgetuid(cr) != 0)
				e.error = EPERM;
			goto out;
		}
	}

	/* either no error or one of the postop getattr failed */

	/*
	 * XXX - if LINK succeeded, but no attrs were returned for link
	 * file, purge its cache.
	 *
	 * XXX Perform a simplified version of wcc checking. Instead of
	 * have another getattr to get pre-op, just purge cache if
	 * any of the ops prior to and including the getattr failed.
	 * If the getattr succeeded then update the attrcache accordingly.
	 */

	/*
	 * update cache with link file postattrs.
	 * Note: at this point resop points to link res.
	 */
	resop = &res.array[3];	/* link res */
	ln_res = &resop->nfs_resop4_u.oplink;
	if (res.status == NFS4_OK)
		e.error = nfs4_update_attrcache(res.status,
		    &res.array[6].nfs_resop4_u.opgetattr.ga_res,
		    t, svp, cr);

	/*
	 * Call makenfs4node to create the new shadow vp for tnm.
	 * We pass NULL attrs because we just cached attrs for
	 * the src object.  All we're trying to accomplish is
	 * to create the new shadow vnode.
	 */
	nvp = makenfs4node(VTOR4(svp)->r_fh, NULL, tdvp->v_vfsp, t, cr,
	    tdvp, fn_get(VTOSV(tdvp)->sv_name, tnm, VTOR4(svp)->r_fh));

	/* Update target cache attribute, readdir and dnlc caches */
	dinfo.di_garp = &res.array[4].nfs_resop4_u.opgetattr.ga_res;
	dinfo.di_time_call = t;
	dinfo.di_cred = cr;

	nfs4_update_dircaches(&ln_res->cinfo, tdvp, nvp, tnm, &dinfo);
	ASSERT(nfs4_consistent_type(tdvp));
	ASSERT(nfs4_consistent_type(svp));
	ASSERT(nfs4_consistent_type(nvp));
	VN_RELE(nvp);

	if (!e.error) {
		vnode_t *tvp;
		rnode4_t *trp;
		/*
		 * Notify the source file of this link operation.
		 * Use the master vnode rather than a shadow vnode so
		 * event observers see the right object.
		 */
		trp = VTOR4(svp);
		tvp = svp;
		if (IS_SHADOW(svp, trp))
			tvp = RTOV4(trp);
		vnevent_link(tvp, ct);
	}
out:
	kmem_free(argop, argoplist_size);
	if (resp)
		(void) xdr_free(xdr_COMPOUND4res_clnt, (caddr_t)resp);

	nfs_rw_exit(&tdrp->r_rwlock);

	return (e.error);
}
7674 7679  
7675 7680  /* ARGSUSED */
7676 7681  static int
7677 7682  nfs4_rename(vnode_t *odvp, char *onm, vnode_t *ndvp, char *nnm, cred_t *cr,
7678 7683      caller_context_t *ct, int flags)
7679 7684  {
7680 7685          vnode_t *realvp;
7681 7686  
7682 7687          if (nfs_zone() != VTOMI4(odvp)->mi_zone)
7683 7688                  return (EPERM);
7684 7689          if (VOP_REALVP(ndvp, &realvp, ct) == 0)
7685 7690                  ndvp = realvp;
7686 7691  
7687 7692          return (nfs4rename(odvp, onm, ndvp, nnm, cr, ct));
7688 7693  }
7689 7694  
7690 7695  /*
7691 7696   * nfs4rename does the real work of renaming in NFS Version 4.
7692 7697   *
7693 7698   * A file handle is considered volatile for renaming purposes if either
7694 7699   * of the volatile bits are turned on. However, the compound may differ
7695 7700   * based on the likelihood of the filehandle to change during rename.
7696 7701   */
7697 7702  static int
7698 7703  nfs4rename(vnode_t *odvp, char *onm, vnode_t *ndvp, char *nnm, cred_t *cr,
7699 7704      caller_context_t *ct)
7700 7705  {
7701 7706          int error;
7702 7707          mntinfo4_t *mi;
7703 7708          vnode_t *nvp = NULL;
7704 7709          vnode_t *ovp = NULL;
7705 7710          char *tmpname = NULL;
7706 7711          rnode4_t *rp;
7707 7712          rnode4_t *odrp;
7708 7713          rnode4_t *ndrp;
7709 7714          int did_link = 0;
7710 7715          int do_link = 1;
7711 7716          nfsstat4 stat = NFS4_OK;
7712 7717  
7713 7718          ASSERT(nfs_zone() == VTOMI4(odvp)->mi_zone);
7714 7719          ASSERT(nfs4_consistent_type(odvp));
7715 7720          ASSERT(nfs4_consistent_type(ndvp));
7716 7721  
7717 7722          if (onm[0] == '.' && (onm[1] == '\0' ||
7718 7723              (onm[1] == '.' && onm[2] == '\0')))
7719 7724                  return (EINVAL);
7720 7725  
7721 7726          if (nnm[0] == '.' && (nnm[1] == '\0' ||
7722 7727              (nnm[1] == '.' && nnm[2] == '\0')))
7723 7728                  return (EINVAL);
7724 7729  
7725 7730          odrp = VTOR4(odvp);
7726 7731          ndrp = VTOR4(ndvp);
7727 7732          if ((intptr_t)odrp < (intptr_t)ndrp) {
7728 7733                  if (nfs_rw_enter_sig(&odrp->r_rwlock, RW_WRITER, INTR4(odvp)))
7729 7734                          return (EINTR);
7730 7735                  if (nfs_rw_enter_sig(&ndrp->r_rwlock, RW_WRITER, INTR4(ndvp))) {
7731 7736                          nfs_rw_exit(&odrp->r_rwlock);
7732 7737                          return (EINTR);
7733 7738                  }
7734 7739          } else {
7735 7740                  if (nfs_rw_enter_sig(&ndrp->r_rwlock, RW_WRITER, INTR4(ndvp)))
7736 7741                          return (EINTR);
7737 7742                  if (nfs_rw_enter_sig(&odrp->r_rwlock, RW_WRITER, INTR4(odvp))) {
7738 7743                          nfs_rw_exit(&ndrp->r_rwlock);
7739 7744                          return (EINTR);
7740 7745                  }
7741 7746          }
7742 7747  
7743 7748          /*
7744 7749           * Lookup the target file.  If it exists, it needs to be
7745 7750           * checked to see whether it is a mount point and whether
7746 7751           * it is active (open).
7747 7752           */
7748 7753          error = nfs4lookup(ndvp, nnm, &nvp, cr, 0);
7749 7754          if (!error) {
7750 7755                  int     isactive;
7751 7756  
7752 7757                  ASSERT(nfs4_consistent_type(nvp));
7753 7758                  /*
7754 7759                   * If this file has been mounted on, then just
7755 7760                   * return busy because renaming to it would remove
7756 7761                   * the mounted file system from the name space.
7757 7762                   */
7758 7763                  if (vn_ismntpt(nvp)) {
7759 7764                          VN_RELE(nvp);
7760 7765                          nfs_rw_exit(&odrp->r_rwlock);
7761 7766                          nfs_rw_exit(&ndrp->r_rwlock);
7762 7767                          return (EBUSY);
7763 7768                  }
7764 7769  
7765 7770                  /*
7766 7771                   * First just remove the entry from the name cache, as it
7767 7772                   * is most likely the only entry for this vp.
7768 7773                   */
7769 7774                  dnlc_remove(ndvp, nnm);
7770 7775  
7771 7776                  rp = VTOR4(nvp);
7772 7777  
7773 7778                  if (nvp->v_type != VREG) {
7774 7779                          /*
7775 7780                           * Purge the name cache of all references to this vnode
7776 7781                           * so that we can check the reference count to infer
7777 7782                           * whether it is active or not.
7778 7783                           */
7779 7784                          if (nvp->v_count > 1)
7780 7785                                  dnlc_purge_vp(nvp);
7781 7786  
7782 7787                          isactive = nvp->v_count > 1;
7783 7788                  } else {
7784 7789                          mutex_enter(&rp->r_os_lock);
7785 7790                          isactive = list_head(&rp->r_open_streams) != NULL;
7786 7791                          mutex_exit(&rp->r_os_lock);
7787 7792                  }
7788 7793  
7789 7794                  /*
7790 7795                   * If the vnode is active and is not a directory,
7791 7796                   * arrange to rename it to a
7792 7797                   * temporary file so that it will continue to be
7793 7798                   * accessible.  This implements the "unlink-open-file"
7794 7799                   * semantics for the target of a rename operation.
7795 7800                   * Before doing this though, make sure that the
7796 7801                   * source and target files are not already the same.
7797 7802                   */
7798 7803                  if (isactive && nvp->v_type != VDIR) {
7799 7804                          /*
7800 7805                           * Lookup the source name.
7801 7806                           */
7802 7807                          error = nfs4lookup(odvp, onm, &ovp, cr, 0);
7803 7808  
7804 7809                          /*
7805 7810                           * The source name *should* already exist.
7806 7811                           */
7807 7812                          if (error) {
7808 7813                                  VN_RELE(nvp);
7809 7814                                  nfs_rw_exit(&odrp->r_rwlock);
7810 7815                                  nfs_rw_exit(&ndrp->r_rwlock);
7811 7816                                  return (error);
7812 7817                          }
7813 7818  
7814 7819                          ASSERT(nfs4_consistent_type(ovp));
7815 7820  
7816 7821                          /*
7817 7822                           * Compare the two vnodes.  If they are the same,
7818 7823                           * just release all held vnodes and return success.
7819 7824                           */
7820 7825                          if (VN_CMP(ovp, nvp)) {
7821 7826                                  VN_RELE(ovp);
7822 7827                                  VN_RELE(nvp);
7823 7828                                  nfs_rw_exit(&odrp->r_rwlock);
7824 7829                                  nfs_rw_exit(&ndrp->r_rwlock);
7825 7830                                  return (0);
7826 7831                          }
7827 7832  
7828 7833                          /*
7829 7834                           * Can't mix and match directories and non-
7830 7835                           * directories in rename operations.  We already
7831 7836                           * know that the target is not a directory.  If
7832 7837                           * the source is a directory, return an error.
7833 7838                           */
7834 7839                          if (ovp->v_type == VDIR) {
7835 7840                                  VN_RELE(ovp);
7836 7841                                  VN_RELE(nvp);
7837 7842                                  nfs_rw_exit(&odrp->r_rwlock);
7838 7843                                  nfs_rw_exit(&ndrp->r_rwlock);
7839 7844                                  return (ENOTDIR);
7840 7845                          }
7841 7846  link_call:
7842 7847                          /*
7843 7848                           * The target file exists, is not the same as
7844 7849                           * the source file, and is active.  We first
7845 7850                           * try to Link it to a temporary filename to
7846 7851                           * avoid having the server removing the file
7847 7852                           * completely (which could cause data loss to
7848 7853                           * the user's POV in the event the Rename fails
7849 7854                           * -- see bug 1165874).
7850 7855                           */
7851 7856                          /*
7852 7857                           * The do_link and did_link booleans are
7853 7858                           * introduced in the event we get NFS4ERR_FILE_OPEN
7854 7859                           * returned for the Rename.  Some servers can
7855 7860                           * not Rename over an Open file, so they return
7856 7861                           * this error.  The client needs to Remove the
7857 7862                           * newly created Link and do two Renames, just
7858 7863                           * as if the server didn't support LINK.
7859 7864                           */
7860 7865                          tmpname = newname();
7861 7866                          error = 0;
7862 7867  
7863 7868                          if (do_link) {
7864 7869                                  error = nfs4_link(ndvp, nvp, tmpname, cr,
7865 7870                                      NULL, 0);
7866 7871                          }
7867 7872                          if (error == EOPNOTSUPP || !do_link) {
7868 7873                                  error = nfs4_rename(ndvp, nnm, ndvp, tmpname,
7869 7874                                      cr, NULL, 0);
7870 7875                                  did_link = 0;
7871 7876                          } else {
7872 7877                                  did_link = 1;
7873 7878                          }
7874 7879                          if (error) {
7875 7880                                  kmem_free(tmpname, MAXNAMELEN);
7876 7881                                  VN_RELE(ovp);
7877 7882                                  VN_RELE(nvp);
7878 7883                                  nfs_rw_exit(&odrp->r_rwlock);
7879 7884                                  nfs_rw_exit(&ndrp->r_rwlock);
7880 7885                                  return (error);
7881 7886                          }
7882 7887  
7883 7888                          mutex_enter(&rp->r_statelock);
7884 7889                          if (rp->r_unldvp == NULL) {
7885 7890                                  VN_HOLD(ndvp);
7886 7891                                  rp->r_unldvp = ndvp;
7887 7892                                  if (rp->r_unlcred != NULL)
7888 7893                                          crfree(rp->r_unlcred);
7889 7894                                  crhold(cr);
7890 7895                                  rp->r_unlcred = cr;
7891 7896                                  rp->r_unlname = tmpname;
7892 7897                          } else {
7893 7898                                  if (rp->r_unlname)
7894 7899                                          kmem_free(rp->r_unlname, MAXNAMELEN);
7895 7900                                  rp->r_unlname = tmpname;
7896 7901                          }
7897 7902                          mutex_exit(&rp->r_statelock);
7898 7903                  }
7899 7904  
7900 7905                  (void) nfs4delegreturn(VTOR4(nvp), NFS4_DR_PUSH|NFS4_DR_REOPEN);
7901 7906  
7902 7907                  ASSERT(nfs4_consistent_type(nvp));
7903 7908          }
7904 7909  
7905 7910          if (ovp == NULL) {
7906 7911                  /*
7907 7912                   * When renaming directories to be a subdirectory of a
7908 7913                   * different parent, the dnlc entry for ".." will no
7909 7914                   * longer be valid, so it must be removed.
7910 7915                   *
7911 7916                   * We do a lookup here to determine whether we are renaming
7912 7917                   * a directory and we need to check if we are renaming
7913 7918                   * an unlinked file.  This might have already been done
7914 7919                   * in previous code, so we check ovp == NULL to avoid
7915 7920                   * doing it twice.
7916 7921                   */
7917 7922                  error = nfs4lookup(odvp, onm, &ovp, cr, 0);
7918 7923                  /*
7919 7924                   * The source name *should* already exist.
7920 7925                   */
7921 7926                  if (error) {
7922 7927                          nfs_rw_exit(&odrp->r_rwlock);
7923 7928                          nfs_rw_exit(&ndrp->r_rwlock);
7924 7929                          if (nvp) {
7925 7930                                  VN_RELE(nvp);
7926 7931                          }
7927 7932                          return (error);
7928 7933                  }
7929 7934                  ASSERT(ovp != NULL);
7930 7935                  ASSERT(nfs4_consistent_type(ovp));
7931 7936          }
7932 7937  
7933 7938          /*
7934 7939           * Is the object being renamed a dir, and if so, is
7935 7940           * it being renamed to a child of itself?  The underlying
7936 7941           * fs should ultimately return EINVAL for this case;
7937 7942           * however, buggy beta non-Solaris NFSv4 servers at
7938 7943           * interop testing events have allowed this behavior,
7939 7944           * and it caused our client to panic due to a recursive
7940 7945           * mutex_enter in fn_move.
7941 7946           *
7942 7947           * The tedious locking in fn_move could be changed to
7943 7948           * deal with this case, and the client could avoid the
7944 7949           * panic; however, the client would just confuse itself
7945 7950           * later and misbehave.  A better way to handle the broken
7946 7951           * server is to detect this condition and return EINVAL
7947 7952           * without ever sending the bogus rename to the server.
7948 7953           * We know the rename is invalid -- just fail it now.
7949 7954           */
7950 7955          if (ovp->v_type == VDIR && VN_CMP(ndvp, ovp)) {
7951 7956                  VN_RELE(ovp);
7952 7957                  nfs_rw_exit(&odrp->r_rwlock);
7953 7958                  nfs_rw_exit(&ndrp->r_rwlock);
7954 7959                  if (nvp) {
7955 7960                          VN_RELE(nvp);
7956 7961                  }
7957 7962                  return (EINVAL);
7958 7963          }
7959 7964  
7960 7965          (void) nfs4delegreturn(VTOR4(ovp), NFS4_DR_PUSH|NFS4_DR_REOPEN);
7961 7966  
7962 7967          /*
7963 7968           * If FH4_VOL_RENAME or FH4_VOLATILE_ANY bits are set, it is
7964 7969           * possible for the filehandle to change due to the rename.
7965 7970           * If neither of these bits is set, but FH4_VOL_MIGRATION is set,
7966 7971           * the fh will not change because of the rename, but we still need
7967 7972           * to update its rnode entry with the new name for
7968 7973           * an eventual fh change due to migration. The FH4_NOEXPIRE_ON_OPEN
7969 7974           * has no effect on these for now, but for future improvements,
7970 7975           * we might want to use it too to simplify handling of files
7971 7976           * that are open with that flag on. (XXX)
7972 7977           */
7973 7978          mi = VTOMI4(odvp);
7974 7979          if (NFS4_VOLATILE_FH(mi))
7975 7980                  error = nfs4rename_volatile_fh(odvp, onm, ovp, ndvp, nnm, cr,
7976 7981                      &stat);
7977 7982          else
7978 7983                  error = nfs4rename_persistent_fh(odvp, onm, ovp, ndvp, nnm, cr,
7979 7984                      &stat);
7980 7985  
7981 7986          ASSERT(nfs4_consistent_type(odvp));
7982 7987          ASSERT(nfs4_consistent_type(ndvp));
7983 7988          ASSERT(nfs4_consistent_type(ovp));
7984 7989  
7985 7990          if (stat == NFS4ERR_FILE_OPEN && did_link) {
7986 7991                  do_link = 0;
7987 7992                  /*
7988 7993                   * Before the 'link_call' code, we did a nfs4_lookup
7989 7994                   * that puts a VN_HOLD on nvp.  After the nfs4_link
7990 7995                   * call we call VN_RELE to match that hold.  We need
7991 7996                   * to place an additional VN_HOLD here since we will
7992 7997                   * be hitting that VN_RELE again.
7993 7998                   */
7994 7999                  VN_HOLD(nvp);
7995 8000  
7996 8001                  (void) nfs4_remove(ndvp, tmpname, cr, NULL, 0);
7997 8002  
7998 8003                  /* Undo the unlinked file naming stuff we just did */
7999 8004                  mutex_enter(&rp->r_statelock);
8000 8005                  if (rp->r_unldvp) {
8001 8006                          VN_RELE(ndvp);
8002 8007                          rp->r_unldvp = NULL;
8003 8008                          if (rp->r_unlcred != NULL)
8004 8009                                  crfree(rp->r_unlcred);
8005 8010                          rp->r_unlcred = NULL;
8048› 8006 8011                          /* rp->r_unlname points to tmpname */
8007 8012                          if (rp->r_unlname)
8008 8013                                  kmem_free(rp->r_unlname, MAXNAMELEN);
8009 8014                          rp->r_unlname = NULL;
8010 8015                  }
8011 8016                  mutex_exit(&rp->r_statelock);
8012 8017  
8013 8018                  if (nvp) {
8014 8019                          VN_RELE(nvp);
8015 8020                  }
8016 8021                  goto link_call;
8017 8022          }
8018 8023  
8019 8024          if (error) {
8020 8025                  VN_RELE(ovp);
8021 8026                  nfs_rw_exit(&odrp->r_rwlock);
8022 8027                  nfs_rw_exit(&ndrp->r_rwlock);
8023 8028                  if (nvp) {
8024 8029                          VN_RELE(nvp);
8025 8030                  }
8026 8031                  return (error);
8027 8032          }
8028 8033  
8029 8034          /*
8030 8035           * when renaming directories to be a subdirectory of a
8031 8036           * different parent, the dnlc entry for ".." will no
8032 8037           * longer be valid, so it must be removed
8033 8038           */
8034 8039          rp = VTOR4(ovp);
8035 8040          if (ndvp != odvp) {
8036 8041                  if (ovp->v_type == VDIR) {
8037 8042                          dnlc_remove(ovp, "..");
8038 8043                          if (rp->r_dir != NULL)
8039 8044                                  nfs4_purge_rddir_cache(ovp);
8040 8045                  }
8041 8046          }
8042 8047  
8043 8048          /*
8044 8049           * If we are renaming the unlinked file, update the
8045 8050           * r_unldvp and r_unlname as needed.
8046 8051           */
8047 8052          mutex_enter(&rp->r_statelock);
8048 8053          if (rp->r_unldvp != NULL) {
8049 8054                  if (strcmp(rp->r_unlname, onm) == 0) {
8050 8055                          (void) strncpy(rp->r_unlname, nnm, MAXNAMELEN);
8051 8056                          rp->r_unlname[MAXNAMELEN - 1] = '\0';
8052 8057                          if (ndvp != rp->r_unldvp) {
8053 8058                                  VN_RELE(rp->r_unldvp);
8054 8059                                  rp->r_unldvp = ndvp;
8055 8060                                  VN_HOLD(ndvp);
  
    | 
      ↓ open down ↓ | 
    4302 lines elided | 
    
      ↑ open up ↑ | 
  
8056 8061                          }
8057 8062                  }
8058 8063          }
8059 8064          mutex_exit(&rp->r_statelock);
8060 8065  
8061 8066          /*
8062 8067           * Notify the rename vnevents to source vnode, and to the target
8063 8068           * vnode if it already existed.
8064 8069           */
8065 8070          if (error == 0) {
8066      -                vnode_t *tvp;
     8071 +                vnode_t *tvp, *tovp;
8067 8072                  rnode4_t *trp;
     8073 +
8068 8074                  /*
8069 8075                   * Notify the vnode. Each link is represented by
8070 8076                   * a different vnode, in nfsv4.
8071 8077                   */
8072 8078                  if (nvp) {
8073 8079                          trp = VTOR4(nvp);
8074 8080                          tvp = nvp;
8075 8081                          if (IS_SHADOW(nvp, trp))
8076 8082                                  tvp = RTOV4(trp);
8077 8083                          vnevent_rename_dest(tvp, ndvp, nnm, ct);
8078 8084                  }
8079 8085  
8080      -                /*
8081      -                 * if the source and destination directory are not the
8082      -                 * same notify the destination directory.
8083      -                 */
8084      -                if (VTOR4(odvp) != VTOR4(ndvp)) {
8085      -                        trp = VTOR4(ndvp);
8086      -                        tvp = ndvp;
8087      -                        if (IS_SHADOW(ndvp, trp))
8088      -                                tvp = RTOV4(trp);
8089      -                        vnevent_rename_dest_dir(tvp, ct);
8090      -                }
8091      -
8092 8086                  trp = VTOR4(ovp);
8093      -                tvp = ovp;
     8087 +                tovp = ovp;
8094 8088                  if (IS_SHADOW(ovp, trp))
     8089 +                        tovp = RTOV4(trp);
     8090 +
     8091 +                vnevent_rename_src(tovp, odvp, onm, ct);
     8092 +
     8093 +                trp = VTOR4(ndvp);
     8094 +                tvp = ndvp;
     8095 +
     8096 +                if (IS_SHADOW(ndvp, trp))
8095 8097                          tvp = RTOV4(trp);
8096      -                vnevent_rename_src(tvp, odvp, onm, ct);
     8098 +
     8099 +                vnevent_rename_dest_dir(tvp, tovp, nnm, ct);
8097 8100          }
8098 8101  
8099 8102          if (nvp) {
8100 8103                  VN_RELE(nvp);
8101 8104          }
8102 8105          VN_RELE(ovp);
8103 8106  
8104 8107          nfs_rw_exit(&odrp->r_rwlock);
8105 8108          nfs_rw_exit(&ndrp->r_rwlock);
8106 8109  
8107 8110          return (error);
8108 8111  }
8109 8112  
8110 8113  /*
8111 8114   * When the parent directory has changed, sv_dfh must be updated
8112 8115   */
8113 8116  static void
8114 8117  update_parentdir_sfh(vnode_t *vp, vnode_t *ndvp)
8115 8118  {
8116 8119          svnode_t *sv = VTOSV(vp);
8117 8120          nfs4_sharedfh_t *old_dfh = sv->sv_dfh;
8118 8121          nfs4_sharedfh_t *new_dfh = VTOR4(ndvp)->r_fh;
8119 8122  
8120 8123          sfh4_hold(new_dfh);
8121 8124          sv->sv_dfh = new_dfh;
8122 8125          sfh4_rele(&old_dfh);
8123 8126  }
8124 8127  
8125 8128  /*
8126 8129   * nfs4rename_persistent does the otw portion of renaming in NFS Version 4,
8127 8130   * when it is known that the filehandle is persistent through rename.
8128 8131   *
8129 8132   * Rename requires that the current fh be the target directory and the
8130 8133   * saved fh be the source directory. After the operation, the current fh
8131 8134   * is unchanged.
8132 8135   * The compound op structure for persistent fh rename is:
8133 8136   *      PUTFH(sourcdir), SAVEFH, PUTFH(targetdir), RENAME
8134 8137   * Rather than bother with the directory postop args, we'll simply
8135 8138   * update that a change occurred in the cache, so no post-op getattrs.
8136 8139   */
8137 8140  static int
8138 8141  nfs4rename_persistent_fh(vnode_t *odvp, char *onm, vnode_t *renvp,
8139 8142      vnode_t *ndvp, char *nnm, cred_t *cr, nfsstat4 *statp)
8140 8143  {
8141 8144          COMPOUND4args_clnt args;
8142 8145          COMPOUND4res_clnt res, *resp = NULL;
8143 8146          nfs_argop4 *argop;
8144 8147          nfs_resop4 *resop;
8145 8148          int doqueue, argoplist_size;
8146 8149          mntinfo4_t *mi;
8147 8150          rnode4_t *odrp = VTOR4(odvp);
8148 8151          rnode4_t *ndrp = VTOR4(ndvp);
8149 8152          RENAME4res *rn_res;
8150 8153          bool_t needrecov;
8151 8154          nfs4_recov_state_t recov_state;
8152 8155          nfs4_error_t e = { 0, NFS4_OK, RPC_SUCCESS };
8153 8156          dirattr_info_t dinfo, *dinfop;
8154 8157  
8155 8158          ASSERT(nfs_zone() == VTOMI4(odvp)->mi_zone);
8156 8159  
8157 8160          recov_state.rs_flags = 0;
8158 8161          recov_state.rs_num_retry_despite_err = 0;
8159 8162  
8160 8163          /*
8161 8164           * Rename ops: putfh sdir; savefh; putfh tdir; rename; getattr tdir
8162 8165           *
8163 8166           * If source/target are different dirs, then append putfh(src); getattr
8164 8167           */
8165 8168          args.array_len = (odvp == ndvp) ? 5 : 7;
8166 8169          argoplist_size = args.array_len * sizeof (nfs_argop4);
8167 8170          args.array = argop = kmem_alloc(argoplist_size, KM_SLEEP);
8168 8171  
8169 8172  recov_retry:
8170 8173          *statp = NFS4_OK;
8171 8174  
8172 8175          /* No need to Lookup the file, persistent fh */
8173 8176          args.ctag = TAG_RENAME;
8174 8177  
8175 8178          mi = VTOMI4(odvp);
8176 8179          e.error = nfs4_start_op(mi, odvp, ndvp, &recov_state);
8177 8180          if (e.error) {
8178 8181                  kmem_free(argop, argoplist_size);
8179 8182                  return (e.error);
8180 8183          }
8181 8184  
8182 8185          /* 0: putfh source directory */
8183 8186          argop[0].argop = OP_CPUTFH;
8184 8187          argop[0].nfs_argop4_u.opcputfh.sfh = odrp->r_fh;
8185 8188  
8186 8189          /* 1: Save source fh to free up current for target */
8187 8190          argop[1].argop = OP_SAVEFH;
8188 8191  
8189 8192          /* 2: putfh targetdir */
8190 8193          argop[2].argop = OP_CPUTFH;
8191 8194          argop[2].nfs_argop4_u.opcputfh.sfh = ndrp->r_fh;
8192 8195  
8193 8196          /* 3: current_fh is targetdir, saved_fh is sourcedir */
8194 8197          argop[3].argop = OP_CRENAME;
8195 8198          argop[3].nfs_argop4_u.opcrename.coldname = onm;
8196 8199          argop[3].nfs_argop4_u.opcrename.cnewname = nnm;
8197 8200  
8198 8201          /* 4: getattr (targetdir) */
8199 8202          argop[4].argop = OP_GETATTR;
8200 8203          argop[4].nfs_argop4_u.opgetattr.attr_request = NFS4_VATTR_MASK;
8201 8204          argop[4].nfs_argop4_u.opgetattr.mi = mi;
8202 8205  
8203 8206          if (ndvp != odvp) {
8204 8207  
8205 8208                  /* 5: putfh (sourcedir) */
8206 8209                  argop[5].argop = OP_CPUTFH;
8207 8210                  argop[5].nfs_argop4_u.opcputfh.sfh = ndrp->r_fh;
8208 8211  
8209 8212                  /* 6: getattr (sourcedir) */
8210 8213                  argop[6].argop = OP_GETATTR;
8211 8214                  argop[6].nfs_argop4_u.opgetattr.attr_request = NFS4_VATTR_MASK;
8212 8215                  argop[6].nfs_argop4_u.opgetattr.mi = mi;
8213 8216          }
8214 8217  
8215 8218          dnlc_remove(odvp, onm);
8216 8219          dnlc_remove(ndvp, nnm);
8217 8220  
8218 8221          doqueue = 1;
8219 8222          dinfo.di_time_call = gethrtime();
8220 8223          rfs4call(mi, &args, &res, cr, &doqueue, 0, &e);
8221 8224  
8222 8225          needrecov = nfs4_needs_recovery(&e, FALSE, mi->mi_vfsp);
8223 8226          if (e.error) {
8224 8227                  PURGE_ATTRCACHE4(odvp);
8225 8228                  PURGE_ATTRCACHE4(ndvp);
8226 8229          } else {
8227 8230                  *statp = res.status;
8228 8231          }
8229 8232  
8230 8233          if (needrecov) {
8231 8234                  if (nfs4_start_recovery(&e, mi, odvp, ndvp, NULL, NULL,
8232 8235                      OP_RENAME, NULL, NULL, NULL) == FALSE) {
8233 8236                          nfs4_end_op(mi, odvp, ndvp, &recov_state, needrecov);
8234 8237                          if (!e.error)
8235 8238                                  (void) xdr_free(xdr_COMPOUND4res_clnt,
8236 8239                                      (caddr_t)&res);
8237 8240                          goto recov_retry;
8238 8241                  }
8239 8242          }
8240 8243  
8241 8244          if (!e.error) {
8242 8245                  resp = &res;
8243 8246                  /*
8244 8247                   * as long as OP_RENAME
8245 8248                   */
8246 8249                  if (res.status != NFS4_OK && res.array_len <= 4) {
8247 8250                          e.error = geterrno4(res.status);
8248 8251                          PURGE_ATTRCACHE4(odvp);
8249 8252                          PURGE_ATTRCACHE4(ndvp);
8250 8253                          /*
8251 8254                           * System V defines rename to return EEXIST, not
8252 8255                           * ENOTEMPTY if the target directory is not empty.
8253 8256                           * Over the wire, the error is NFSERR_ENOTEMPTY
8254 8257                           * which geterrno4 maps to ENOTEMPTY.
8255 8258                           */
8256 8259                          if (e.error == ENOTEMPTY)
8257 8260                                  e.error = EEXIST;
8258 8261                  } else {
8259 8262  
8260 8263                          resop = &res.array[3];  /* rename res */
8261 8264                          rn_res = &resop->nfs_resop4_u.oprename;
8262 8265  
8263 8266                          if (res.status == NFS4_OK) {
8264 8267                                  /*
8265 8268                                   * Update target attribute, readdir and dnlc
8266 8269                                   * caches.
8267 8270                                   */
8268 8271                                  dinfo.di_garp =
8269 8272                                      &res.array[4].nfs_resop4_u.opgetattr.ga_res;
8270 8273                                  dinfo.di_cred = cr;
8271 8274                                  dinfop = &dinfo;
8272 8275                          } else
8273 8276                                  dinfop = NULL;
8274 8277  
8275 8278                          nfs4_update_dircaches(&rn_res->target_cinfo,
8276 8279                              ndvp, NULL, NULL, dinfop);
8277 8280  
8278 8281                          /*
8279 8282                           * Update source attribute, readdir and dnlc caches
8280 8283                           *
8281 8284                           */
8282 8285                          if (ndvp != odvp) {
8283 8286                                  update_parentdir_sfh(renvp, ndvp);
8284 8287  
8285 8288                                  if (dinfop)
8286 8289                                          dinfo.di_garp =
8287 8290                                              &(res.array[6].nfs_resop4_u.
8288 8291                                              opgetattr.ga_res);
8289 8292  
8290 8293                                  nfs4_update_dircaches(&rn_res->source_cinfo,
8291 8294                                      odvp, NULL, NULL, dinfop);
8292 8295                          }
8293 8296  
8294 8297                          fn_move(VTOSV(renvp)->sv_name, VTOSV(ndvp)->sv_name,
8295 8298                              nnm);
8296 8299                  }
8297 8300          }
8298 8301  
8299 8302          if (resp)
8300 8303                  (void) xdr_free(xdr_COMPOUND4res_clnt, (caddr_t)resp);
8301 8304          nfs4_end_op(mi, odvp, ndvp, &recov_state, needrecov);
8302 8305          kmem_free(argop, argoplist_size);
8303 8306  
8304 8307          return (e.error);
8305 8308  }
8306 8309  
8307 8310  /*
8308 8311   * nfs4rename_volatile_fh does the otw part of renaming in NFS Version 4, when
8309 8312   * it is possible for the filehandle to change due to the rename.
8310 8313   *
8311 8314   * The compound req in this case includes a post-rename lookup and getattr
8312 8315   * to ensure that we have the correct fh and attributes for the object.
8313 8316   *
8314 8317   * Rename requires that the current fh be the target directory and the
8315 8318   * saved fh be the source directory. After the operation, the current fh
8316 8319   * is unchanged.
8317 8320   *
8318 8321   * We need the new filehandle (hence a LOOKUP and GETFH) so that we can
8319 8322   * update the filehandle for the renamed object.  We also get the old
8320 8323   * filehandle for historical reasons; this should be taken out sometime.
8321 8324   * This results in a rather cumbersome compound...
8322 8325   *
8323 8326   *    PUTFH(sourcedir), SAVEFH, LOOKUP(src), GETFH(old),
8324 8327   *    PUTFH(targetdir), RENAME, LOOKUP(trgt), GETFH(new), GETATTR
8325 8328   *
8326 8329   */
8327 8330  static int
8328 8331  nfs4rename_volatile_fh(vnode_t *odvp, char *onm, vnode_t *ovp,
8329 8332      vnode_t *ndvp, char *nnm, cred_t *cr, nfsstat4 *statp)
8330 8333  {
8331 8334          COMPOUND4args_clnt args;
8332 8335          COMPOUND4res_clnt res, *resp = NULL;
8333 8336          int argoplist_size;
8334 8337          nfs_argop4 *argop;
8335 8338          nfs_resop4 *resop;
8336 8339          int doqueue;
8337 8340          mntinfo4_t *mi;
8338 8341          rnode4_t *odrp = VTOR4(odvp);   /* old directory */
8339 8342          rnode4_t *ndrp = VTOR4(ndvp);   /* new directory */
8340 8343          rnode4_t *orp = VTOR4(ovp);     /* object being renamed */
8341 8344          RENAME4res *rn_res;
8342 8345          GETFH4res *ngf_res;
8343 8346          bool_t needrecov;
8344 8347          nfs4_recov_state_t recov_state;
8345 8348          hrtime_t t;
8346 8349          nfs4_error_t e = { 0, NFS4_OK, RPC_SUCCESS };
8347 8350          dirattr_info_t dinfo, *dinfop = &dinfo;
8348 8351  
8349 8352          ASSERT(nfs_zone() == VTOMI4(odvp)->mi_zone);
8350 8353  
8351 8354          recov_state.rs_flags = 0;
8352 8355          recov_state.rs_num_retry_despite_err = 0;
8353 8356  
8354 8357  recov_retry:
8355 8358          *statp = NFS4_OK;
8356 8359  
8357 8360          /*
8358 8361           * There is a window between the RPC and updating the path and
8359 8362           * filehandle stored in the rnode.  Lock out the FHEXPIRED recovery
8360 8363           * code, so that it doesn't try to use the old path during that
8361 8364           * window.
8362 8365           */
8363 8366          mutex_enter(&orp->r_statelock);
8364 8367          while (orp->r_flags & R4RECEXPFH) {
8365 8368                  klwp_t *lwp = ttolwp(curthread);
8366 8369  
8367 8370                  if (lwp != NULL)
8368 8371                          lwp->lwp_nostop++;
8369 8372                  if (cv_wait_sig(&orp->r_cv, &orp->r_statelock) == 0) {
8370 8373                          mutex_exit(&orp->r_statelock);
8371 8374                          if (lwp != NULL)
8372 8375                                  lwp->lwp_nostop--;
8373 8376                          return (EINTR);
8374 8377                  }
8375 8378                  if (lwp != NULL)
8376 8379                          lwp->lwp_nostop--;
8377 8380          }
8378 8381          orp->r_flags |= R4RECEXPFH;
8379 8382          mutex_exit(&orp->r_statelock);
8380 8383  
8381 8384          mi = VTOMI4(odvp);
8382 8385  
8383 8386          args.ctag = TAG_RENAME_VFH;
8384 8387          args.array_len = (odvp == ndvp) ? 10 : 12;
8385 8388          argoplist_size  = args.array_len * sizeof (nfs_argop4);
8386 8389          argop = kmem_alloc(argoplist_size, KM_SLEEP);
8387 8390  
8388 8391          /*
8389 8392           * Rename ops:
8390 8393           *    PUTFH(sourcedir), SAVEFH, LOOKUP(src), GETFH(old),
8391 8394           *    PUTFH(targetdir), RENAME, GETATTR(targetdir)
8392 8395           *    LOOKUP(trgt), GETFH(new), GETATTR,
8393 8396           *
8394 8397           *    if (odvp != ndvp)
8395 8398           *      add putfh(sourcedir), getattr(sourcedir) }
8396 8399           */
8397 8400          args.array = argop;
8398 8401  
8399 8402          e.error = nfs4_start_fop(mi, odvp, ndvp, OH_VFH_RENAME,
8400 8403              &recov_state, NULL);
8401 8404          if (e.error) {
8402 8405                  kmem_free(argop, argoplist_size);
8403 8406                  mutex_enter(&orp->r_statelock);
8404 8407                  orp->r_flags &= ~R4RECEXPFH;
8405 8408                  cv_broadcast(&orp->r_cv);
8406 8409                  mutex_exit(&orp->r_statelock);
8407 8410                  return (e.error);
8408 8411          }
8409 8412  
8410 8413          /* 0: putfh source directory */
8411 8414          argop[0].argop = OP_CPUTFH;
8412 8415          argop[0].nfs_argop4_u.opcputfh.sfh = odrp->r_fh;
8413 8416  
8414 8417          /* 1: Save source fh to free up current for target */
8415 8418          argop[1].argop = OP_SAVEFH;
8416 8419  
8417 8420          /* 2: Lookup pre-rename fh of renamed object */
8418 8421          argop[2].argop = OP_CLOOKUP;
8419 8422          argop[2].nfs_argop4_u.opclookup.cname = onm;
8420 8423  
8421 8424          /* 3: getfh fh of renamed object (before rename) */
8422 8425          argop[3].argop = OP_GETFH;
8423 8426  
8424 8427          /* 4: putfh targetdir */
8425 8428          argop[4].argop = OP_CPUTFH;
8426 8429          argop[4].nfs_argop4_u.opcputfh.sfh = ndrp->r_fh;
8427 8430  
8428 8431          /* 5: current_fh is targetdir, saved_fh is sourcedir */
8429 8432          argop[5].argop = OP_CRENAME;
8430 8433          argop[5].nfs_argop4_u.opcrename.coldname = onm;
8431 8434          argop[5].nfs_argop4_u.opcrename.cnewname = nnm;
8432 8435  
8433 8436          /* 6: getattr of target dir (post op attrs) */
8434 8437          argop[6].argop = OP_GETATTR;
8435 8438          argop[6].nfs_argop4_u.opgetattr.attr_request = NFS4_VATTR_MASK;
8436 8439          argop[6].nfs_argop4_u.opgetattr.mi = mi;
8437 8440  
8438 8441          /* 7: Lookup post-rename fh of renamed object */
8439 8442          argop[7].argop = OP_CLOOKUP;
8440 8443          argop[7].nfs_argop4_u.opclookup.cname = nnm;
8441 8444  
8442 8445          /* 8: getfh fh of renamed object (after rename) */
8443 8446          argop[8].argop = OP_GETFH;
8444 8447  
8445 8448          /* 9: getattr of renamed object */
8446 8449          argop[9].argop = OP_GETATTR;
8447 8450          argop[9].nfs_argop4_u.opgetattr.attr_request = NFS4_VATTR_MASK;
8448 8451          argop[9].nfs_argop4_u.opgetattr.mi = mi;
8449 8452  
8450 8453          /*
8451 8454           * If source/target dirs are different, then get new post-op
8452 8455           * attrs for source dir also.
8453 8456           */
8454 8457          if (ndvp != odvp) {
8455 8458                  /* 10: putfh (sourcedir) */
8456 8459                  argop[10].argop = OP_CPUTFH;
8457 8460                  argop[10].nfs_argop4_u.opcputfh.sfh = ndrp->r_fh;
8458 8461  
8459 8462                  /* 11: getattr (sourcedir) */
8460 8463                  argop[11].argop = OP_GETATTR;
8461 8464                  argop[11].nfs_argop4_u.opgetattr.attr_request = NFS4_VATTR_MASK;
8462 8465                  argop[11].nfs_argop4_u.opgetattr.mi = mi;
8463 8466          }
8464 8467  
8465 8468          dnlc_remove(odvp, onm);
8466 8469          dnlc_remove(ndvp, nnm);
8467 8470  
8468 8471          doqueue = 1;
8469 8472          t = gethrtime();
8470 8473          rfs4call(mi, &args, &res, cr, &doqueue, 0, &e);
8471 8474  
8472 8475          needrecov = nfs4_needs_recovery(&e, FALSE, mi->mi_vfsp);
8473 8476          if (e.error) {
8474 8477                  PURGE_ATTRCACHE4(odvp);
8475 8478                  PURGE_ATTRCACHE4(ndvp);
8476 8479                  if (!needrecov) {
8477 8480                          nfs4_end_fop(mi, odvp, ndvp, OH_VFH_RENAME,
8478 8481                              &recov_state, needrecov);
8479 8482                          goto out;
8480 8483                  }
8481 8484          } else {
8482 8485                  *statp = res.status;
8483 8486          }
8484 8487  
8485 8488          if (needrecov) {
8486 8489                  bool_t abort;
8487 8490  
8488 8491                  abort = nfs4_start_recovery(&e, mi, odvp, ndvp, NULL, NULL,
8489 8492                      OP_RENAME, NULL, NULL, NULL);
8490 8493                  if (abort == FALSE) {
8491 8494                          nfs4_end_fop(mi, odvp, ndvp, OH_VFH_RENAME,
8492 8495                              &recov_state, needrecov);
8493 8496                          kmem_free(argop, argoplist_size);
8494 8497                          if (!e.error)
8495 8498                                  (void) xdr_free(xdr_COMPOUND4res_clnt,
8496 8499                                      (caddr_t)&res);
8497 8500                          mutex_enter(&orp->r_statelock);
8498 8501                          orp->r_flags &= ~R4RECEXPFH;
8499 8502                          cv_broadcast(&orp->r_cv);
8500 8503                          mutex_exit(&orp->r_statelock);
8501 8504                          goto recov_retry;
8502 8505                  } else {
8503 8506                          if (e.error != 0) {
8504 8507                                  nfs4_end_fop(mi, odvp, ndvp, OH_VFH_RENAME,
8505 8508                                      &recov_state, needrecov);
8506 8509                                  goto out;
8507 8510                          }
8508 8511                          /* fall through for res.status case */
8509 8512                  }
8510 8513          }
8511 8514  
8512 8515          resp = &res;
8513 8516          /*
8514 8517           * If OP_RENAME (or any prev op) failed, then return an error.
8515 8518           * OP_RENAME is index 5, so if array len <= 6 we return an error.
8516 8519           */
8517 8520          if ((res.status != NFS4_OK) && (res.array_len <= 6)) {
8518 8521                  /*
8519 8522                   * Error in an op other than last Getattr
8520 8523                   */
8521 8524                  e.error = geterrno4(res.status);
8522 8525                  PURGE_ATTRCACHE4(odvp);
8523 8526                  PURGE_ATTRCACHE4(ndvp);
8524 8527                  /*
8525 8528                   * System V defines rename to return EEXIST, not
8526 8529                   * ENOTEMPTY if the target directory is not empty.
8527 8530                   * Over the wire, the error is NFSERR_ENOTEMPTY
8528 8531                   * which geterrno4 maps to ENOTEMPTY.
8529 8532                   */
8530 8533                  if (e.error == ENOTEMPTY)
8531 8534                          e.error = EEXIST;
8532 8535                  nfs4_end_fop(mi, odvp, ndvp, OH_VFH_RENAME, &recov_state,
8533 8536                      needrecov);
8534 8537                  goto out;
8535 8538          }
8536 8539  
8537 8540          /* rename results */
8538 8541          rn_res = &res.array[5].nfs_resop4_u.oprename;
8539 8542  
8540 8543          if (res.status == NFS4_OK) {
8541 8544                  /* Update target attribute, readdir and dnlc caches */
8542 8545                  dinfo.di_garp =
8543 8546                      &res.array[6].nfs_resop4_u.opgetattr.ga_res;
8544 8547                  dinfo.di_cred = cr;
8545 8548                  dinfo.di_time_call = t;
8546 8549          } else
8547 8550                  dinfop = NULL;
8548 8551  
8549 8552          /* Update source cache attribute, readdir and dnlc caches */
8550 8553          nfs4_update_dircaches(&rn_res->target_cinfo, ndvp, NULL, NULL, dinfop);
8551 8554  
8552 8555          /* Update source cache attribute, readdir and dnlc caches */
8553 8556          if (ndvp != odvp) {
8554 8557                  update_parentdir_sfh(ovp, ndvp);
8555 8558  
8556 8559                  /*
8557 8560                   * If dinfop is non-NULL, then compound succeded, so
8558 8561                   * set di_garp to attrs for source dir.  dinfop is only
8559 8562                   * set to NULL when compound fails.
8560 8563                   */
8561 8564                  if (dinfop)
8562 8565                          dinfo.di_garp =
8563 8566                              &res.array[11].nfs_resop4_u.opgetattr.ga_res;
8564 8567                  nfs4_update_dircaches(&rn_res->source_cinfo, odvp, NULL, NULL,
8565 8568                      dinfop);
8566 8569          }
8567 8570  
8568 8571          /*
8569 8572           * Update the rnode with the new component name and args,
8570 8573           * and if the file handle changed, also update it with the new fh.
8571 8574           * This is only necessary if the target object has an rnode
8572 8575           * entry and there is no need to create one for it.
8573 8576           */
8574 8577          resop = &res.array[8];  /* getfh new res */
8575 8578          ngf_res = &resop->nfs_resop4_u.opgetfh;
8576 8579  
8577 8580          /*
8578 8581           * Update the path and filehandle for the renamed object.
8579 8582           */
8580 8583          nfs4rename_update(ovp, ndvp, &ngf_res->object, nnm);
8581 8584  
8582 8585          nfs4_end_fop(mi, odvp, ndvp, OH_VFH_RENAME, &recov_state, needrecov);
8583 8586  
8584 8587          if (res.status == NFS4_OK) {
8585 8588                  resop++;        /* getattr res */
8586 8589                  e.error = nfs4_update_attrcache(res.status,
8587 8590                      &resop->nfs_resop4_u.opgetattr.ga_res,
8588 8591                      t, ovp, cr);
8589 8592          }
8590 8593  
8591 8594  out:
8592 8595          kmem_free(argop, argoplist_size);
8593 8596          if (resp)
8594 8597                  (void) xdr_free(xdr_COMPOUND4res_clnt, (caddr_t)resp);
8595 8598          mutex_enter(&orp->r_statelock);
8596 8599          orp->r_flags &= ~R4RECEXPFH;
8597 8600          cv_broadcast(&orp->r_cv);
8598 8601          mutex_exit(&orp->r_statelock);
8599 8602  
8600 8603          return (e.error);
8601 8604  }
8602 8605  
8603 8606  /* ARGSUSED */
8604 8607  static int
8605 8608  nfs4_mkdir(vnode_t *dvp, char *nm, struct vattr *va, vnode_t **vpp, cred_t *cr,
8606 8609      caller_context_t *ct, int flags, vsecattr_t *vsecp)
8607 8610  {
8608 8611          int error;
8609 8612          vnode_t *vp;
8610 8613  
8611 8614          if (nfs_zone() != VTOMI4(dvp)->mi_zone)
8612 8615                  return (EPERM);
8613 8616          /*
8614 8617           * As ".." has special meaning and rather than send a mkdir
8615 8618           * over the wire to just let the server freak out, we just
8616 8619           * short circuit it here and return EEXIST
8617 8620           */
8618 8621          if (nm[0] == '.' && nm[1] == '.' && nm[2] == '\0')
8619 8622                  return (EEXIST);
8620 8623  
8621 8624          /*
8622 8625           * Decision to get the right gid and setgid bit of the
8623 8626           * new directory is now made in call_nfs4_create_req.
8624 8627           */
8625 8628          va->va_mask |= AT_MODE;
8626 8629          error = call_nfs4_create_req(dvp, nm, NULL, va, &vp, cr, NF4DIR);
8627 8630          if (error)
8628 8631                  return (error);
8629 8632  
8630 8633          *vpp = vp;
8631 8634          return (0);
8632 8635  }
8633 8636  
8634 8637  
8635 8638  /*
8636 8639   * rmdir is using the same remove v4 op as does remove.
8637 8640   * Remove requires that the current fh be the target directory.
8638 8641   * After the operation, the current fh is unchanged.
8639 8642   * The compound op structure is:
8640 8643   *      PUTFH(targetdir), REMOVE
8641 8644   */
/*ARGSUSED4*/
static int
nfs4_rmdir(vnode_t *dvp, char *nm, vnode_t *cdir, cred_t *cr,
    caller_context_t *ct, int flags)
{
	int need_end_op = FALSE;
	COMPOUND4args_clnt args;
	COMPOUND4res_clnt res, *resp = NULL;
	REMOVE4res *rm_res;
	nfs_argop4 argop[3];
	nfs_resop4 *resop;
	vnode_t *vp;
	int doqueue;
	mntinfo4_t *mi;
	rnode4_t *drp;
	bool_t needrecov = FALSE;
	nfs4_recov_state_t recov_state;
	nfs4_error_t e = { 0, NFS4_OK, RPC_SUCCESS };
	dirattr_info_t dinfo, *dinfop;

	if (nfs_zone() != VTOMI4(dvp)->mi_zone)
		return (EPERM);
	/*
	 * As ".." has special meaning and rather than send a rmdir
	 * over the wire to just let the server freak out, we just
	 * short circuit it here and return EEXIST
	 */
	if (nm[0] == '.' && nm[1] == '.' && nm[2] == '\0')
		return (EEXIST);

	drp = VTOR4(dvp);
	if (nfs_rw_enter_sig(&drp->r_rwlock, RW_WRITER, INTR4(dvp)))
		return (EINTR);

	/*
	 * Attempt to prevent a rmdir(".") from succeeding: look up the
	 * target and fail with EINVAL if it is the caller's current dir.
	 */
	e.error = nfs4lookup(dvp, nm, &vp, cr, 0);
	if (e.error) {
		nfs_rw_exit(&drp->r_rwlock);
		return (e.error);
	}
	if (vp == cdir) {
		VN_RELE(vp);
		nfs_rw_exit(&drp->r_rwlock);
		return (EINVAL);
	}

	/*
	 * Since nfsv4 remove op works on both files and directories,
	 * check that the removed object is indeed a directory.
	 */
	if (vp->v_type != VDIR) {
		VN_RELE(vp);
		nfs_rw_exit(&drp->r_rwlock);
		return (ENOTDIR);
	}

	/*
	 * First just remove the entry from the name cache, as it
	 * is most likely an entry for this vp.
	 */
	dnlc_remove(dvp, nm);

	/*
	 * If the vnode reference count is greater than one, then
	 * there may be additional references in the DNLC which will
	 * need to be purged.  First, try removing the entry for
	 * the parent directory and see if that removes the additional
	 * reference(s).  If that doesn't do it, then use dnlc_purge_vp
	 * to completely remove any references to the directory which
	 * might still exist in the DNLC.
	 */
	if (vp->v_count > 1) {
		dnlc_remove(vp, "..");
		if (vp->v_count > 1)
			dnlc_purge_vp(vp);
	}

	mi = VTOMI4(dvp);
	recov_state.rs_flags = 0;
	recov_state.rs_num_retry_despite_err = 0;

recov_retry:
	args.ctag = TAG_RMDIR;

	/*
	 * Rmdir ops: putfh dir; remove; getattr (postop dir attrs)
	 */
	args.array_len = 3;
	args.array = argop;

	e.error = nfs4_start_op(VTOMI4(dvp), dvp, NULL, &recov_state);
	if (e.error) {
		nfs_rw_exit(&drp->r_rwlock);
		return (e.error);
	}
	/* From here on, every exit path must balance with nfs4_end_op. */
	need_end_op = TRUE;

	/* putfh directory */
	argop[0].argop = OP_CPUTFH;
	argop[0].nfs_argop4_u.opcputfh.sfh = drp->r_fh;

	/* remove */
	argop[1].argop = OP_CREMOVE;
	argop[1].nfs_argop4_u.opcremove.ctarget = nm;

	/* getattr (postop attrs for dir that contained removed dir) */
	argop[2].argop = OP_GETATTR;
	argop[2].nfs_argop4_u.opgetattr.attr_request = NFS4_VATTR_MASK;
	argop[2].nfs_argop4_u.opgetattr.mi = mi;

	dinfo.di_time_call = gethrtime();
	doqueue = 1;
	rfs4call(mi, &args, &res, cr, &doqueue, 0, &e);

	/* The removed directory's cached attributes are no longer valid. */
	PURGE_ATTRCACHE4(vp);

	needrecov = nfs4_needs_recovery(&e, FALSE, mi->mi_vfsp);
	if (e.error) {
		PURGE_ATTRCACHE4(dvp);
	}

	if (needrecov) {
		if (nfs4_start_recovery(&e, VTOMI4(dvp), dvp, NULL, NULL,
		    NULL, OP_REMOVE, NULL, NULL, NULL) == FALSE) {
			/* Recovery was started; free results and retry. */
			if (!e.error)
				(void) xdr_free(xdr_COMPOUND4res_clnt,
				    (caddr_t)&res);

			nfs4_end_op(VTOMI4(dvp), dvp, NULL, &recov_state,
			    needrecov);
			need_end_op = FALSE;
			goto recov_retry;
		}
	}

	if (!e.error) {
		resp = &res;

		/*
		 * Only return error if first 2 ops (OP_REMOVE or earlier)
		 * failed.
		 */
		if (res.status != NFS4_OK && res.array_len <= 2) {
			e.error = geterrno4(res.status);
			PURGE_ATTRCACHE4(dvp);
			nfs4_end_op(VTOMI4(dvp), dvp, NULL,
			    &recov_state, needrecov);
			need_end_op = FALSE;
			nfs4_purge_stale_fh(e.error, dvp, cr);
			/*
			 * System V defines rmdir to return EEXIST, not
			 * ENOTEMPTY if the directory is not empty.  Over
			 * the wire, the error is NFSERR_ENOTEMPTY which
			 * geterrno4 maps to ENOTEMPTY.
			 */
			if (e.error == ENOTEMPTY)
				e.error = EEXIST;
		} else {
			resop = &res.array[1];	/* remove res */
			rm_res = &resop->nfs_resop4_u.opremove;

			if (res.status == NFS4_OK) {
				resop = &res.array[2];	/* dir attrs */
				dinfo.di_garp =
				    &resop->nfs_resop4_u.opgetattr.ga_res;
				dinfo.di_cred = cr;
				dinfop = &dinfo;
			} else
				dinfop = NULL;

			/* Update dir attribute, readdir and dnlc caches */
			nfs4_update_dircaches(&rm_res->cinfo, dvp, NULL, NULL,
			    dinfop);

			/* destroy rddir cache for dir that was removed */
			if (VTOR4(vp)->r_dir != NULL)
				nfs4_purge_rddir_cache(vp);
		}
	}

	if (need_end_op)
		nfs4_end_op(VTOMI4(dvp), dvp, NULL, &recov_state, needrecov);

	nfs_rw_exit(&drp->r_rwlock);

	if (resp)
		(void) xdr_free(xdr_COMPOUND4res_clnt, (caddr_t)resp);

	if (e.error == 0) {
		/*
		 * Raise the rmdir vnode event against the real vnode,
		 * not a shadow of it.
		 */
		vnode_t *tvp;
		rnode4_t *trp;
		trp = VTOR4(vp);
		tvp = vp;
		if (IS_SHADOW(vp, trp))
			tvp = RTOV4(trp);
		vnevent_rmdir(tvp, dvp, nm, ct);
	}

	VN_RELE(vp);

	return (e.error);
}
8846 8849  
8847 8850  /* ARGSUSED */
8848 8851  static int
8849 8852  nfs4_symlink(vnode_t *dvp, char *lnm, struct vattr *tva, char *tnm, cred_t *cr,
8850 8853      caller_context_t *ct, int flags)
8851 8854  {
8852 8855          int error;
8853 8856          vnode_t *vp;
8854 8857          rnode4_t *rp;
8855 8858          char *contents;
8856 8859          mntinfo4_t *mi = VTOMI4(dvp);
8857 8860  
8858 8861          if (nfs_zone() != mi->mi_zone)
8859 8862                  return (EPERM);
8860 8863          if (!(mi->mi_flags & MI4_SYMLINK))
8861 8864                  return (EOPNOTSUPP);
8862 8865  
8863 8866          error = call_nfs4_create_req(dvp, lnm, tnm, tva, &vp, cr, NF4LNK);
8864 8867          if (error)
8865 8868                  return (error);
8866 8869  
8867 8870          ASSERT(nfs4_consistent_type(vp));
8868 8871          rp = VTOR4(vp);
8869 8872          if (nfs4_do_symlink_cache && rp->r_symlink.contents == NULL) {
8870 8873  
8871 8874                  contents = kmem_alloc(MAXPATHLEN, KM_SLEEP);
8872 8875  
8873 8876                  if (contents != NULL) {
8874 8877                          mutex_enter(&rp->r_statelock);
8875 8878                          if (rp->r_symlink.contents == NULL) {
8876 8879                                  rp->r_symlink.len = strlen(tnm);
8877 8880                                  bcopy(tnm, contents, rp->r_symlink.len);
8878 8881                                  rp->r_symlink.contents = contents;
8879 8882                                  rp->r_symlink.size = MAXPATHLEN;
8880 8883                                  mutex_exit(&rp->r_statelock);
8881 8884                          } else {
8882 8885                                  mutex_exit(&rp->r_statelock);
8883 8886                                  kmem_free((void *)contents, MAXPATHLEN);
8884 8887                          }
8885 8888                  }
8886 8889          }
8887 8890          VN_RELE(vp);
8888 8891  
8889 8892          return (error);
8890 8893  }
8891 8894  
8892 8895  
8893 8896  /*
8894 8897   * Read directory entries.
8895 8898   * There are some weird things to look out for here.  The uio_loffset
8896 8899   * field is either 0 or it is the offset returned from a previous
8897 8900   * readdir.  It is an opaque value used by the server to find the
8898 8901   * correct directory block to read. The count field is the number
8899 8902   * of blocks to read on the server.  This is advisory only, the server
8900 8903   * may return only one block's worth of entries.  Entries may be compressed
8901 8904   * on the server.
8902 8905   */
/* ARGSUSED */
static int
nfs4_readdir(vnode_t *vp, struct uio *uiop, cred_t *cr, int *eofp,
    caller_context_t *ct, int flags)
{
	int error;
	uint_t count;
	rnode4_t *rp;
	rddir4_cache *rdc;
	rddir4_cache *rrdc;

	if (nfs_zone() != VTOMI4(vp)->mi_zone)
		return (EIO);
	rp = VTOR4(vp);

	/* Caller must hold the directory's rwlock as reader. */
	ASSERT(nfs_rw_lock_held(&rp->r_rwlock, RW_READER));

	/*
	 * Make sure that the directory cache is valid.
	 */
	if (rp->r_dir != NULL) {
		if (nfs_disable_rddir_cache != 0) {
			/*
			 * Setting nfs_disable_rddir_cache in /etc/system
			 * allows interoperability with servers that do not
			 * properly update the attributes of directories.
			 * Any cached information gets purged before an
			 * access is made to it.
			 */
			nfs4_purge_rddir_cache(vp);
		}

		error = nfs4_validate_caches(vp, cr);
		if (error)
			return (error);
	}

	count = MIN(uiop->uio_iov->iov_len, MAXBSIZE);

	/*
	 * Short circuit last readdir which always returns 0 bytes.
	 * This can be done after the directory has been read through
	 * completely at least once.  This will set r_direof which
	 * can be used to find the value of the last cookie.
	 */
	mutex_enter(&rp->r_statelock);
	if (rp->r_direof != NULL &&
	    uiop->uio_loffset == rp->r_direof->nfs4_ncookie) {
		mutex_exit(&rp->r_statelock);
#ifdef DEBUG
		nfs4_readdir_cache_shorts++;
#endif
		if (eofp)
			*eofp = 1;
		return (0);
	}

	/*
	 * Look for a cache entry.  Cache entries are identified
	 * by the NFS cookie value and the byte count requested.
	 * r_statelock is held for all cache-entry manipulation below;
	 * it is dropped only around the over-the-wire readdir.
	 */
	rdc = rddir4_cache_lookup(rp, uiop->uio_loffset, count);

	/*
	 * If rdc is NULL then the lookup resulted in an unrecoverable error.
	 */
	if (rdc == NULL) {
		mutex_exit(&rp->r_statelock);
		return (EINTR);
	}

	/*
	 * Check to see if we need to fill this entry in.
	 */
	if (rdc->flags & RDDIRREQ) {
		rdc->flags &= ~RDDIRREQ;
		rdc->flags |= RDDIR;
		mutex_exit(&rp->r_statelock);

		/*
		 * Do the readdir (over the wire), with the lock dropped.
		 */
		nfs4readdir(vp, rdc, cr);

		/*
		 * Reacquire the lock, so that we can continue
		 */
		mutex_enter(&rp->r_statelock);
		/*
		 * The entry is now complete
		 */
		rdc->flags &= ~RDDIR;
	}

	ASSERT(!(rdc->flags & RDDIR));

	/*
	 * If an error occurred while attempting
	 * to fill the cache entry, mark the entry invalid and
	 * just return the error.
	 */
	if (rdc->error) {
		error = rdc->error;
		rdc->flags |= RDDIRREQ;
		rddir4_cache_rele(rp, rdc);
		mutex_exit(&rp->r_statelock);
		return (error);
	}

	/*
	 * The cache entry is complete and good,
	 * copyout the dirent structs to the calling
	 * thread.
	 */
	error = uiomove(rdc->entries, rdc->actlen, UIO_READ, uiop);

	/*
	 * If no error occurred during the copyout,
	 * update the offset in the uio struct to
	 * contain the value of the next NFS 4 cookie
	 * and set the eof value appropriately.
	 */
	if (!error) {
		uiop->uio_loffset = rdc->nfs4_ncookie;
		if (eofp)
			*eofp = rdc->eof;
	}

	/*
	 * Decide whether to do readahead.  Don't if we
	 * have already read to the end of directory.
	 */
	if (rdc->eof) {
		/*
		 * Make the entry the direof only if it is cached
		 */
		if (rdc->flags & RDDIRCACHED)
			rp->r_direof = rdc;
		rddir4_cache_rele(rp, rdc);
		mutex_exit(&rp->r_statelock);
		return (error);
	}

	/* Determine if a readdir readahead should be done */
	if (!(rp->r_flags & R4LOOKUP)) {
		rddir4_cache_rele(rp, rdc);
		mutex_exit(&rp->r_statelock);
		return (error);
	}

	/*
	 * Now look for a readahead entry.
	 *
	 * Check to see whether we found an entry for the readahead.
	 * If so, we don't need to do anything further, so free the new
	 * entry if one was allocated.  Otherwise, allocate a new entry, add
	 * it to the cache, and then initiate an asynchronous readdir
	 * operation to fill it.
	 */
	rrdc = rddir4_cache_lookup(rp, rdc->nfs4_ncookie, count);

	/*
	 * A readdir cache entry could not be obtained for the readahead.  In
	 * this case we skip the readahead and return.
	 */
	if (rrdc == NULL) {
		rddir4_cache_rele(rp, rdc);
		mutex_exit(&rp->r_statelock);
		return (error);
	}

	/*
	 * Check to see if we need to fill this entry in.
	 */
	if (rrdc->flags & RDDIRREQ) {
		rrdc->flags &= ~RDDIRREQ;
		rrdc->flags |= RDDIR;
		rddir4_cache_rele(rp, rdc);
		mutex_exit(&rp->r_statelock);
#ifdef DEBUG
		nfs4_readdir_readahead++;
#endif
		/*
		 * Do the readdir asynchronously; do_nfs4readdir will
		 * complete the cache entry.
		 */
		nfs4_async_readdir(vp, rrdc, cr, do_nfs4readdir);
		return (error);
	}

	rddir4_cache_rele(rp, rrdc);
	rddir4_cache_rele(rp, rdc);
	mutex_exit(&rp->r_statelock);
	return (error);
}
9097 9100  
9098 9101  static int
9099 9102  do_nfs4readdir(vnode_t *vp, rddir4_cache *rdc, cred_t *cr)
9100 9103  {
9101 9104          int error;
9102 9105          rnode4_t *rp;
9103 9106  
9104 9107          ASSERT(nfs_zone() == VTOMI4(vp)->mi_zone);
9105 9108  
9106 9109          rp = VTOR4(vp);
9107 9110  
9108 9111          /*
9109 9112           * Obtain the readdir results for the caller.
9110 9113           */
9111 9114          nfs4readdir(vp, rdc, cr);
9112 9115  
9113 9116          mutex_enter(&rp->r_statelock);
9114 9117          /*
9115 9118           * The entry is now complete
9116 9119           */
9117 9120          rdc->flags &= ~RDDIR;
9118 9121  
9119 9122          error = rdc->error;
9120 9123          if (error)
9121 9124                  rdc->flags |= RDDIRREQ;
9122 9125          rddir4_cache_rele(rp, rdc);
9123 9126          mutex_exit(&rp->r_statelock);
9124 9127  
9125 9128          return (error);
9126 9129  }
9127 9130  
9128 9131  /*
9129 9132   * Read directory entries.
9130 9133   * There are some weird things to look out for here.  The uio_loffset
9131 9134   * field is either 0 or it is the offset returned from a previous
9132 9135   * readdir.  It is an opaque value used by the server to find the
9133 9136   * correct directory block to read. The count field is the number
9134 9137   * of blocks to read on the server.  This is advisory only, the server
9135 9138   * may return only one block's worth of entries.  Entries may be compressed
9136 9139   * on the server.
9137 9140   *
9138 9141   * Generates the following compound request:
9139 9142   * 1. If readdir offset is zero and no dnlc entry for parent exists,
9140 9143   *    must include a Lookupp as well. In this case, send:
9141 9144   *    { Putfh <fh>; Readdir; Lookupp; Getfh; Getattr }
9142 9145   * 2. Otherwise just do: { Putfh <fh>; Readdir }
9143 9146   *
9144 9147   * Get complete attributes and filehandles for entries if this is the
9145 9148   * first read of the directory. Otherwise, just get fileid's.
9146 9149   */
9147 9150  static void
9148 9151  nfs4readdir(vnode_t *vp, rddir4_cache *rdc, cred_t *cr)
9149 9152  {
9150 9153          COMPOUND4args_clnt args;
9151 9154          COMPOUND4res_clnt res;
9152 9155          READDIR4args *rargs;
9153 9156          READDIR4res_clnt *rd_res;
9154 9157          bitmap4 rd_bitsval;
9155 9158          nfs_argop4 argop[5];
9156 9159          nfs_resop4 *resop;
9157 9160          rnode4_t *rp = VTOR4(vp);
9158 9161          mntinfo4_t *mi = VTOMI4(vp);
9159 9162          int doqueue;
9160 9163          u_longlong_t nodeid, pnodeid;   /* id's of dir and its parents */
9161 9164          vnode_t *dvp;
9162 9165          nfs_cookie4 cookie = (nfs_cookie4)rdc->nfs4_cookie;
9163 9166          int num_ops, res_opcnt;
9164 9167          bool_t needrecov = FALSE;
9165 9168          nfs4_recov_state_t recov_state;
9166 9169          hrtime_t t;
9167 9170          nfs4_error_t e = { 0, NFS4_OK, RPC_SUCCESS };
9168 9171  
9169 9172          ASSERT(nfs_zone() == mi->mi_zone);
9170 9173          ASSERT(rdc->flags & RDDIR);
9171 9174          ASSERT(rdc->entries == NULL);
9172 9175  
9173 9176          /*
9174 9177           * If rp were a stub, it should have triggered and caused
9175 9178           * a mount for us to get this far.
9176 9179           */
9177 9180          ASSERT(!RP_ISSTUB(rp));
9178 9181  
9179 9182          num_ops = 2;
9180 9183          if (cookie == (nfs_cookie4)0 || cookie == (nfs_cookie4)1) {
9181 9184                  /*
9182 9185                   * Since nfsv4 readdir may not return entries for "." and "..",
9183 9186                   * the client must recreate them:
9184 9187                   * To find the correct nodeid, do the following:
9185 9188                   * For current node, get nodeid from dnlc.
9186 9189                   * - if current node is rootvp, set pnodeid to nodeid.
9187 9190                   * - else if parent is in the dnlc, get its nodeid from there.
9188 9191                   * - else add LOOKUPP+GETATTR to compound.
9189 9192                   */
9190 9193                  nodeid = rp->r_attr.va_nodeid;
9191 9194                  if (vp->v_flag & VROOT) {
9192 9195                          pnodeid = nodeid;       /* root of mount point */
9193 9196                  } else {
9194 9197                          dvp = dnlc_lookup(vp, "..");
9195 9198                          if (dvp != NULL && dvp != DNLC_NO_VNODE) {
9196 9199                                  /* parent in dnlc cache - no need for otw */
9197 9200                                  pnodeid = VTOR4(dvp)->r_attr.va_nodeid;
9198 9201                          } else {
9199 9202                                  /*
9200 9203                                   * parent not in dnlc cache,
9201 9204                                   * do lookupp to get its id
9202 9205                                   */
9203 9206                                  num_ops = 5;
9204 9207                                  pnodeid = 0; /* set later by getattr parent */
9205 9208                          }
9206 9209                          if (dvp)
9207 9210                                  VN_RELE(dvp);
9208 9211                  }
9209 9212          }
9210 9213          recov_state.rs_flags = 0;
9211 9214          recov_state.rs_num_retry_despite_err = 0;
9212 9215  
9213 9216          /* Save the original mount point security flavor */
9214 9217          (void) save_mnt_secinfo(mi->mi_curr_serv);
9215 9218  
9216 9219  recov_retry:
9217 9220          args.ctag = TAG_READDIR;
9218 9221  
9219 9222          args.array = argop;
9220 9223          args.array_len = num_ops;
9221 9224  
9222 9225          if (e.error = nfs4_start_fop(VTOMI4(vp), vp, NULL, OH_READDIR,
9223 9226              &recov_state, NULL)) {
9224 9227                  /*
9225 9228                   * If readdir a node that is a stub for a crossed mount point,
9226 9229                   * keep the original secinfo flavor for the current file
9227 9230                   * system, not the crossed one.
9228 9231                   */
9229 9232                  (void) check_mnt_secinfo(mi->mi_curr_serv, vp);
9230 9233                  rdc->error = e.error;
9231 9234                  return;
9232 9235          }
9233 9236  
9234 9237          /*
9235 9238           * Determine which attrs to request for dirents.  This code
9236 9239           * must be protected by nfs4_start/end_fop because of r_server
9237 9240           * (which will change during failover recovery).
9238 9241           *
9239 9242           */
9240 9243          if (rp->r_flags & (R4LOOKUP | R4READDIRWATTR)) {
9241 9244                  /*
9242 9245                   * Get all vattr attrs plus filehandle and rdattr_error
9243 9246                   */
9244 9247                  rd_bitsval = NFS4_VATTR_MASK |
9245 9248                      FATTR4_RDATTR_ERROR_MASK |
9246 9249                      FATTR4_FILEHANDLE_MASK;
9247 9250  
9248 9251                  if (rp->r_flags & R4READDIRWATTR) {
9249 9252                          mutex_enter(&rp->r_statelock);
9250 9253                          rp->r_flags &= ~R4READDIRWATTR;
9251 9254                          mutex_exit(&rp->r_statelock);
9252 9255                  }
9253 9256          } else {
9254 9257                  servinfo4_t *svp = rp->r_server;
9255 9258  
9256 9259                  /*
9257 9260                   * Already read directory. Use readdir with
9258 9261                   * no attrs (except for mounted_on_fileid) for updates.
9259 9262                   */
9260 9263                  rd_bitsval = FATTR4_RDATTR_ERROR_MASK;
9261 9264  
9262 9265                  /*
9263 9266                   * request mounted on fileid if supported, else request
9264 9267                   * fileid.  maybe we should verify that fileid is supported
9265 9268                   * and request something else if not.
9266 9269                   */
9267 9270                  (void) nfs_rw_enter_sig(&svp->sv_lock, RW_READER, 0);
9268 9271                  if (svp->sv_supp_attrs & FATTR4_MOUNTED_ON_FILEID_MASK)
9269 9272                          rd_bitsval |= FATTR4_MOUNTED_ON_FILEID_MASK;
9270 9273                  nfs_rw_exit(&svp->sv_lock);
9271 9274          }
9272 9275  
9273 9276          /* putfh directory fh */
9274 9277          argop[0].argop = OP_CPUTFH;
9275 9278          argop[0].nfs_argop4_u.opcputfh.sfh = rp->r_fh;
9276 9279  
9277 9280          argop[1].argop = OP_READDIR;
9278 9281          rargs = &argop[1].nfs_argop4_u.opreaddir;
9279 9282          /*
9280 9283           * 1 and 2 are reserved for client "." and ".." entry offset.
9281 9284           * cookie 0 should be used over-the-wire to start reading at
9282 9285           * the beginning of the directory excluding "." and "..".
9283 9286           */
9284 9287          if (rdc->nfs4_cookie == 0 ||
9285 9288              rdc->nfs4_cookie == 1 ||
9286 9289              rdc->nfs4_cookie == 2) {
9287 9290                  rargs->cookie = (nfs_cookie4)0;
9288 9291                  rargs->cookieverf = 0;
9289 9292          } else {
9290 9293                  rargs->cookie = (nfs_cookie4)rdc->nfs4_cookie;
9291 9294                  mutex_enter(&rp->r_statelock);
9292 9295                  rargs->cookieverf = rp->r_cookieverf4;
9293 9296                  mutex_exit(&rp->r_statelock);
9294 9297          }
9295 9298          rargs->dircount = MIN(rdc->buflen, mi->mi_tsize);
9296 9299          rargs->maxcount = mi->mi_tsize;
9297 9300          rargs->attr_request = rd_bitsval;
9298 9301          rargs->rdc = rdc;
9299 9302          rargs->dvp = vp;
9300 9303          rargs->mi = mi;
9301 9304          rargs->cr = cr;
9302 9305  
9303 9306  
9304 9307          /*
9305 9308           * If count < than the minimum required, we return no entries
9306 9309           * and fail with EINVAL
9307 9310           */
9308 9311          if (rargs->dircount < (DIRENT64_RECLEN(1) + DIRENT64_RECLEN(2))) {
9309 9312                  rdc->error = EINVAL;
9310 9313                  goto out;
9311 9314          }
9312 9315  
9313 9316          if (args.array_len == 5) {
9314 9317                  /*
9315 9318                   * Add lookupp and getattr for parent nodeid.
9316 9319                   */
9317 9320                  argop[2].argop = OP_LOOKUPP;
9318 9321  
9319 9322                  argop[3].argop = OP_GETFH;
9320 9323  
9321 9324                  /* getattr parent */
9322 9325                  argop[4].argop = OP_GETATTR;
9323 9326                  argop[4].nfs_argop4_u.opgetattr.attr_request = NFS4_VATTR_MASK;
9324 9327                  argop[4].nfs_argop4_u.opgetattr.mi = mi;
9325 9328          }
9326 9329  
9327 9330          doqueue = 1;
9328 9331  
9329 9332          if (mi->mi_io_kstats) {
9330 9333                  mutex_enter(&mi->mi_lock);
9331 9334                  kstat_runq_enter(KSTAT_IO_PTR(mi->mi_io_kstats));
9332 9335                  mutex_exit(&mi->mi_lock);
9333 9336          }
9334 9337  
9335 9338          /* capture the time of this call */
9336 9339          rargs->t = t = gethrtime();
9337 9340  
9338 9341          rfs4call(mi, &args, &res, cr, &doqueue, 0, &e);
9339 9342  
9340 9343          if (mi->mi_io_kstats) {
9341 9344                  mutex_enter(&mi->mi_lock);
9342 9345                  kstat_runq_exit(KSTAT_IO_PTR(mi->mi_io_kstats));
9343 9346                  mutex_exit(&mi->mi_lock);
9344 9347          }
9345 9348  
9346 9349          needrecov = nfs4_needs_recovery(&e, FALSE, mi->mi_vfsp);
9347 9350  
9348 9351          /*
9349 9352           * If RPC error occurred and it isn't an error that
9350 9353           * triggers recovery, then go ahead and fail now.
9351 9354           */
9352 9355          if (e.error != 0 && !needrecov) {
9353 9356                  rdc->error = e.error;
9354 9357                  goto out;
9355 9358          }
9356 9359  
9357 9360          if (needrecov) {
9358 9361                  bool_t abort;
9359 9362  
9360 9363                  NFS4_DEBUG(nfs4_client_recov_debug, (CE_NOTE,
9361 9364                      "nfs4readdir: initiating recovery.\n"));
9362 9365  
9363 9366                  abort = nfs4_start_recovery(&e, VTOMI4(vp), vp, NULL, NULL,
9364 9367                      NULL, OP_READDIR, NULL, NULL, NULL);
9365 9368                  if (abort == FALSE) {
9366 9369                          nfs4_end_fop(VTOMI4(vp), vp, NULL, OH_READDIR,
9367 9370                              &recov_state, needrecov);
9368 9371                          if (!e.error)
9369 9372                                  (void) xdr_free(xdr_COMPOUND4res_clnt,
9370 9373                                      (caddr_t)&res);
9371 9374                          if (rdc->entries != NULL) {
9372 9375                                  kmem_free(rdc->entries, rdc->entlen);
9373 9376                                  rdc->entries = NULL;
9374 9377                          }
9375 9378                          goto recov_retry;
9376 9379                  }
9377 9380  
9378 9381                  if (e.error != 0) {
9379 9382                          rdc->error = e.error;
9380 9383                          goto out;
9381 9384                  }
9382 9385  
9383 9386                  /* fall through for res.status case */
9384 9387          }
9385 9388  
9386 9389          res_opcnt = res.array_len;
9387 9390  
9388 9391          /*
9389 9392           * If compound failed first 2 ops (PUTFH+READDIR), then return
9390 9393           * failure here.  Subsequent ops are for filling out dot-dot
9391 9394           * dirent, and if they fail, we still want to give the caller
9392 9395           * the dirents returned by (the successful) READDIR op, so we need
9393 9396           * to silently ignore failure for subsequent ops (LOOKUPP+GETATTR).
9394 9397           *
9395 9398           * One example where PUTFH+READDIR ops would succeed but
9396 9399           * LOOKUPP+GETATTR would fail would be a dir that has r perm
9397 9400           * but lacks x.  In this case, a POSIX server's VOP_READDIR
9398 9401           * would succeed; however, VOP_LOOKUP(..) would fail since no
9399 9402           * x perm.  We need to come up with a non-vendor-specific way
9400 9403           * for a POSIX server to return d_ino from dotdot's dirent if
9401 9404           * client only requests mounted_on_fileid, and just say the
9402 9405           * LOOKUPP succeeded and fill out the GETATTR.  However, if
9403 9406           * client requested any mandatory attrs, server would be required
9404 9407           * to fail the GETATTR op because it can't call VOP_LOOKUP+VOP_GETATTR
9405 9408           * for dotdot.
9406 9409           */
9407 9410  
9408 9411          if (res.status) {
9409 9412                  if (res_opcnt <= 2) {
9410 9413                          e.error = geterrno4(res.status);
9411 9414                          nfs4_end_fop(VTOMI4(vp), vp, NULL, OH_READDIR,
9412 9415                              &recov_state, needrecov);
9413 9416                          nfs4_purge_stale_fh(e.error, vp, cr);
9414 9417                          rdc->error = e.error;
9415 9418                          (void) xdr_free(xdr_COMPOUND4res_clnt, (caddr_t)&res);
9416 9419                          if (rdc->entries != NULL) {
9417 9420                                  kmem_free(rdc->entries, rdc->entlen);
9418 9421                                  rdc->entries = NULL;
9419 9422                          }
9420 9423                          /*
9421 9424                           * If readdir a node that is a stub for a
9422 9425                           * crossed mount point, keep the original
9423 9426                           * secinfo flavor for the current file system,
9424 9427                           * not the crossed one.
9425 9428                           */
9426 9429                          (void) check_mnt_secinfo(mi->mi_curr_serv, vp);
9427 9430                          return;
9428 9431                  }
9429 9432          }
9430 9433  
9431 9434          resop = &res.array[1];  /* readdir res */
9432 9435          rd_res = &resop->nfs_resop4_u.opreaddirclnt;
9433 9436  
9434 9437          mutex_enter(&rp->r_statelock);
9435 9438          rp->r_cookieverf4 = rd_res->cookieverf;
9436 9439          mutex_exit(&rp->r_statelock);
9437 9440  
9438 9441          /*
9439 9442           * For "." and ".." entries
9440 9443           * e.g.
9441 9444           *      seek(cookie=0) -> "." entry with d_off = 1
9442 9445           *      seek(cookie=1) -> ".." entry with d_off = 2
9443 9446           */
9444 9447          if (cookie == (nfs_cookie4) 0) {
9445 9448                  if (rd_res->dotp)
9446 9449                          rd_res->dotp->d_ino = nodeid;
9447 9450                  if (rd_res->dotdotp)
9448 9451                          rd_res->dotdotp->d_ino = pnodeid;
9449 9452          }
9450 9453          if (cookie == (nfs_cookie4) 1) {
9451 9454                  if (rd_res->dotdotp)
9452 9455                          rd_res->dotdotp->d_ino = pnodeid;
9453 9456          }
9454 9457  
9455 9458  
9456 9459          /* LOOKUPP+GETATTR attempted */
9457 9460          if (args.array_len == 5 && rd_res->dotdotp) {
9458 9461                  if (res.status == NFS4_OK && res_opcnt == 5) {
9459 9462                          nfs_fh4 *fhp;
9460 9463                          nfs4_sharedfh_t *sfhp;
9461 9464                          vnode_t *pvp;
9462 9465                          nfs4_ga_res_t *garp;
9463 9466  
9464 9467                          resop++;        /* lookupp */
9465 9468                          resop++;        /* getfh   */
9466 9469                          fhp = &resop->nfs_resop4_u.opgetfh.object;
9467 9470  
9468 9471                          resop++;        /* getattr of parent */
9469 9472  
9470 9473                          /*
9471 9474                           * First, take care of finishing the
9472 9475                           * readdir results.
9473 9476                           */
9474 9477                          garp = &resop->nfs_resop4_u.opgetattr.ga_res;
9475 9478                          /*
9476 9479                           * The d_ino of .. must be the inode number
9477 9480                           * of the mounted filesystem.
9478 9481                           */
9479 9482                          if (garp->n4g_va.va_mask & AT_NODEID)
9480 9483                                  rd_res->dotdotp->d_ino =
9481 9484                                      garp->n4g_va.va_nodeid;
9482 9485  
9483 9486  
9484 9487                          /*
9485 9488                           * Next, create the ".." dnlc entry
9486 9489                           */
9487 9490                          sfhp = sfh4_get(fhp, mi);
9488 9491                          if (!nfs4_make_dotdot(sfhp, t, vp, cr, &pvp, 0)) {
9489 9492                                  dnlc_update(vp, "..", pvp);
9490 9493                                  VN_RELE(pvp);
9491 9494                          }
9492 9495                          sfh4_rele(&sfhp);
9493 9496                  }
9494 9497          }
9495 9498  
9496 9499          if (mi->mi_io_kstats) {
9497 9500                  mutex_enter(&mi->mi_lock);
9498 9501                  KSTAT_IO_PTR(mi->mi_io_kstats)->reads++;
9499 9502                  KSTAT_IO_PTR(mi->mi_io_kstats)->nread += rdc->actlen;
9500 9503                  mutex_exit(&mi->mi_lock);
9501 9504          }
9502 9505  
9503 9506          (void) xdr_free(xdr_COMPOUND4res_clnt, (caddr_t)&res);
9504 9507  
9505 9508  out:
9506 9509          /*
9507 9510           * If readdir a node that is a stub for a crossed mount point,
9508 9511           * keep the original secinfo flavor for the current file system,
9509 9512           * not the crossed one.
9510 9513           */
9511 9514          (void) check_mnt_secinfo(mi->mi_curr_serv, vp);
9512 9515  
9513 9516          nfs4_end_fop(mi, vp, NULL, OH_READDIR, &recov_state, needrecov);
9514 9517  }
9515 9518  
9516 9519  
/*
 * Perform the synchronous I/O described by buffer bp against the NFSv4
 * server: a read when B_READ is set in b_flags, otherwise a write.
 * Over-the-wire credentials are obtained via nfs4_get_otw_cred_by_osp()
 * and the operation is retried with successive credentials on EACCES
 * until last_time indicates no further candidates remain.
 *
 * stab_comm   write stability (ignored for reads)
 * cr          caller's credential, used to derive the otw credential
 * readahead   passed through to nfs4read() for read requests
 *
 * Returns 0 on success, NFS_EOF when a read landed entirely past EOF
 * (pages are preserved for the caller to deal with), or an errno.
 * B_ERROR is set in bp->b_flags for any failure other than NFS_EOF.
 */
static int
nfs4_bio(struct buf *bp, stable_how4 *stab_comm, cred_t *cr, bool_t readahead)
{
	rnode4_t *rp = VTOR4(bp->b_vp);
	int count;
	int error;
	cred_t *cred_otw = NULL;
	offset_t offset;
	nfs4_open_stream_t *osp = NULL;
	bool_t first_time = TRUE;	/* first time getting otw cred */
	bool_t last_time = FALSE;	/* last time getting otw cred */

	ASSERT(nfs_zone() == VTOMI4(bp->b_vp)->mi_zone);

	DTRACE_IO1(start, struct buf *, bp);
	/* byte offset of this buffer within the file */
	offset = ldbtob(bp->b_lblkno);

	if (bp->b_flags & B_READ) {
	read_again:
		/*
		 * Releases the osp, if it is provided.
		 * Puts a hold on the cred_otw and the new osp (if found).
		 */
		cred_otw = nfs4_get_otw_cred_by_osp(rp, cr, &osp,
		    &first_time, &last_time);
		error = bp->b_error = nfs4read(bp->b_vp, bp->b_un.b_addr,
		    offset, bp->b_bcount, &bp->b_resid, cred_otw,
		    readahead, NULL);
		crfree(cred_otw);
		if (!error) {
			if (bp->b_resid) {
				/*
				 * Didn't get it all because we hit EOF,
				 * zero all the memory beyond the EOF.
				 */
				/* bzero(rdaddr + */
				bzero(bp->b_un.b_addr +
				    bp->b_bcount - bp->b_resid, bp->b_resid);
			}
			mutex_enter(&rp->r_statelock);
			if (bp->b_resid == bp->b_bcount &&
			    offset >= rp->r_size) {
				/*
				 * We didn't read anything at all as we are
				 * past EOF.  Return an error indicator back
				 * but don't destroy the pages (yet).
				 */
				error = NFS_EOF;
			}
			mutex_exit(&rp->r_statelock);
		} else if (error == EACCES && last_time == FALSE) {
				goto read_again;
		}
	} else {
		if (!(rp->r_flags & R4STALE)) {
write_again:
			/*
			 * Releases the osp, if it is provided.
			 * Puts a hold on the cred_otw and the new
			 * osp (if found).
			 */
			cred_otw = nfs4_get_otw_cred_by_osp(rp, cr, &osp,
			    &first_time, &last_time);
			/*
			 * Clamp the write to the current file size;
			 * r_size is protected by r_statelock.
			 */
			mutex_enter(&rp->r_statelock);
			count = MIN(bp->b_bcount, rp->r_size - offset);
			mutex_exit(&rp->r_statelock);
			if (count < 0)
				cmn_err(CE_PANIC, "nfs4_bio: write count < 0");
#ifdef DEBUG
			if (count == 0) {
				zoneid_t zoneid = getzoneid();

				zcmn_err(zoneid, CE_WARN,
				    "nfs4_bio: zero length write at %lld",
				    offset);
				zcmn_err(zoneid, CE_CONT, "flags=0x%x, "
				    "b_bcount=%ld, file size=%lld",
				    rp->r_flags, (long)bp->b_bcount,
				    rp->r_size);
				sfh4_printfhandle(VTOR4(bp->b_vp)->r_fh);
				if (nfs4_bio_do_stop)
					debug_enter("nfs4_bio");
			}
#endif
			error = nfs4write(bp->b_vp, bp->b_un.b_addr, offset,
			    count, cred_otw, stab_comm);
			if (error == EACCES && last_time == FALSE) {
				crfree(cred_otw);
				goto write_again;
			}
			bp->b_error = error;
			if (error && error != EINTR &&
			    !(bp->b_vp->v_vfsp->vfs_flag & VFS_UNMOUNTED)) {
				/*
				 * Don't print EDQUOT errors on the console.
				 * Don't print asynchronous EACCES errors.
				 * Don't print EFBIG errors.
				 * Print all other write errors.
				 */
				if (error != EDQUOT && error != EFBIG &&
				    (error != EACCES ||
				    !(bp->b_flags & B_ASYNC)))
					nfs4_write_error(bp->b_vp,
					    error, cred_otw);
				/*
				 * Update r_error and r_flags as appropriate.
				 * If the error was ESTALE, then mark the
				 * rnode as not being writeable and save
				 * the error status.  Otherwise, save any
				 * errors which occur from asynchronous
				 * page invalidations.  Any errors occurring
				 * from other operations should be saved
				 * by the caller.
				 */
				mutex_enter(&rp->r_statelock);
				if (error == ESTALE) {
					rp->r_flags |= R4STALE;
					if (!rp->r_error)
						rp->r_error = error;
				} else if (!rp->r_error &&
				    (bp->b_flags &
				    (B_INVAL|B_FORCE|B_ASYNC)) ==
				    (B_INVAL|B_FORCE|B_ASYNC)) {
					rp->r_error = error;
				}
				mutex_exit(&rp->r_statelock);
			}
			crfree(cred_otw);
		} else {
			/* rnode already marked stale; fail without I/O */
			error = rp->r_error;
			/*
			 * A close may have cleared r_error, if so,
			 * propagate ESTALE error return properly
			 */
			if (error == 0)
				error = ESTALE;
		}
	}

	/* NFS_EOF is an in-band indicator, not a buffer error */
	if (error != 0 && error != NFS_EOF)
		bp->b_flags |= B_ERROR;

	if (osp)
		open_stream_rele(osp, rp);

	DTRACE_IO1(done, struct buf *, bp);

	return (error);
}
9666 9669  
/* ARGSUSED */
/*
 * VOP_FID for NFSv4: file-id generation is not supported on the client
 * side of a remote file system, so always fail with EREMOTE.
 */
int
nfs4_fid(vnode_t *vp, fid_t *fidp, caller_context_t *ct)
{
	return (EREMOTE);
}
9673 9676  
9674 9677  /* ARGSUSED2 */
9675 9678  int
9676 9679  nfs4_rwlock(vnode_t *vp, int write_lock, caller_context_t *ctp)
9677 9680  {
9678 9681          rnode4_t *rp = VTOR4(vp);
9679 9682  
9680 9683          if (!write_lock) {
9681 9684                  (void) nfs_rw_enter_sig(&rp->r_rwlock, RW_READER, FALSE);
9682 9685                  return (V_WRITELOCK_FALSE);
9683 9686          }
9684 9687  
9685 9688          if ((rp->r_flags & R4DIRECTIO) ||
9686 9689              (VTOMI4(vp)->mi_flags & MI4_DIRECTIO)) {
9687 9690                  (void) nfs_rw_enter_sig(&rp->r_rwlock, RW_READER, FALSE);
9688 9691                  if (rp->r_mapcnt == 0 && !nfs4_has_pages(vp))
9689 9692                          return (V_WRITELOCK_FALSE);
9690 9693                  nfs_rw_exit(&rp->r_rwlock);
9691 9694          }
9692 9695  
9693 9696          (void) nfs_rw_enter_sig(&rp->r_rwlock, RW_WRITER, FALSE);
9694 9697          return (V_WRITELOCK_TRUE);
9695 9698  }
9696 9699  
9697 9700  /* ARGSUSED */
9698 9701  void
9699 9702  nfs4_rwunlock(vnode_t *vp, int write_lock, caller_context_t *ctp)
9700 9703  {
9701 9704          rnode4_t *rp = VTOR4(vp);
9702 9705  
9703 9706          nfs_rw_exit(&rp->r_rwlock);
9704 9707  }
9705 9708  
9706 9709  /* ARGSUSED */
9707 9710  static int
9708 9711  nfs4_seek(vnode_t *vp, offset_t ooff, offset_t *noffp, caller_context_t *ct)
9709 9712  {
9710 9713          if (nfs_zone() != VTOMI4(vp)->mi_zone)
9711 9714                  return (EIO);
9712 9715  
9713 9716          /*
9714 9717           * Because we stuff the readdir cookie into the offset field
9715 9718           * someone may attempt to do an lseek with the cookie which
9716 9719           * we want to succeed.
9717 9720           */
9718 9721          if (vp->v_type == VDIR)
9719 9722                  return (0);
9720 9723          if (*noffp < 0)
9721 9724                  return (EINVAL);
9722 9725          return (0);
9723 9726  }
9724 9727  
9725 9728  
/*
 * VOP_GETPAGE for NFSv4: return all the pages from [off..off+len) in
 * the file, after validating the client caches.  Retries from scratch
 * when a read races with truncation (NFS_EOF from nfs4_getapage), and
 * purges the stale file handle on ESTALE.
 */
/* ARGSUSED */
static int
nfs4_getpage(vnode_t *vp, offset_t off, size_t len, uint_t *protp,
    page_t *pl[], size_t plsz, struct seg *seg, caddr_t addr,
    enum seg_rw rw, cred_t *cr, caller_context_t *ct)
{
	rnode4_t *rp;
	int error;
	mntinfo4_t *mi;

	if (nfs_zone() != VTOMI4(vp)->mi_zone)
		return (EIO);
	rp = VTOR4(vp);
	/* operate on the real vnode, not a shadow (sub-mount) vnode */
	if (IS_SHADOW(vp, rp))
		vp = RTOV4(rp);

	if (vp->v_flag & VNOMAP)
		return (ENOSYS);

	if (protp != NULL)
		*protp = PROT_ALL;

	/*
	 * Now validate that the caches are up to date.
	 */
	if (error = nfs4_validate_caches(vp, cr))
		return (error);

	mi = VTOMI4(vp);
retry:
	mutex_enter(&rp->r_statelock);

	/*
	 * Don't create dirty pages faster than they
	 * can be cleaned so that the system doesn't
	 * get imbalanced.  If the async queue is
	 * maxed out, then wait for it to drain before
	 * creating more dirty pages.  Also, wait for
	 * any threads doing pagewalks in the vop_getattr
	 * entry points so that they don't block for
	 * long periods.
	 */
	if (rw == S_CREATE) {
		while ((mi->mi_max_threads != 0 &&
		    rp->r_awcount > 2 * mi->mi_max_threads) ||
		    rp->r_gcount > 0)
			cv_wait(&rp->r_cv, &rp->r_statelock);
	}

	/*
	 * If we are getting called as a side effect of an nfs_write()
	 * operation the local file size might not be extended yet.
	 * In this case we want to be able to return pages of zeroes.
	 */
	if (off + len > rp->r_size + PAGEOFFSET && seg != segkmap) {
		NFS4_DEBUG(nfs4_pageio_debug,
		    (CE_NOTE, "getpage beyond EOF: off=%lld, "
		    "len=%llu, size=%llu, attrsize =%llu", off,
		    (u_longlong_t)len, rp->r_size, rp->r_attr.va_size));
		mutex_exit(&rp->r_statelock);
		return (EFAULT);		/* beyond EOF */
	}

	mutex_exit(&rp->r_statelock);

	error = pvn_getpages(nfs4_getapage, vp, off, len, protp,
	    pl, plsz, seg, addr, rw, cr);
	NFS4_DEBUG(nfs4_pageio_debug && error,
	    (CE_NOTE, "getpages error %d; off=%lld, len=%lld",
	    error, off, (u_longlong_t)len));

	switch (error) {
	case NFS_EOF:
		/* file shrank under us; refresh attributes and retry */
		nfs4_purge_caches(vp, NFS4_NOPURGE_DNLC, cr, FALSE);
		goto retry;
	case ESTALE:
		nfs4_purge_stale_fh(error, vp, cr);
	}

	return (error);
}
9810 9813  
9811 9814  /*
9812 9815   * Called from pvn_getpages to get a particular page.
9813 9816   */
9814 9817  /* ARGSUSED */
9815 9818  static int
9816 9819  nfs4_getapage(vnode_t *vp, u_offset_t off, size_t len, uint_t *protp,
9817 9820      page_t *pl[], size_t plsz, struct seg *seg, caddr_t addr,
9818 9821      enum seg_rw rw, cred_t *cr)
9819 9822  {
9820 9823          rnode4_t *rp;
9821 9824          uint_t bsize;
9822 9825          struct buf *bp;
9823 9826          page_t *pp;
9824 9827          u_offset_t lbn;
9825 9828          u_offset_t io_off;
9826 9829          u_offset_t blkoff;
9827 9830          u_offset_t rablkoff;
9828 9831          size_t io_len;
9829 9832          uint_t blksize;
9830 9833          int error;
9831 9834          int readahead;
9832 9835          int readahead_issued = 0;
9833 9836          int ra_window; /* readahead window */
9834 9837          page_t *pagefound;
9835 9838          page_t *savepp;
9836 9839  
9837 9840          if (nfs_zone() != VTOMI4(vp)->mi_zone)
9838 9841                  return (EIO);
9839 9842  
9840 9843          rp = VTOR4(vp);
9841 9844          ASSERT(!IS_SHADOW(vp, rp));
9842 9845          bsize = MAX(vp->v_vfsp->vfs_bsize, PAGESIZE);
9843 9846  
9844 9847  reread:
9845 9848          bp = NULL;
9846 9849          pp = NULL;
9847 9850          pagefound = NULL;
9848 9851  
9849 9852          if (pl != NULL)
9850 9853                  pl[0] = NULL;
9851 9854  
9852 9855          error = 0;
9853 9856          lbn = off / bsize;
9854 9857          blkoff = lbn * bsize;
9855 9858  
9856 9859          /*
9857 9860           * Queueing up the readahead before doing the synchronous read
9858 9861           * results in a significant increase in read throughput because
9859 9862           * of the increased parallelism between the async threads and
9860 9863           * the process context.
9861 9864           */
9862 9865          if ((off & ((vp->v_vfsp->vfs_bsize) - 1)) == 0 &&
9863 9866              rw != S_CREATE &&
9864 9867              !(vp->v_flag & VNOCACHE)) {
9865 9868                  mutex_enter(&rp->r_statelock);
9866 9869  
9867 9870                  /*
9868 9871                   * Calculate the number of readaheads to do.
9869 9872                   * a) No readaheads at offset = 0.
9870 9873                   * b) Do maximum(nfs4_nra) readaheads when the readahead
9871 9874                   *    window is closed.
9872 9875                   * c) Do readaheads between 1 to (nfs4_nra - 1) depending
9873 9876                   *    upon how far the readahead window is open or close.
9874 9877                   * d) No readaheads if rp->r_nextr is not within the scope
9875 9878                   *    of the readahead window (random i/o).
9876 9879                   */
9877 9880  
9878 9881                  if (off == 0)
9879 9882                          readahead = 0;
9880 9883                  else if (blkoff == rp->r_nextr)
9881 9884                          readahead = nfs4_nra;
9882 9885                  else if (rp->r_nextr > blkoff &&
9883 9886                      ((ra_window = (rp->r_nextr - blkoff) / bsize)
9884 9887                      <= (nfs4_nra - 1)))
9885 9888                          readahead = nfs4_nra - ra_window;
9886 9889                  else
9887 9890                          readahead = 0;
9888 9891  
9889 9892                  rablkoff = rp->r_nextr;
9890 9893                  while (readahead > 0 && rablkoff + bsize < rp->r_size) {
9891 9894                          mutex_exit(&rp->r_statelock);
9892 9895                          if (nfs4_async_readahead(vp, rablkoff + bsize,
9893 9896                              addr + (rablkoff + bsize - off),
9894 9897                              seg, cr, nfs4_readahead) < 0) {
9895 9898                                  mutex_enter(&rp->r_statelock);
9896 9899                                  break;
9897 9900                          }
9898 9901                          readahead--;
9899 9902                          rablkoff += bsize;
9900 9903                          /*
9901 9904                           * Indicate that we did a readahead so
9902 9905                           * readahead offset is not updated
9903 9906                           * by the synchronous read below.
9904 9907                           */
9905 9908                          readahead_issued = 1;
9906 9909                          mutex_enter(&rp->r_statelock);
9907 9910                          /*
9908 9911                           * set readahead offset to
9909 9912                           * offset of last async readahead
9910 9913                           * request.
9911 9914                           */
9912 9915                          rp->r_nextr = rablkoff;
9913 9916                  }
9914 9917                  mutex_exit(&rp->r_statelock);
9915 9918          }
9916 9919  
9917 9920  again:
9918 9921          if ((pagefound = page_exists(vp, off)) == NULL) {
9919 9922                  if (pl == NULL) {
9920 9923                          (void) nfs4_async_readahead(vp, blkoff, addr, seg, cr,
9921 9924                              nfs4_readahead);
9922 9925                  } else if (rw == S_CREATE) {
9923 9926                          /*
9924 9927                           * Block for this page is not allocated, or the offset
9925 9928                           * is beyond the current allocation size, or we're
9926 9929                           * allocating a swap slot and the page was not found,
9927 9930                           * so allocate it and return a zero page.
9928 9931                           */
9929 9932                          if ((pp = page_create_va(vp, off,
9930 9933                              PAGESIZE, PG_WAIT, seg, addr)) == NULL)
9931 9934                                  cmn_err(CE_PANIC, "nfs4_getapage: page_create");
9932 9935                          io_len = PAGESIZE;
9933 9936                          mutex_enter(&rp->r_statelock);
9934 9937                          rp->r_nextr = off + PAGESIZE;
9935 9938                          mutex_exit(&rp->r_statelock);
9936 9939                  } else {
9937 9940                          /*
9938 9941                           * Need to go to server to get a block
9939 9942                           */
9940 9943                          mutex_enter(&rp->r_statelock);
9941 9944                          if (blkoff < rp->r_size &&
9942 9945                              blkoff + bsize > rp->r_size) {
9943 9946                                  /*
9944 9947                                   * If less than a block left in
9945 9948                                   * file read less than a block.
9946 9949                                   */
9947 9950                                  if (rp->r_size <= off) {
9948 9951                                          /*
9949 9952                                           * Trying to access beyond EOF,
9950 9953                                           * set up to get at least one page.
9951 9954                                           */
9952 9955                                          blksize = off + PAGESIZE - blkoff;
9953 9956                                  } else
9954 9957                                          blksize = rp->r_size - blkoff;
9955 9958                          } else if ((off == 0) ||
9956 9959                              (off != rp->r_nextr && !readahead_issued)) {
9957 9960                                  blksize = PAGESIZE;
9958 9961                                  blkoff = off; /* block = page here */
9959 9962                          } else
9960 9963                                  blksize = bsize;
9961 9964                          mutex_exit(&rp->r_statelock);
9962 9965  
9963 9966                          pp = pvn_read_kluster(vp, off, seg, addr, &io_off,
9964 9967                              &io_len, blkoff, blksize, 0);
9965 9968  
9966 9969                          /*
9967 9970                           * Some other thread has entered the page,
9968 9971                           * so just use it.
9969 9972                           */
9970 9973                          if (pp == NULL)
9971 9974                                  goto again;
9972 9975  
9973 9976                          /*
9974 9977                           * Now round the request size up to page boundaries.
9975 9978                           * This ensures that the entire page will be
9976 9979                           * initialized to zeroes if EOF is encountered.
9977 9980                           */
9978 9981                          io_len = ptob(btopr(io_len));
9979 9982  
9980 9983                          bp = pageio_setup(pp, io_len, vp, B_READ);
9981 9984                          ASSERT(bp != NULL);
9982 9985  
9983 9986                          /*
9984 9987                           * pageio_setup should have set b_addr to 0.  This
9985 9988                           * is correct since we want to do I/O on a page
9986 9989                           * boundary.  bp_mapin will use this addr to calculate
9987 9990                           * an offset, and then set b_addr to the kernel virtual
9988 9991                           * address it allocated for us.
9989 9992                           */
9990 9993                          ASSERT(bp->b_un.b_addr == 0);
9991 9994  
9992 9995                          bp->b_edev = 0;
9993 9996                          bp->b_dev = 0;
9994 9997                          bp->b_lblkno = lbtodb(io_off);
9995 9998                          bp->b_file = vp;
9996 9999                          bp->b_offset = (offset_t)off;
9997 10000                          bp_mapin(bp);
9998 10001  
9999 10002                          /*
10000 10003                           * If doing a write beyond what we believe is EOF,
10001 10004                           * don't bother trying to read the pages from the
10002 10005                           * server, we'll just zero the pages here.  We
10003 10006                           * don't check that the rw flag is S_WRITE here
10004 10007                           * because some implementations may attempt a
10005 10008                           * read access to the buffer before copying data.
10006 10009                           */
10007 10010                          mutex_enter(&rp->r_statelock);
10008 10011                          if (io_off >= rp->r_size && seg == segkmap) {
10009 10012                                  mutex_exit(&rp->r_statelock);
10010 10013                                  bzero(bp->b_un.b_addr, io_len);
10011 10014                          } else {
10012 10015                                  mutex_exit(&rp->r_statelock);
10013 10016                                  error = nfs4_bio(bp, NULL, cr, FALSE);
10014 10017                          }
10015 10018  
10016 10019                          /*
10017 10020                           * Unmap the buffer before freeing it.
10018 10021                           */
10019 10022                          bp_mapout(bp);
10020 10023                          pageio_done(bp);
10021 10024  
10022 10025                          savepp = pp;
10023 10026                          do {
10024 10027                                  pp->p_fsdata = C_NOCOMMIT;
10025 10028                          } while ((pp = pp->p_next) != savepp);
10026 10029  
10027 10030                          if (error == NFS_EOF) {
10028 10031                                  /*
10029 10032                                   * If doing a write system call just return
10030 10033                                   * zeroed pages, else user tried to get pages
10031 10034                                   * beyond EOF, return error.  We don't check
10032 10035                                   * that the rw flag is S_WRITE here because
10033 10036                                   * some implementations may attempt a read
10034 10037                                   * access to the buffer before copying data.
10035 10038                                   */
10036 10039                                  if (seg == segkmap)
10037 10040                                          error = 0;
10038 10041                                  else
10039 10042                                          error = EFAULT;
10040 10043                          }
10041 10044  
10042 10045                          if (!readahead_issued && !error) {
10043 10046                                  mutex_enter(&rp->r_statelock);
10044 10047                                  rp->r_nextr = io_off + io_len;
10045 10048                                  mutex_exit(&rp->r_statelock);
10046 10049                          }
10047 10050                  }
10048 10051          }
10049 10052  
10050 10053  out:
10051 10054          if (pl == NULL)
10052 10055                  return (error);
10053 10056  
10054 10057          if (error) {
10055 10058                  if (pp != NULL)
10056 10059                          pvn_read_done(pp, B_ERROR);
10057 10060                  return (error);
10058 10061          }
10059 10062  
10060 10063          if (pagefound) {
10061 10064                  se_t se = (rw == S_CREATE ? SE_EXCL : SE_SHARED);
10062 10065  
10063 10066                  /*
10064 10067                   * Page exists in the cache, acquire the appropriate lock.
10065 10068                   * If this fails, start all over again.
10066 10069                   */
10067 10070                  if ((pp = page_lookup(vp, off, se)) == NULL) {
10068 10071  #ifdef DEBUG
10069 10072                          nfs4_lostpage++;
10070 10073  #endif
10071 10074                          goto reread;
10072 10075                  }
10073 10076                  pl[0] = pp;
10074 10077                  pl[1] = NULL;
10075 10078                  return (0);
10076 10079          }
10077 10080  
10078 10081          if (pp != NULL)
10079 10082                  pvn_plist_init(pp, pl, plsz, off, io_len, rw);
10080 10083  
10081 10084          return (error);
10082 10085  }
10083 10086  
/*
 * Read-ahead worker: passed to nfs4_async_readahead() by nfs4_getapage()
 * and run from an async-thread context to pull the block at 'blkoff' into
 * the page cache ahead of the application's synchronous reads.  There is
 * no caller to report to (void return): on error the pages are destroyed
 * via pvn_read_done(B_ERROR) and the rnode's read-ahead offset (r_nextr)
 * is rolled back so a later read retries the region.
 */
static void
nfs4_readahead(vnode_t *vp, u_offset_t blkoff, caddr_t addr, struct seg *seg,
    cred_t *cr)
{
	int error;
	page_t *pp;
	u_offset_t io_off;	/* actual start offset of the kluster */
	size_t io_len;		/* actual length of the kluster */
	struct buf *bp;
	uint_t bsize, blksize;
	rnode4_t *rp = VTOR4(vp);
	page_t *savepp;

	/* Async work must execute in the zone that owns this mount. */
	ASSERT(nfs_zone() == VTOMI4(vp)->mi_zone);

	bsize = MAX(vp->v_vfsp->vfs_bsize, PAGESIZE);

	/* r_size is protected by r_statelock; size the request under it. */
	mutex_enter(&rp->r_statelock);
	if (blkoff < rp->r_size && blkoff + bsize > rp->r_size) {
		/*
		 * If less than a block left in file read less
		 * than a block.
		 */
		blksize = rp->r_size - blkoff;
	} else
		blksize = bsize;
	mutex_exit(&rp->r_statelock);

	pp = pvn_read_kluster(vp, blkoff, segkmap, addr,
	    &io_off, &io_len, blkoff, blksize, 1);
	/*
	 * The isra flag passed to the kluster function is 1, we may have
	 * gotten a return value of NULL for a variety of reasons (# of free
	 * pages < minfree, someone entered the page on the vnode etc). In all
	 * cases, we want to punt on the readahead.
	 */
	if (pp == NULL)
		return;

	/*
	 * Now round the request size up to page boundaries.
	 * This ensures that the entire page will be
	 * initialized to zeroes if EOF is encountered.
	 */
	io_len = ptob(btopr(io_len));

	bp = pageio_setup(pp, io_len, vp, B_READ);
	ASSERT(bp != NULL);

	/*
	 * pageio_setup should have set b_addr to 0.  This is correct since
	 * we want to do I/O on a page boundary. bp_mapin() will use this addr
	 * to calculate an offset, and then set b_addr to the kernel virtual
	 * address it allocated for us.
	 */
	ASSERT(bp->b_un.b_addr == 0);

	bp->b_edev = 0;
	bp->b_dev = 0;
	bp->b_lblkno = lbtodb(io_off);
	bp->b_file = vp;
	bp->b_offset = (offset_t)blkoff;
	bp_mapin(bp);

	/*
	 * If doing a write beyond what we believe is EOF, don't bother trying
	 * to read the pages from the server, we'll just zero the pages here.
	 * We don't check that the rw flag is S_WRITE here because some
	 * implementations may attempt a read access to the buffer before
	 * copying data.
	 */
	mutex_enter(&rp->r_statelock);
	if (io_off >= rp->r_size && seg == segkmap) {
		mutex_exit(&rp->r_statelock);
		bzero(bp->b_un.b_addr, io_len);
		error = 0;
	} else {
		mutex_exit(&rp->r_statelock);
		error = nfs4_bio(bp, NULL, cr, TRUE);
		/* Read-ahead beyond EOF is benign; treat as success. */
		if (error == NFS_EOF)
			error = 0;
	}

	/*
	 * Unmap the buffer before freeing it.
	 */
	bp_mapout(bp);
	pageio_done(bp);

	/* Mark every page of the kluster as not needing an NFS commit. */
	savepp = pp;
	do {
		pp->p_fsdata = C_NOCOMMIT;
	} while ((pp = pp->p_next) != savepp);

	/* Loop above ended with pp == savepp, the head of the list. */
	pvn_read_done(pp, error ? B_READ | B_ERROR : B_READ);

	/*
	 * In case of error set readahead offset
	 * to the lowest offset.
	 * pvn_read_done() calls VN_DISPOSE to destroy the pages
	 */
	if (error && rp->r_nextr > io_off) {
		/* Unlocked test above is advisory; recheck under the lock. */
		mutex_enter(&rp->r_statelock);
		if (rp->r_nextr > io_off)
			rp->r_nextr = io_off;
		mutex_exit(&rp->r_statelock);
	}
}
10192 10195  
10193 10196  /*
10194 10197   * Flags are composed of {B_INVAL, B_FREE, B_DONTNEED, B_FORCE}
10195 10198   * If len == 0, do from off to EOF.
10196 10199   *
10197 10200   * The normal cases should be len == 0 && off == 0 (entire vp list) or
10198 10201   * len == MAXBSIZE (from segmap_release actions), and len == PAGESIZE
10199 10202   * (from pageout).
10200 10203   */
10201 10204  /* ARGSUSED */
10202 10205  static int
10203 10206  nfs4_putpage(vnode_t *vp, offset_t off, size_t len, int flags, cred_t *cr,
10204 10207      caller_context_t *ct)
10205 10208  {
10206 10209          int error;
10207 10210          rnode4_t *rp;
10208 10211  
10209 10212          ASSERT(cr != NULL);
10210 10213  
10211 10214          if (!(flags & B_ASYNC) && nfs_zone() != VTOMI4(vp)->mi_zone)
10212 10215                  return (EIO);
10213 10216  
10214 10217          rp = VTOR4(vp);
10215 10218          if (IS_SHADOW(vp, rp))
10216 10219                  vp = RTOV4(rp);
10217 10220  
10218 10221          /*
10219 10222           * XXX - Why should this check be made here?
10220 10223           */
10221 10224          if (vp->v_flag & VNOMAP)
10222 10225                  return (ENOSYS);
10223 10226  
10224 10227          if (len == 0 && !(flags & B_INVAL) &&
10225 10228              (vp->v_vfsp->vfs_flag & VFS_RDONLY))
10226 10229                  return (0);
10227 10230  
10228 10231          mutex_enter(&rp->r_statelock);
10229 10232          rp->r_count++;
10230 10233          mutex_exit(&rp->r_statelock);
10231 10234          error = nfs4_putpages(vp, off, len, flags, cr);
10232 10235          mutex_enter(&rp->r_statelock);
10233 10236          rp->r_count--;
10234 10237          cv_broadcast(&rp->r_cv);
10235 10238          mutex_exit(&rp->r_statelock);
10236 10239  
10237 10240          return (error);
10238 10241  }
10239 10242  
/*
 * Write out a single page, possibly klustering adjacent dirty pages.
 *
 * On return, *offp/*lenp (when non-NULL) report the offset and length
 * actually covered by the i/o (or deferred, in the R4MODINPROGRESS case).
 * With B_ASYNC the write is queued via nfs4_async_putapage(); otherwise
 * it is performed synchronously via nfs4_sync_putapage().
 */
int
nfs4_putapage(vnode_t *vp, page_t *pp, u_offset_t *offp, size_t *lenp,
    int flags, cred_t *cr)
{
	u_offset_t io_off;	/* start of the klustered i/o */
	u_offset_t lbn_off;	/* byte offset of the logical block */
	u_offset_t lbn;		/* logical block number of pp */
	size_t io_len;		/* length of the klustered i/o */
	uint_t bsize;
	int error;
	rnode4_t *rp;

	ASSERT(!(vp->v_vfsp->vfs_flag & VFS_RDONLY));
	ASSERT(pp != NULL);
	ASSERT(cr != NULL);
	ASSERT((flags & B_ASYNC) || nfs_zone() == VTOMI4(vp)->mi_zone);

	rp = VTOR4(vp);
	ASSERT(rp->r_count > 0);
	ASSERT(!IS_SHADOW(vp, rp));

	bsize = MAX(vp->v_vfsp->vfs_bsize, PAGESIZE);
	lbn = pp->p_offset / bsize;
	lbn_off = lbn * bsize;

	/*
	 * Find a kluster that fits in one block, or in
	 * one page if pages are bigger than blocks.  If
	 * there is less file space allocated than a whole
	 * page, we'll shorten the i/o request below.
	 */
	pp = pvn_write_kluster(vp, pp, &io_off, &io_len, lbn_off,
	    roundup(bsize, PAGESIZE), flags);

	/*
	 * pvn_write_kluster shouldn't have returned a page with offset
	 * behind the original page we were given.  Verify that.
	 */
	ASSERT((pp->p_offset / bsize) >= lbn);

	/*
	 * Now pp will have the list of kept dirty pages marked for
	 * write back.  It will also handle invalidation and freeing
	 * of pages that are not dirty.  Check for page length rounding
	 * problems.
	 */
	if (io_off + io_len > lbn_off + bsize) {
		ASSERT((io_off + io_len) - (lbn_off + bsize) < PAGESIZE);
		io_len = lbn_off + bsize - io_off;
	}
	/*
	 * The R4MODINPROGRESS flag makes sure that nfs4_bio() sees a
	 * consistent value of r_size. R4MODINPROGRESS is set in writerp4().
	 * When R4MODINPROGRESS is set it indicates that a uiomove() is in
	 * progress and the r_size has not been made consistent with the
	 * new size of the file. When the uiomove() completes the r_size is
	 * updated and the R4MODINPROGRESS flag is cleared.
	 *
	 * The R4MODINPROGRESS flag makes sure that nfs4_bio() sees a
	 * consistent value of r_size. Without this handshaking, it is
	 * possible that nfs4_bio() picks  up the old value of r_size
	 * before the uiomove() in writerp4() completes. This will result
	 * in the write through nfs4_bio() being dropped.
	 *
	 * More precisely, there is a window between the time the uiomove()
	 * completes and the time the r_size is updated. If a VOP_PUTPAGE()
	 * operation intervenes in this window, the page will be picked up,
	 * because it is dirty (it will be unlocked, unless it was
	 * pagecreate'd). When the page is picked up as dirty, the dirty
	 * bit is reset (pvn_getdirty()). In nfs4write(), r_size is
	 * checked. This will still be the old size. Therefore the page will
	 * not be written out. When segmap_release() calls VOP_PUTPAGE(),
	 * the page will be found to be clean and the write will be dropped.
	 */
	if (rp->r_flags & R4MODINPROGRESS) {
		/* Unlocked test above is racy; recheck under r_statelock. */
		mutex_enter(&rp->r_statelock);
		if ((rp->r_flags & R4MODINPROGRESS) &&
		    rp->r_modaddr + MAXBSIZE > io_off &&
		    rp->r_modaddr < io_off + io_len) {
			page_t *plist;
			/*
			 * A write is in progress for this region of the file.
			 * If we did not detect R4MODINPROGRESS here then this
			 * path through nfs_putapage() would eventually go to
			 * nfs4_bio() and may not write out all of the data
			 * in the pages. We end up losing data. So we decide
			 * to set the modified bit on each page in the page
			 * list and mark the rnode with R4DIRTY. This write
			 * will be restarted at some later time.
			 */
			plist = pp;
			while (plist != NULL) {
				pp = plist;
				page_sub(&plist, pp);
				hat_setmod(pp);
				page_io_unlock(pp);
				page_unlock(pp);
			}
			rp->r_flags |= R4DIRTY;
			mutex_exit(&rp->r_statelock);
			/* Report the deferred range as if it had been done. */
			if (offp)
				*offp = io_off;
			if (lenp)
				*lenp = io_len;
			return (0);
		}
		mutex_exit(&rp->r_statelock);
	}

	if (flags & B_ASYNC) {
		error = nfs4_async_putapage(vp, pp, io_off, io_len, flags, cr,
		    nfs4_sync_putapage);
	} else
		error = nfs4_sync_putapage(vp, pp, io_off, io_len, flags, cr);

	if (offp)
		*offp = io_off;
	if (lenp)
		*lenp = io_len;
	return (error);
}
10364 10367  
/*
 * Synchronously write the klustered page list [io_off, io_off + io_len)
 * via nfs4_rdwrlbn(), then complete the pages with pvn_write_done().
 * Persistent failures (ENOSPC/EDQUOT/EFBIG/EACCES) mark the rnode
 * R4OUTOFSPACE and, for non-async callers, retry with B_INVAL|B_FORCE so
 * the pages are destroyed rather than left to fill memory.
 * Also serves as the worker passed to nfs4_async_putapage().
 */
static int
nfs4_sync_putapage(vnode_t *vp, page_t *pp, u_offset_t io_off, size_t io_len,
    int flags, cred_t *cr)
{
	int error;
	rnode4_t *rp;

	ASSERT(nfs_zone() == VTOMI4(vp)->mi_zone);

	flags |= B_WRITE;

	error = nfs4_rdwrlbn(vp, pp, io_off, io_len, flags, cr);

	rp = VTOR4(vp);

	if ((error == ENOSPC || error == EDQUOT || error == EFBIG ||
	    error == EACCES) &&
	    (flags & (B_INVAL|B_FORCE)) != (B_INVAL|B_FORCE)) {
		if (!(rp->r_flags & R4OUTOFSPACE)) {
			mutex_enter(&rp->r_statelock);
			rp->r_flags |= R4OUTOFSPACE;
			mutex_exit(&rp->r_statelock);
		}
		flags |= B_ERROR;
		pvn_write_done(pp, flags);
		/*
		 * If this was not an async thread, then try again to
		 * write out the pages, but this time, also destroy
		 * them whether or not the write is successful.  This
		 * will prevent memory from filling up with these
		 * pages and destroying them is the only alternative
		 * if they can't be written out.
		 *
		 * Don't do this if this is an async thread because
		 * when the pages are unlocked in pvn_write_done,
		 * some other thread could have come along, locked
		 * them, and queued for an async thread.  It would be
		 * possible for all of the async threads to be tied
		 * up waiting to lock the pages again and they would
		 * all already be locked and waiting for an async
		 * thread to handle them.  Deadlock.
		 */
		if (!(flags & B_ASYNC)) {
			error = nfs4_putpage(vp, io_off, io_len,
			    B_INVAL | B_FORCE, cr, NULL);
		}
	} else {
		if (error)
			flags |= B_ERROR;
		else if (rp->r_flags & R4OUTOFSPACE) {
			/* Write succeeded; clear the sticky out-of-space flag. */
			mutex_enter(&rp->r_statelock);
			rp->r_flags &= ~R4OUTOFSPACE;
			mutex_exit(&rp->r_statelock);
		}
		pvn_write_done(pp, flags);
		/* Under memory pressure, nudge the server to commit now. */
		if (freemem < desfree)
			(void) nfs4_commit_vp(vp, (u_offset_t)0, 0, cr,
			    NFS4_WRITE_NOWAIT);
	}

	return (error);
}
10427 10430  
#ifdef DEBUG
/*
 * DEBUG-only tunable; presumably consulted by the mmap path that follows
 * (nfs4_map) to force an OTW open before mapping — consumer is outside
 * this view, so confirm against the rest of the file before relying on it.
 */
int nfs4_force_open_before_mmap = 0;
#endif
10431 10434  
10432 10435  /* ARGSUSED */
10433 10436  static int
10434 10437  nfs4_map(vnode_t *vp, offset_t off, struct as *as, caddr_t *addrp,
10435 10438      size_t len, uchar_t prot, uchar_t maxprot, uint_t flags, cred_t *cr,
10436 10439      caller_context_t *ct)
10437 10440  {
10438 10441          struct segvn_crargs vn_a;
10439 10442          int error = 0;
10440 10443          rnode4_t *rp = VTOR4(vp);
10441 10444          mntinfo4_t *mi = VTOMI4(vp);
10442 10445  
10443 10446          if (nfs_zone() != VTOMI4(vp)->mi_zone)
10444 10447                  return (EIO);
10445 10448  
10446 10449          if (vp->v_flag & VNOMAP)
10447 10450                  return (ENOSYS);
10448 10451  
10449 10452          if (off < 0 || (off + len) < 0)
10450 10453                  return (ENXIO);
10451 10454  
10452 10455          if (vp->v_type != VREG)
10453 10456                  return (ENODEV);
10454 10457  
10455 10458          /*
10456 10459           * If the file is delegated to the client don't do anything.
10457 10460           * If the file is not delegated, then validate the data cache.
10458 10461           */
10459 10462          mutex_enter(&rp->r_statev4_lock);
10460 10463          if (rp->r_deleg_type == OPEN_DELEGATE_NONE) {
10461 10464                  mutex_exit(&rp->r_statev4_lock);
10462 10465                  error = nfs4_validate_caches(vp, cr);
10463 10466                  if (error)
10464 10467                          return (error);
10465 10468          } else {
10466 10469                  mutex_exit(&rp->r_statev4_lock);
10467 10470          }
10468 10471  
10469 10472          /*
10470 10473           * Check to see if the vnode is currently marked as not cachable.
10471 10474           * This means portions of the file are locked (through VOP_FRLOCK).
10472 10475           * In this case the map request must be refused.  We use
10473 10476           * rp->r_lkserlock to avoid a race with concurrent lock requests.
10474 10477           *
10475 10478           * Atomically increment r_inmap after acquiring r_rwlock. The
10476 10479           * idea here is to acquire r_rwlock to block read/write and
10477 10480           * not to protect r_inmap. r_inmap will inform nfs4_read/write()
10478 10481           * that we are in nfs4_map(). Now, r_rwlock is acquired in order
10479 10482           * and we can prevent the deadlock that would have occurred
10480 10483           * when nfs4_addmap() would have acquired it out of order.
10481 10484           *
10482 10485           * Since we are not protecting r_inmap by any lock, we do not
10483 10486           * hold any lock when we decrement it. We atomically decrement
10484 10487           * r_inmap after we release r_lkserlock.
10485 10488           */
10486 10489  
10487 10490          if (nfs_rw_enter_sig(&rp->r_rwlock, RW_WRITER, INTR4(vp)))
10488 10491                  return (EINTR);
10489 10492          atomic_inc_uint(&rp->r_inmap);
10490 10493          nfs_rw_exit(&rp->r_rwlock);
10491 10494  
10492 10495          if (nfs_rw_enter_sig(&rp->r_lkserlock, RW_READER, INTR4(vp))) {
10493 10496                  atomic_dec_uint(&rp->r_inmap);
10494 10497                  return (EINTR);
10495 10498          }
10496 10499  
10497 10500  
10498 10501          if (vp->v_flag & VNOCACHE) {
10499 10502                  error = EAGAIN;
10500 10503                  goto done;
10501 10504          }
10502 10505  
10503 10506          /*
10504 10507           * Don't allow concurrent locks and mapping if mandatory locking is
10505 10508           * enabled.
10506 10509           */
10507 10510          if (flk_has_remote_locks(vp)) {
10508 10511                  struct vattr va;
10509 10512                  va.va_mask = AT_MODE;
10510 10513                  error = nfs4getattr(vp, &va, cr);
10511 10514                  if (error != 0)
10512 10515                          goto done;
10513 10516                  if (MANDLOCK(vp, va.va_mode)) {
10514 10517                          error = EAGAIN;
10515 10518                          goto done;
10516 10519                  }
10517 10520          }
10518 10521  
10519 10522          /*
10520 10523           * It is possible that the rnode has a lost lock request that we
10521 10524           * are still trying to recover, and that the request conflicts with
10522 10525           * this map request.
10523 10526           *
10524 10527           * An alternative approach would be for nfs4_safemap() to consider
10525 10528           * queued lock requests when deciding whether to set or clear
10526 10529           * VNOCACHE.  This would require the frlock code path to call
10527 10530           * nfs4_safemap() after enqueing a lost request.
10528 10531           */
10529 10532          if (nfs4_map_lost_lock_conflict(vp)) {
10530 10533                  error = EAGAIN;
10531 10534                  goto done;
10532 10535          }
10533 10536  
10534 10537          as_rangelock(as);
10535 10538          error = choose_addr(as, addrp, len, off, ADDR_VACALIGN, flags);
10536 10539          if (error != 0) {
10537 10540                  as_rangeunlock(as);
10538 10541                  goto done;
10539 10542          }
10540 10543  
10541 10544          if (vp->v_type == VREG) {
10542 10545                  /*
10543 10546                   * We need to retrieve the open stream
10544 10547                   */
10545 10548                  nfs4_open_stream_t      *osp = NULL;
10546 10549                  nfs4_open_owner_t       *oop = NULL;
10547 10550  
10548 10551                  oop = find_open_owner(cr, NFS4_PERM_CREATED, mi);
10549 10552                  if (oop != NULL) {
10550 10553                          /* returns with 'os_sync_lock' held */
10551 10554                          osp = find_open_stream(oop, rp);
10552 10555                          open_owner_rele(oop);
10553 10556                  }
10554 10557                  if (osp == NULL) {
10555 10558  #ifdef DEBUG
10556 10559                          if (nfs4_force_open_before_mmap) {
10557 10560                                  error = EIO;
10558 10561                                  goto done;
10559 10562                          }
10560 10563  #endif
10561 10564                          /* returns with 'os_sync_lock' held */
10562 10565                          error = open_and_get_osp(vp, cr, &osp);
10563 10566                          if (osp == NULL) {
10564 10567                                  NFS4_DEBUG(nfs4_mmap_debug, (CE_NOTE,
10565 10568                                      "nfs4_map: we tried to OPEN the file "
10566 10569                                      "but again no osp, so fail with EIO"));
10567 10570                                  goto done;
10568 10571                          }
10569 10572                  }
10570 10573  
10571 10574                  if (osp->os_failed_reopen) {
10572 10575                          mutex_exit(&osp->os_sync_lock);
10573 10576                          open_stream_rele(osp, rp);
10574 10577                          NFS4_DEBUG(nfs4_open_stream_debug, (CE_NOTE,
10575 10578                              "nfs4_map: os_failed_reopen set on "
10576 10579                              "osp %p, cr %p, rp %s", (void *)osp,
10577 10580                              (void *)cr, rnode4info(rp)));
10578 10581                          error = EIO;
10579 10582                          goto done;
10580 10583                  }
10581 10584                  mutex_exit(&osp->os_sync_lock);
10582 10585                  open_stream_rele(osp, rp);
10583 10586          }
10584 10587  
10585 10588          vn_a.vp = vp;
10586 10589          vn_a.offset = off;
10587 10590          vn_a.type = (flags & MAP_TYPE);
10588 10591          vn_a.prot = (uchar_t)prot;
10589 10592          vn_a.maxprot = (uchar_t)maxprot;
10590 10593          vn_a.flags = (flags & ~MAP_TYPE);
10591 10594          vn_a.cred = cr;
10592 10595          vn_a.amp = NULL;
10593 10596          vn_a.szc = 0;
10594 10597          vn_a.lgrp_mem_policy_flags = 0;
10595 10598  
10596 10599          error = as_map(as, *addrp, len, segvn_create, &vn_a);
10597 10600          as_rangeunlock(as);
10598 10601  
10599 10602  done:
10600 10603          nfs_rw_exit(&rp->r_lkserlock);
10601 10604          atomic_dec_uint(&rp->r_inmap);
10602 10605          return (error);
10603 10606  }
10604 10607  
10605 10608  /*
10606 10609   * We're most likely dealing with a kernel module that likes to READ
10607 10610   * and mmap without OPENing the file (ie: lookup/read/mmap), so lets
10608 10611   * officially OPEN the file to create the necessary client state
10609 10612   * for bookkeeping of os_mmap_read/write counts.
10610 10613   *
10611 10614   * Since VOP_MAP only passes in a pointer to the vnode rather than
10612 10615   * a double pointer, we can't handle the case where nfs4open_otw()
10613 10616   * returns a different vnode than the one passed into VOP_MAP (since
10614 10617   * VOP_DELMAP will not see the vnode nfs4open_otw used).  In this case,
10615 10618   * we return NULL and let nfs4_map() fail.  Note: the only case where
10616 10619   * this should happen is if the file got removed and replaced with the
10617 10620   * same name on the server (in addition to the fact that we're trying
10618 10621   * to VOP_MAP withouth VOP_OPENing the file in the first place).
10619 10622   */
static int
open_and_get_osp(vnode_t *map_vp, cred_t *cr, nfs4_open_stream_t **ospp)
{
	rnode4_t		*rp, *drp;
	vnode_t			*dvp, *open_vp;
	char			file_name[MAXNAMELEN];
	int			just_created;
	nfs4_open_stream_t	*osp;
	nfs4_open_owner_t	*oop;
	int			error;

	*ospp = NULL;
	open_vp = map_vp;

	rp = VTOR4(open_vp);
	/* Find the parent directory so the OPEN can be done by name. */
	if ((error = vtodv(open_vp, &dvp, cr, TRUE)) != 0)
		return (error);
	drp = VTOR4(dvp);

	/*
	 * Hold the directory's r_rwlock as reader across the name lookup
	 * and the over-the-wire open; released on all exit paths below.
	 */
	if (nfs_rw_enter_sig(&drp->r_rwlock, RW_READER, INTR4(dvp))) {
		VN_RELE(dvp);
		return (EINTR);
	}

	if ((error = vtoname(open_vp, file_name, MAXNAMELEN)) != 0) {
		nfs_rw_exit(&drp->r_rwlock);
		VN_RELE(dvp);
		return (error);
	}

	/*
	 * If the rnode still has created_v4 set, consume the flag,
	 * prime the DNLC with the name, and remember that the file was
	 * just created so nfs4open_otw() behaves accordingly.
	 */
	mutex_enter(&rp->r_statev4_lock);
	if (rp->created_v4) {
		rp->created_v4 = 0;
		mutex_exit(&rp->r_statev4_lock);

		dnlc_update(dvp, file_name, open_vp);
		/* This is needed so we don't bump the open ref count */
		just_created = 1;
	} else {
		mutex_exit(&rp->r_statev4_lock);
		just_created = 0;
	}

	/* Extra hold on map_vp while the open may replace open_vp. */
	VN_HOLD(map_vp);

	error = nfs4open_otw(dvp, file_name, NULL, &open_vp, cr, 0, FREAD, 0,
	    just_created);
	if (error) {
		nfs_rw_exit(&drp->r_rwlock);
		VN_RELE(dvp);
		VN_RELE(map_vp);
		return (error);
	}

	nfs_rw_exit(&drp->r_rwlock);
	VN_RELE(dvp);

	/*
	 * If nfs4open_otw() returned a different vnode then "undo"
	 * the open and return failure to the caller.
	 */
	if (!VN_CMP(open_vp, map_vp)) {
		nfs4_error_t e;

		NFS4_DEBUG(nfs4_mmap_debug, (CE_NOTE, "open_and_get_osp: "
		    "open returned a different vnode"));
		/*
		 * If there's an error, ignore it,
		 * and let VOP_INACTIVE handle it.
		 */
		(void) nfs4close_one(open_vp, NULL, cr, FREAD, NULL, &e,
		    CLOSE_NORM, 0, 0, 0);
		VN_RELE(map_vp);
		return (EIO);
	}

	VN_RELE(map_vp);

	/*
	 * Look up the open owner/stream that the OPEN above should have
	 * created.  find_open_stream() returns with os_sync_lock held;
	 * the caller (nfs4_map) is responsible for dropping it.
	 */
	oop = find_open_owner(cr, NFS4_PERM_CREATED, VTOMI4(open_vp));
	if (!oop) {
		nfs4_error_t e;

		NFS4_DEBUG(nfs4_mmap_debug, (CE_NOTE, "open_and_get_osp: "
		    "no open owner"));
		/*
		 * If there's an error, ignore it,
		 * and let VOP_INACTIVE handle it.
		 */
		(void) nfs4close_one(open_vp, NULL, cr, FREAD, NULL, &e,
		    CLOSE_NORM, 0, 0, 0);
		return (EIO);
	}
	osp = find_open_stream(oop, rp);
	open_owner_rele(oop);
	*ospp = osp;
	return (0);
}
10717 10720  
10718 10721  /*
10719 10722   * Please be aware that when this function is called, the address space write
10720 10723   * a_lock is held.  Do not put over the wire calls in this function.
10721 10724   */
/* ARGSUSED */
static int
nfs4_addmap(vnode_t *vp, offset_t off, struct as *as, caddr_t addr,
    size_t len, uchar_t prot, uchar_t maxprot, uint_t flags, cred_t *cr,
    caller_context_t *ct)
{
	rnode4_t		*rp;
	int			error = 0;
	mntinfo4_t		*mi;

	mi = VTOMI4(vp);
	rp = VTOR4(vp);

	if (nfs_zone() != mi->mi_zone)
		return (EIO);
	if (vp->v_flag & VNOMAP)
		return (ENOSYS);

	/*
	 * Don't need to update the open stream first, since this
	 * mmap can't add any additional share access that isn't
	 * already contained in the open stream (for the case where we
	 * open/mmap/only update rp->r_mapcnt/server reboots/reopen doesn't
	 * take into account os_mmap_read[write] counts).
	 */
	atomic_add_long((ulong_t *)&rp->r_mapcnt, btopr(len));

	if (vp->v_type == VREG) {
		/*
		 * We need to retrieve the open stream and update the counts.
		 * If there is no open stream here, something is wrong.
		 */
		nfs4_open_stream_t	*osp = NULL;
		nfs4_open_owner_t	*oop = NULL;

		oop = find_open_owner(cr, NFS4_PERM_CREATED, mi);
		if (oop != NULL) {
			/* returns with 'os_sync_lock' held */
			osp = find_open_stream(oop, rp);
			open_owner_rele(oop);
		}
		if (osp == NULL) {
			NFS4_DEBUG(nfs4_mmap_debug, (CE_NOTE,
			    "nfs4_addmap: we should have an osp"
			    "but we don't, so fail with EIO"));
			error = EIO;
			goto out;
		}

		NFS4_DEBUG(nfs4_mmap_debug, (CE_NOTE, "nfs4_addmap: osp %p,"
		    " pages %ld, prot 0x%x", (void *)osp, btopr(len), prot));

		/*
		 * Update the map count in the open stream.
		 * This is necessary in the case where we
		 * open/mmap/close/, then the server reboots, and we
		 * attempt to reopen.  If the mmap doesn't add share
		 * access then we send an invalid reopen with
		 * access = NONE.
		 *
		 * We need to specifically check each PROT_* so a mmap
		 * call of (PROT_WRITE | PROT_EXEC) will ensure us both
		 * read and write access.  A simple comparison of prot
		 * to ~PROT_WRITE to determine read access is insufficient
		 * since prot can be |= with PROT_USER, etc.
		 */

		/*
		 * Unless we're MAP_SHARED, no sense in adding os_mmap_write
		 */
		if ((flags & MAP_SHARED) && (maxprot & PROT_WRITE))
			osp->os_mmap_write += btopr(len);
		if (maxprot & PROT_READ)
			osp->os_mmap_read += btopr(len);
		if (maxprot & PROT_EXEC)
			osp->os_mmap_read += btopr(len);
		/*
		 * Ensure that os_mmap_read gets incremented, even if
		 * maxprot were to look like PROT_NONE.
		 */
		if (!(maxprot & PROT_READ) && !(maxprot & PROT_WRITE) &&
		    !(maxprot & PROT_EXEC))
			osp->os_mmap_read += btopr(len);
		osp->os_mapcnt += btopr(len);
		mutex_exit(&osp->os_sync_lock);
		open_stream_rele(osp, rp);
	}

out:
	/*
	 * If we got an error, then undo our
	 * incrementing of 'r_mapcnt'.
	 */

	if (error) {
		atomic_add_long((ulong_t *)&rp->r_mapcnt, -btopr(len));
		ASSERT(rp->r_mapcnt >= 0);
	}
	return (error);
}
10822 10825  
10823 10826  /* ARGSUSED */
10824 10827  static int
10825 10828  nfs4_cmp(vnode_t *vp1, vnode_t *vp2, caller_context_t *ct)
10826 10829  {
10827 10830  
10828 10831          return (VTOR4(vp1) == VTOR4(vp2));
10829 10832  }
10830 10833  
/*
 * Byte-range locking for NFSv4 files.  Validates the request, handles
 * locally-locked (MI4_LLOCK) filesystems via fs_frlock(), flushes cached
 * pages before contacting the server, and otherwise hands the request to
 * nfs4frlock() to do the over-the-wire work.
 */
/* ARGSUSED */
static int
nfs4_frlock(vnode_t *vp, int cmd, struct flock64 *bfp, int flag,
    offset_t offset, struct flk_callback *flk_cbp, cred_t *cr,
    caller_context_t *ct)
{
	int rc;
	u_offset_t start, end;
	rnode4_t *rp;
	int error = 0, intr = INTR4(vp);
	nfs4_error_t e;

	if (nfs_zone() != VTOMI4(vp)->mi_zone)
		return (EIO);

	/* check for valid cmd parameter */
	if (cmd != F_GETLK && cmd != F_SETLK && cmd != F_SETLKW)
		return (EINVAL);

	/* Verify l_type. */
	switch (bfp->l_type) {
	case F_RDLCK:
		if (cmd != F_GETLK && !(flag & FREAD))
			return (EBADF);
		break;
	case F_WRLCK:
		if (cmd != F_GETLK && !(flag & FWRITE))
			return (EBADF);
		break;
	case F_UNLCK:
		/* Unlocks must not be interruptible (avoid orphan locks). */
		intr = 0;
		break;

	default:
		return (EINVAL);
	}

	/* check the validity of the lock range */
	if (rc = flk_convert_lock_data(vp, bfp, &start, &end, offset))
		return (rc);
	if (rc = flk_check_lock_data(start, end, MAXEND))
		return (rc);

	/*
	 * If the filesystem is mounted using local locking, pass the
	 * request off to the local locking code.
	 */
	if (VTOMI4(vp)->mi_flags & MI4_LLOCK || vp->v_type != VREG) {
		if (cmd == F_SETLK || cmd == F_SETLKW) {
			/*
			 * For complete safety, we should be holding
			 * r_lkserlock.  However, we can't call
			 * nfs4_safelock and then fs_frlock while
			 * holding r_lkserlock, so just invoke
			 * nfs4_safelock and expect that this will
			 * catch enough of the cases.
			 */
			if (!nfs4_safelock(vp, bfp, cr))
				return (EAGAIN);
		}
		return (fs_frlock(vp, cmd, bfp, flag, offset, flk_cbp, cr, ct));
	}

	rp = VTOR4(vp);

	/*
	 * Check whether the given lock request can proceed, given the
	 * current file mappings.
	 */
	if (nfs_rw_enter_sig(&rp->r_lkserlock, RW_WRITER, intr))
		return (EINTR);
	if (cmd == F_SETLK || cmd == F_SETLKW) {
		if (!nfs4_safelock(vp, bfp, cr)) {
			rc = EAGAIN;
			goto done;
		}
	}

	/*
	 * Flush the cache after waiting for async I/O to finish.  For new
	 * locks, this is so that the process gets the latest bits from the
	 * server.  For unlocks, this is so that other clients see the
	 * latest bits once the file has been unlocked.  If currently dirty
	 * pages can't be flushed, then don't allow a lock to be set.  But
	 * allow unlocks to succeed, to avoid having orphan locks on the
	 * server.
	 */
	if (cmd != F_GETLK) {
		mutex_enter(&rp->r_statelock);
		/* Wait for outstanding async I/O (r_count) to drain. */
		while (rp->r_count > 0) {
			if (intr) {
				klwp_t *lwp = ttolwp(curthread);

				/* lwp_nostop keeps /proc from stopping us */
				if (lwp != NULL)
					lwp->lwp_nostop++;
				if (cv_wait_sig(&rp->r_cv,
				    &rp->r_statelock) == 0) {
					if (lwp != NULL)
						lwp->lwp_nostop--;
					rc = EINTR;
					break;
				}
				if (lwp != NULL)
					lwp->lwp_nostop--;
				} else
					cv_wait(&rp->r_cv, &rp->r_statelock);
		}
		mutex_exit(&rp->r_statelock);
		/* rc is 0 here unless the cv_wait_sig above was interrupted */
		if (rc != 0)
			goto done;
		error = nfs4_putpage(vp, (offset_t)0, 0, B_INVAL, cr, ct);
		if (error) {
			if (error == ENOSPC || error == EDQUOT) {
				mutex_enter(&rp->r_statelock);
				if (!rp->r_error)
					rp->r_error = error;
				mutex_exit(&rp->r_statelock);
			}
			/* Only a failed *lock* is refused; unlocks proceed. */
			if (bfp->l_type != F_UNLCK) {
				rc = ENOLCK;
				goto done;
			}
		}
	}

	/*
	 * Call the lock manager to do the real work of contacting
	 * the server and obtaining the lock.
	 */
	nfs4frlock(NFS4_LCK_CTYPE_NORM, vp, cmd, bfp, flag, offset,
	    cr, &e, NULL, NULL);
	rc = e.error;

	if (rc == 0)
		nfs4_lockcompletion(vp, cmd);

done:
	nfs_rw_exit(&rp->r_lkserlock);

	return (rc);
}
10972 10975  
10973 10976  /*
10974 10977   * Free storage space associated with the specified vnode.  The portion
10975 10978   * to be freed is specified by bfp->l_start and bfp->l_len (already
10976 10979   * normalized to a "whence" of 0).
10977 10980   *
10978 10981   * This is an experimental facility whose continued existence is not
10979 10982   * guaranteed.  Currently, we only support the special case
10980 10983   * of l_len == 0, meaning free to end of file.
10981 10984   */
/* ARGSUSED */
static int
nfs4_space(vnode_t *vp, int cmd, struct flock64 *bfp, int flag,
    offset_t offset, cred_t *cr, caller_context_t *ct)
{
	int error;

	if (nfs_zone() != VTOMI4(vp)->mi_zone)
		return (EIO);
	ASSERT(vp->v_type == VREG);
	/* Only F_FREESP (free to end of file) is supported. */
	if (cmd != F_FREESP)
		return (EINVAL);

	/* Normalize l_start/l_len relative to "whence" 0. */
	error = convoff(vp, bfp, 0, offset);
	if (!error) {
		ASSERT(bfp->l_start >= 0);
		if (bfp->l_len == 0) {
			struct vattr va;

			/* l_len == 0: truncate the file at l_start. */
			va.va_mask = AT_SIZE;
			va.va_size = bfp->l_start;
			error = nfs4setattr(vp, &va, 0, cr, NULL);

			/*
			 * Emit the proper vnode event: a truncate to
			 * offset 0 is a "truncate", any other offset
			 * is a "resize" (OS-5148).
			 */
			if (error == 0) {
				if (bfp->l_start == 0) {
					vnevent_truncate(vp, ct);
				} else {
					vnevent_resize(vp, ct);
				}
			}
		} else
			error = EINVAL;
	}

	return (error);
}
11013 11021  
11014 11022  /* ARGSUSED */
11015 11023  int
11016 11024  nfs4_realvp(vnode_t *vp, vnode_t **vpp, caller_context_t *ct)
11017 11025  {
11018 11026          rnode4_t *rp;
11019 11027          rp = VTOR4(vp);
11020 11028  
11021 11029          if (vp->v_type == VREG && IS_SHADOW(vp, rp)) {
11022 11030                  vp = RTOV4(rp);
11023 11031          }
11024 11032          *vpp = vp;
11025 11033          return (0);
11026 11034  }
11027 11035  
11028 11036  /*
11029 11037   * Setup and add an address space callback to do the work of the delmap call.
11030 11038   * The callback will (and must be) deleted in the actual callback function.
11031 11039   *
11032 11040   * This is done in order to take care of the problem that we have with holding
11033 11041   * the address space's a_lock for a long period of time (e.g. if the NFS server
11034 11042   * is down).  Callbacks will be executed in the address space code while the
11035 11043   * a_lock is not held.  Holding the address space's a_lock causes things such
11036 11044   * as ps and fork to hang because they are trying to acquire this lock as well.
11037 11045   */
/* ARGSUSED */
static int
nfs4_delmap(vnode_t *vp, offset_t off, struct as *as, caddr_t addr,
    size_t len, uint_t prot, uint_t maxprot, uint_t flags, cred_t *cr,
    caller_context_t *ct)
{
	int			caller_found;
	int			error;
	rnode4_t		*rp;
	nfs4_delmap_args_t	*dmapp;
	nfs4_delmapcall_t	*delmap_call;

	if (vp->v_flag & VNOMAP)
		return (ENOSYS);

	/*
	 * A process may not change zones if it has NFS pages mmap'ed
	 * in, so we can't legitimately get here from the wrong zone.
	 */
	ASSERT(nfs_zone() == VTOMI4(vp)->mi_zone);

	rp = VTOR4(vp);

	/*
	 * The way that the address space of this process deletes its mapping
	 * of this file is via the following call chains:
	 * - as_free()->SEGOP_UNMAP()/segvn_unmap()->VOP_DELMAP()/nfs4_delmap()
	 * - as_unmap()->SEGOP_UNMAP()/segvn_unmap()->VOP_DELMAP()/nfs4_delmap()
	 *
	 * With the use of address space callbacks we are allowed to drop the
	 * address space lock, a_lock, while executing the NFS operations that
	 * need to go over the wire.  Returning EAGAIN to the caller of this
	 * function is what drives the execution of the callback that we add
	 * below.  The callback will be executed by the address space code
	 * after dropping the a_lock.  When the callback is finished, since
	 * we dropped the a_lock, it must be re-acquired and segvn_unmap()
	 * is called again on the same segment to finish the rest of the work
	 * that needs to happen during unmapping.
	 *
	 * This action of calling back into the segment driver causes
	 * nfs4_delmap() to get called again, but since the callback was
	 * already executed at this point, it already did the work and there
	 * is nothing left for us to do.
	 *
	 * To Summarize:
	 * - The first time nfs4_delmap is called by the current thread is when
	 * we add the caller associated with this delmap to the delmap caller
	 * list, add the callback, and return EAGAIN.
	 * - The second time in this call chain when nfs4_delmap is called we
	 * will find this caller in the delmap caller list and realize there
	 * is no more work to do thus removing this caller from the list and
	 * returning the error that was set in the callback execution.
	 */
	caller_found = nfs4_find_and_delete_delmapcall(rp, &error);
	if (caller_found) {
		/*
		 * 'error' is from the actual delmap operations.  To avoid
		 * hangs, we need to handle the return of EAGAIN differently
		 * since this is what drives the callback execution.
		 * In this case, we don't want to return EAGAIN and do the
		 * callback execution because there are none to execute.
		 */
		if (error == EAGAIN)
			return (0);
		else
			return (error);
	}

	/* current caller was not in the list */
	delmap_call = nfs4_init_delmapcall();

	/* Record this thread as an in-progress delmap caller. */
	mutex_enter(&rp->r_statelock);
	list_insert_tail(&rp->r_indelmap, delmap_call);
	mutex_exit(&rp->r_statelock);

	/* Package up the arguments for the deferred callback. */
	dmapp = kmem_alloc(sizeof (nfs4_delmap_args_t), KM_SLEEP);

	dmapp->vp = vp;
	dmapp->off = off;
	dmapp->addr = addr;
	dmapp->len = len;
	dmapp->prot = prot;
	dmapp->maxprot = maxprot;
	dmapp->flags = flags;
	dmapp->cr = cr;
	dmapp->caller = delmap_call;

	/*
	 * Register the callback; EAGAIN tells the VM layer to drop
	 * a_lock, run the callback, and retry the unmap.
	 */
	error = as_add_callback(as, nfs4_delmap_callback, dmapp,
	    AS_UNMAP_EVENT, addr, len, KM_SLEEP);

	return (error ? error : EAGAIN);
}
11130 11138  
11131 11139  static nfs4_delmapcall_t *
11132 11140  nfs4_init_delmapcall()
11133 11141  {
11134 11142          nfs4_delmapcall_t       *delmap_call;
11135 11143  
11136 11144          delmap_call = kmem_alloc(sizeof (nfs4_delmapcall_t), KM_SLEEP);
11137 11145          delmap_call->call_id = curthread;
11138 11146          delmap_call->error = 0;
11139 11147  
11140 11148          return (delmap_call);
11141 11149  }
11142 11150  
/*
 * Free a delmap caller-tracking record previously allocated by
 * nfs4_init_delmapcall().
 */
static void
nfs4_free_delmapcall(nfs4_delmapcall_t *delmap_call)
{
	kmem_free(delmap_call, sizeof (nfs4_delmapcall_t));
}
11148 11156  
11149 11157  /*
11150 11158   * Searches for the current delmap caller (based on curthread) in the list of
11151 11159   * callers.  If it is found, we remove it and free the delmap caller.
11152 11160   * Returns:
11153 11161   *      0 if the caller wasn't found
11154 11162   *      1 if the caller was found, removed and freed.  *errp will be set
11155 11163   *      to what the result of the delmap was.
11156 11164   */
11157 11165  static int
11158 11166  nfs4_find_and_delete_delmapcall(rnode4_t *rp, int *errp)
11159 11167  {
11160 11168          nfs4_delmapcall_t       *delmap_call;
11161 11169  
11162 11170          /*
11163 11171           * If the list doesn't exist yet, we create it and return
11164 11172           * that the caller wasn't found.  No list = no callers.
11165 11173           */
11166 11174          mutex_enter(&rp->r_statelock);
11167 11175          if (!(rp->r_flags & R4DELMAPLIST)) {
11168 11176                  /* The list does not exist */
11169 11177                  list_create(&rp->r_indelmap, sizeof (nfs4_delmapcall_t),
11170 11178                      offsetof(nfs4_delmapcall_t, call_node));
11171 11179                  rp->r_flags |= R4DELMAPLIST;
11172 11180                  mutex_exit(&rp->r_statelock);
11173 11181                  return (0);
11174 11182          } else {
11175 11183                  /* The list exists so search it */
11176 11184                  for (delmap_call = list_head(&rp->r_indelmap);
11177 11185                      delmap_call != NULL;
11178 11186                      delmap_call = list_next(&rp->r_indelmap, delmap_call)) {
11179 11187                          if (delmap_call->call_id == curthread) {
11180 11188                                  /* current caller is in the list */
11181 11189                                  *errp = delmap_call->error;
11182 11190                                  list_remove(&rp->r_indelmap, delmap_call);
11183 11191                                  mutex_exit(&rp->r_statelock);
11184 11192                                  nfs4_free_delmapcall(delmap_call);
11185 11193                                  return (1);
11186 11194                          }
11187 11195                  }
11188 11196          }
11189 11197          mutex_exit(&rp->r_statelock);
11190 11198          return (0);
11191 11199  }
11192 11200  
/*
 * Remove some pages from an mmap'd vnode.  Just update the
 * count of pages.  If doing close-to-open, then flush and
 * commit all of the pages associated with this file.
 * Otherwise, start an asynchronous page flush to write out
 * any dirty pages.  This will also associate a credential
 * with the rnode which can be used to write the pages.
 *
 * Runs as an address-space callback (registered in nfs4_delmap via
 * as_add_callback); it deregisters itself and frees its argument
 * structure before returning.  Errors are propagated back to the
 * originating delmap caller through dmapp->caller->error.
 */
/* ARGSUSED */
static void
nfs4_delmap_callback(struct as *as, void *arg, uint_t event)
{
	nfs4_error_t		e = { 0, NFS4_OK, RPC_SUCCESS };
	rnode4_t		*rp;
	mntinfo4_t		*mi;
	nfs4_delmap_args_t	*dmapp = (nfs4_delmap_args_t *)arg;

	rp = VTOR4(dmapp->vp);
	mi = VTOMI4(dmapp->vp);

	/* Drop the mapped-page count for the pages being unmapped. */
	atomic_add_long((ulong_t *)&rp->r_mapcnt, -btopr(dmapp->len));
	ASSERT(rp->r_mapcnt >= 0);

	/*
	 * Initiate a page flush and potential commit if there are
	 * pages, the file system was not mounted readonly, the segment
	 * was mapped shared, and the pages themselves were writeable.
	 */
	if (nfs4_has_pages(dmapp->vp) &&
	    !(dmapp->vp->v_vfsp->vfs_flag & VFS_RDONLY) &&
	    dmapp->flags == MAP_SHARED && (dmapp->maxprot & PROT_WRITE)) {
		mutex_enter(&rp->r_statelock);
		rp->r_flags |= R4DIRTY;
		mutex_exit(&rp->r_statelock);
		e.error = nfs4_putpage_commit(dmapp->vp, dmapp->off,
		    dmapp->len, dmapp->cr);
		if (!e.error) {
			/* Pick up any asynchronous write error on the rnode. */
			mutex_enter(&rp->r_statelock);
			e.error = rp->r_error;
			rp->r_error = 0;
			mutex_exit(&rp->r_statelock);
		}
	} else
		e.error = 0;

	/* For directio mounts/files, invalidate the cached pages outright. */
	if ((rp->r_flags & R4DIRECTIO) || (mi->mi_flags & MI4_DIRECTIO))
		(void) nfs4_putpage(dmapp->vp, dmapp->off, dmapp->len,
		    B_INVAL, dmapp->cr, NULL);

	if (e.error) {
		/* Record the flush/commit failure for the delmap caller. */
		e.stat = puterrno4(e.error);
		nfs4_queue_fact(RF_DELMAP_CB_ERR, mi, e.stat, 0,
		    OP_COMMIT, FALSE, NULL, 0, dmapp->vp);
		dmapp->caller->error = e.error;
	}

	/* Check to see if we need to close the file */

	if (dmapp->vp->v_type == VREG) {
		nfs4close_one(dmapp->vp, NULL, dmapp->cr, 0, NULL, &e,
		    CLOSE_DELMAP, dmapp->len, dmapp->maxprot, dmapp->flags);

		if (e.error != 0 || e.stat != NFS4_OK) {
			/*
			 * Since it is possible that e.error == 0 and
			 * e.stat != NFS4_OK (and vice versa),
			 * we do the proper checking in order to get both
			 * e.error and e.stat reporting the correct info.
			 */
			if (e.stat == NFS4_OK)
				e.stat = puterrno4(e.error);
			if (e.error == 0)
				e.error = geterrno4(e.stat);

			nfs4_queue_fact(RF_DELMAP_CB_ERR, mi, e.stat, 0,
			    OP_CLOSE, FALSE, NULL, 0, dmapp->vp);
			dmapp->caller->error = e.error;
		}
	}

	/* This callback is one-shot: deregister and free the argument. */
	(void) as_delete_callback(as, arg);
	kmem_free(dmapp, sizeof (nfs4_delmap_args_t));
}
11276 11284  
11277 11285  
/*
 * Convert the fattr4 maxfilesize attribute (largest supported file
 * size) into the number of bits needed to represent that value, i.e.
 * the 1-based index of its highest set bit.  Returns 0 for an input
 * of 0.
 */
static uint_t
fattr4_maxfilesize_to_bits(uint64_t ll)
{
	uint_t	bits;
	uint_t	shift;

	if (ll == 0)
		return (0);

	/*
	 * Binary-search for the highest set bit: try shifts of 32, 16,
	 * 8, 4, 2 and 1, keeping whatever survives each shift.
	 */
	bits = 1;
	for (shift = 32; shift != 0; shift >>= 1) {
		if ((ll >> shift) != 0) {
			bits += shift;
			ll >>= shift;
		}
	}
	return (bits);
}
11307 11315  
11308 11316  static int
11309 11317  nfs4_have_xattrs(vnode_t *vp, ulong_t *valp, cred_t *cr)
11310 11318  {
11311 11319          vnode_t *avp = NULL;
11312 11320          int error;
11313 11321  
11314 11322          if ((error = nfs4lookup_xattr(vp, "", &avp,
11315 11323              LOOKUP_XATTR, cr)) == 0)
11316 11324                  error = do_xattr_exists_check(avp, valp, cr);
11317 11325          if (avp)
11318 11326                  VN_RELE(avp);
11319 11327  
11320 11328          return (error);
11321 11329  }
11322 11330  
/*
 * VOP_PATHCONF for NFSv4.  Constant answers (_PC_PATH_MAX,
 * _PC_SYMLINK_MAX, _PC_ACL_ENABLED) are returned immediately; other
 * queries are satisfied from the rnode's cached pathconf data when the
 * attribute cache is valid, otherwise by an over-the-wire GETATTR of
 * the pathconf attributes, whose results are then cached.
 * Returns 0 on success or an errno value (EIO for a cross-zone call,
 * EINVAL for an unsupported cmd).
 */
/* ARGSUSED */
int
nfs4_pathconf(vnode_t *vp, int cmd, ulong_t *valp, cred_t *cr,
    caller_context_t *ct)
{
	int error;
	hrtime_t t;
	rnode4_t *rp;
	nfs4_ga_res_t gar;
	nfs4_ga_ext_res_t ger;

	gar.n4g_ext_res = &ger;

	if (nfs_zone() != VTOMI4(vp)->mi_zone)
		return (EIO);
	if (cmd == _PC_PATH_MAX || cmd == _PC_SYMLINK_MAX) {
		*valp = MAXPATHLEN;
		return (0);
	}
	if (cmd == _PC_ACL_ENABLED) {
		*valp = _ACL_ACE_ENABLED;
		return (0);
	}

	rp = VTOR4(vp);
	if (cmd == _PC_XATTR_EXISTS) {
		/*
		 * The existence of the xattr directory is not sufficient
		 * for determining whether generic user attributes exists.
		 * The attribute directory could only be a transient directory
		 * used for Solaris sysattr support.  Do a small readdir
		 * to verify if the only entries are sysattrs or not.
		 *
		 * pc4_xattr_valid can be only be trusted when r_xattr_dir
		 * is NULL.  Once the xadir vp exists, we can create xattrs,
		 * and we don't have any way to update the "base" object's
		 * pc4_xattr_exists from the xattr or xadir.  Maybe FEM
		 * could help out.
		 */
		if (ATTRCACHE4_VALID(vp) && rp->r_pathconf.pc4_xattr_valid &&
		    rp->r_xattr_dir == NULL) {
			return (nfs4_have_xattrs(vp, valp, cr));
		}
	} else {  /* OLD CODE */
		/* Non-xattr queries: try the cached pathconf values first. */
		if (ATTRCACHE4_VALID(vp)) {
			mutex_enter(&rp->r_statelock);
			if (rp->r_pathconf.pc4_cache_valid) {
				error = 0;
				switch (cmd) {
				case _PC_FILESIZEBITS:
					*valp =
					    rp->r_pathconf.pc4_filesizebits;
					break;
				case _PC_LINK_MAX:
					*valp =
					    rp->r_pathconf.pc4_link_max;
					break;
				case _PC_NAME_MAX:
					*valp =
					    rp->r_pathconf.pc4_name_max;
					break;
				case _PC_CHOWN_RESTRICTED:
					*valp =
					    rp->r_pathconf.pc4_chown_restricted;
					break;
				case _PC_NO_TRUNC:
					*valp =
					    rp->r_pathconf.pc4_no_trunc;
					break;
				default:
					error = EINVAL;
					break;
				}
				mutex_exit(&rp->r_statelock);
#ifdef DEBUG
				nfs4_pathconf_cache_hits++;
#endif
				return (error);
			}
			mutex_exit(&rp->r_statelock);
		}
	}
#ifdef DEBUG
	nfs4_pathconf_cache_misses++;
#endif

	/* Cache miss: fetch the pathconf attributes over the wire. */
	t = gethrtime();

	error = nfs4_attr_otw(vp, TAG_PATHCONF, &gar, NFS4_PATHCONF_MASK, cr);

	if (error) {
		/* Fetch failed; invalidate the cached pathconf data. */
		mutex_enter(&rp->r_statelock);
		rp->r_pathconf.pc4_cache_valid = FALSE;
		rp->r_pathconf.pc4_xattr_valid = FALSE;
		mutex_exit(&rp->r_statelock);
		return (error);
	}

	/* interpret the max filesize */
	gar.n4g_ext_res->n4g_pc4.pc4_filesizebits =
	    fattr4_maxfilesize_to_bits(gar.n4g_ext_res->n4g_maxfilesize);

	/* Store the attributes we just received */
	nfs4_attr_cache(vp, &gar, t, cr, TRUE, NULL);

	switch (cmd) {
	case _PC_FILESIZEBITS:
		*valp = gar.n4g_ext_res->n4g_pc4.pc4_filesizebits;
		break;
	case _PC_LINK_MAX:
		*valp = gar.n4g_ext_res->n4g_pc4.pc4_link_max;
		break;
	case _PC_NAME_MAX:
		*valp = gar.n4g_ext_res->n4g_pc4.pc4_name_max;
		break;
	case _PC_CHOWN_RESTRICTED:
		*valp = gar.n4g_ext_res->n4g_pc4.pc4_chown_restricted;
		break;
	case _PC_NO_TRUNC:
		*valp = gar.n4g_ext_res->n4g_pc4.pc4_no_trunc;
		break;
	case _PC_XATTR_EXISTS:
		/*
		 * NOTE(review): when pc4_xattr_exists is false, *valp is
		 * not written here — presumably the caller pre-initializes
		 * it; confirm against the VOP_PATHCONF callers.
		 */
		if (gar.n4g_ext_res->n4g_pc4.pc4_xattr_exists) {
			if (error = nfs4_have_xattrs(vp, valp, cr))
				return (error);
		}
		break;
	default:
		return (EINVAL);
	}

	return (0);
}
11456 11464  
11457 11465  /*
11458 11466   * Called by async thread to do synchronous pageio. Do the i/o, wait
11459 11467   * for it to complete, and cleanup the page list when done.
11460 11468   */
11461 11469  static int
11462 11470  nfs4_sync_pageio(vnode_t *vp, page_t *pp, u_offset_t io_off, size_t io_len,
11463 11471      int flags, cred_t *cr)
11464 11472  {
11465 11473          int error;
11466 11474  
11467 11475          ASSERT(nfs_zone() == VTOMI4(vp)->mi_zone);
11468 11476  
11469 11477          error = nfs4_rdwrlbn(vp, pp, io_off, io_len, flags, cr);
11470 11478          if (flags & B_READ)
11471 11479                  pvn_read_done(pp, (error ? B_ERROR : 0) | flags);
11472 11480          else
11473 11481                  pvn_write_done(pp, (error ? B_ERROR : 0) | flags);
11474 11482          return (error);
11475 11483  }
11476 11484  
11477 11485  /* ARGSUSED */
11478 11486  static int
11479 11487  nfs4_pageio(vnode_t *vp, page_t *pp, u_offset_t io_off, size_t io_len,
11480 11488      int flags, cred_t *cr, caller_context_t *ct)
11481 11489  {
11482 11490          int error;
11483 11491          rnode4_t *rp;
11484 11492  
11485 11493          if (!(flags & B_ASYNC) && nfs_zone() != VTOMI4(vp)->mi_zone)
11486 11494                  return (EIO);
11487 11495  
11488 11496          if (pp == NULL)
11489 11497                  return (EINVAL);
11490 11498  
11491 11499          rp = VTOR4(vp);
11492 11500          mutex_enter(&rp->r_statelock);
11493 11501          rp->r_count++;
11494 11502          mutex_exit(&rp->r_statelock);
11495 11503  
11496 11504          if (flags & B_ASYNC) {
11497 11505                  error = nfs4_async_pageio(vp, pp, io_off, io_len, flags, cr,
11498 11506                      nfs4_sync_pageio);
11499 11507          } else
11500 11508                  error = nfs4_rdwrlbn(vp, pp, io_off, io_len, flags, cr);
11501 11509          mutex_enter(&rp->r_statelock);
11502 11510          rp->r_count--;
11503 11511          cv_broadcast(&rp->r_cv);
11504 11512          mutex_exit(&rp->r_statelock);
11505 11513          return (error);
11506 11514  }
11507 11515  
/*
 * VOP_DISPOSE for NFSv4: free (B_FREE) or destroy (B_INVAL) a page,
 * first committing it to stable storage on the server when the page is
 * marked as needing a commit (p_fsdata != C_NOCOMMIT).  To amortize the
 * cost, the commit gathers as many committable pages as possible in one
 * RPC via nfs4_get_commit().
 */
/* ARGSUSED */
static void
nfs4_dispose(vnode_t *vp, page_t *pp, int fl, int dn, cred_t *cr,
    caller_context_t *ct)
{
	int error;
	rnode4_t *rp;
	page_t *plist;
	page_t *pptr;
	offset3 offset;
	count3 len;
	k_sigset_t smask;

	/*
	 * We should get called with fl equal to either B_FREE or
	 * B_INVAL.  Any other value is illegal.
	 *
	 * The page that we are either supposed to free or destroy
	 * should be exclusive locked and its io lock should not
	 * be held.
	 */
	ASSERT(fl == B_FREE || fl == B_INVAL);
	ASSERT((PAGE_EXCL(pp) && !page_iolock_assert(pp)) || panicstr);

	rp = VTOR4(vp);

	/*
	 * If the page doesn't need to be committed or we shouldn't
	 * even bother attempting to commit it, then just make sure
	 * that the p_fsdata byte is clear and then either free or
	 * destroy the page as appropriate.
	 */
	if (pp->p_fsdata == C_NOCOMMIT || (rp->r_flags & R4STALE)) {
		pp->p_fsdata = C_NOCOMMIT;
		if (fl == B_FREE)
			page_free(pp, dn);
		else
			page_destroy(pp, dn);
		return;
	}

	/*
	 * If there is a page invalidation operation going on, then
	 * if this is one of the pages being destroyed, then just
	 * clear the p_fsdata byte and then either free or destroy
	 * the page as appropriate.
	 */
	mutex_enter(&rp->r_statelock);
	if ((rp->r_flags & R4TRUNCATE) && pp->p_offset >= rp->r_truncaddr) {
		mutex_exit(&rp->r_statelock);
		pp->p_fsdata = C_NOCOMMIT;
		if (fl == B_FREE)
			page_free(pp, dn);
		else
			page_destroy(pp, dn);
		return;
	}

	/*
	 * If we are freeing this page and someone else is already
	 * waiting to do a commit, then just unlock the page and
	 * return.  That other thread will take care of committing
	 * this page.  The page can be freed sometime after the
	 * commit has finished.  Otherwise, if the page is marked
	 * as delay commit, then we may be getting called from
	 * pvn_write_done, one page at a time.   This could result
	 * in one commit per page, so we end up doing lots of small
	 * commits instead of fewer larger commits.  This is bad,
	 * we want to do as few commits as possible.
	 */
	if (fl == B_FREE) {
		if (rp->r_flags & R4COMMITWAIT) {
			page_unlock(pp);
			mutex_exit(&rp->r_statelock);
			return;
		}
		if (pp->p_fsdata == C_DELAYCOMMIT) {
			pp->p_fsdata = C_COMMIT;
			page_unlock(pp);
			mutex_exit(&rp->r_statelock);
			return;
		}
	}

	/*
	 * Check to see if there is a signal which would prevent an
	 * attempt to commit the pages from being successful.  If so,
	 * then don't bother with all of the work to gather pages and
	 * generate the unsuccessful RPC.  Just return from here and
	 * let the page be committed at some later time.
	 */
	sigintr(&smask, VTOMI4(vp)->mi_flags & MI4_INT);
	if (ttolwp(curthread) != NULL && ISSIG(curthread, JUSTLOOKING)) {
		sigunintr(&smask);
		page_unlock(pp);
		mutex_exit(&rp->r_statelock);
		return;
	}
	sigunintr(&smask);

	/*
	 * We are starting to need to commit pages, so let's try
	 * to commit as many as possible at once to reduce the
	 * overhead.
	 *
	 * Set the `commit inprogress' state bit.  We must
	 * first wait until any current one finishes.  Then
	 * we initialize the c_pages list with this page.
	 */
	while (rp->r_flags & R4COMMIT) {
		rp->r_flags |= R4COMMITWAIT;
		cv_wait(&rp->r_commit.c_cv, &rp->r_statelock);
		rp->r_flags &= ~R4COMMITWAIT;
	}
	rp->r_flags |= R4COMMIT;
	mutex_exit(&rp->r_statelock);
	ASSERT(rp->r_commit.c_pages == NULL);
	rp->r_commit.c_pages = pp;
	rp->r_commit.c_commbase = (offset3)pp->p_offset;
	rp->r_commit.c_commlen = PAGESIZE;

	/*
	 * Gather together all other pages which can be committed.
	 * They will all be chained off r_commit.c_pages.
	 */
	nfs4_get_commit(vp);

	/*
	 * Clear the `commit inprogress' status and disconnect
	 * the list of pages to be committed from the rnode.
	 * At this same time, we also save the starting offset
	 * and length of data to be committed on the server.
	 */
	plist = rp->r_commit.c_pages;
	rp->r_commit.c_pages = NULL;
	offset = rp->r_commit.c_commbase;
	len = rp->r_commit.c_commlen;
	mutex_enter(&rp->r_statelock);
	rp->r_flags &= ~R4COMMIT;
	cv_broadcast(&rp->r_commit.c_cv);
	mutex_exit(&rp->r_statelock);

	/*
	 * Pageout/fsflush daemons and cross-zone callers must not block
	 * on an OTW commit; hand the work to an async thread instead.
	 */
	if (curproc == proc_pageout || curproc == proc_fsflush ||
	    nfs_zone() != VTOMI4(vp)->mi_zone) {
		nfs4_async_commit(vp, plist, offset, len,
		    cr, do_nfs4_async_commit);
		return;
	}

	/*
	 * Actually generate the COMMIT op over the wire operation.
	 */
	error = nfs4_commit(vp, (offset4)offset, (count4)len, cr);

	/*
	 * If we got an error during the commit, just unlock all
	 * of the pages.  The pages will get retransmitted to the
	 * server during a putpage operation.
	 */
	if (error) {
		while (plist != NULL) {
			pptr = plist;
			page_sub(&plist, pptr);
			page_unlock(pptr);
		}
		return;
	}

	/*
	 * We've tried as hard as we can to commit the data to stable
	 * storage on the server.  We just unlock the rest of the pages
	 * and clear the commit required state.  They will be put
	 * onto the tail of the cachelist if they are no longer
	 * mapped.
	 */
	while (plist != pp) {
		pptr = plist;
		page_sub(&plist, pptr);
		pptr->p_fsdata = C_NOCOMMIT;
		page_unlock(pptr);
	}

	/*
	 * It is possible that nfs4_commit didn't return error but
	 * some other thread has modified the page we are going
	 * to free/destroy.
	 *    In this case we need to rewrite the page. Do an explicit check
	 * before attempting to free/destroy the page. If modified, needs to
	 * be rewritten so unlock the page and return.
	 */
	if (hat_ismod(pp)) {
		pp->p_fsdata = C_NOCOMMIT;
		page_unlock(pp);
		return;
	}

	/*
	 * Now, as appropriate, either free or destroy the page
	 * that we were called with.
	 */
	pp->p_fsdata = C_NOCOMMIT;
	if (fl == B_FREE)
		page_free(pp, dn);
	else
		page_destroy(pp, dn);
}
11714 11722  
11715 11723  /*
11716 11724   * Commit requires that the current fh be the file written to.
11717 11725   * The compound op structure is:
11718 11726   *      PUTFH(file), COMMIT
11719 11727   */
11720 11728  static int
11721 11729  nfs4_commit(vnode_t *vp, offset4 offset, count4 count, cred_t *cr)
11722 11730  {
11723 11731          COMPOUND4args_clnt args;
11724 11732          COMPOUND4res_clnt res;
11725 11733          COMMIT4res *cm_res;
11726 11734          nfs_argop4 argop[2];
11727 11735          nfs_resop4 *resop;
11728 11736          int doqueue;
11729 11737          mntinfo4_t *mi;
11730 11738          rnode4_t *rp;
11731 11739          cred_t *cred_otw = NULL;
11732 11740          bool_t needrecov = FALSE;
11733 11741          nfs4_recov_state_t recov_state;
11734 11742          nfs4_open_stream_t *osp = NULL;
11735 11743          bool_t first_time = TRUE;       /* first time getting OTW cred */
11736 11744          bool_t last_time = FALSE;       /* last time getting OTW cred */
11737 11745          nfs4_error_t e = { 0, NFS4_OK, RPC_SUCCESS };
11738 11746  
11739 11747          ASSERT(nfs_zone() == VTOMI4(vp)->mi_zone);
11740 11748  
11741 11749          rp = VTOR4(vp);
11742 11750  
11743 11751          mi = VTOMI4(vp);
11744 11752          recov_state.rs_flags = 0;
11745 11753          recov_state.rs_num_retry_despite_err = 0;
11746 11754  get_commit_cred:
11747 11755          /*
11748 11756           * Releases the osp, if a valid open stream is provided.
11749 11757           * Puts a hold on the cred_otw and the new osp (if found).
11750 11758           */
11751 11759          cred_otw = nfs4_get_otw_cred_by_osp(rp, cr, &osp,
11752 11760              &first_time, &last_time);
11753 11761          args.ctag = TAG_COMMIT;
11754 11762  recov_retry:
11755 11763          /*
11756 11764           * Commit ops: putfh file; commit
11757 11765           */
11758 11766          args.array_len = 2;
11759 11767          args.array = argop;
11760 11768  
11761 11769          e.error = nfs4_start_fop(VTOMI4(vp), vp, NULL, OH_COMMIT,
11762 11770              &recov_state, NULL);
11763 11771          if (e.error) {
11764 11772                  crfree(cred_otw);
11765 11773                  if (osp != NULL)
11766 11774                          open_stream_rele(osp, rp);
11767 11775                  return (e.error);
11768 11776          }
11769 11777  
11770 11778          /* putfh directory */
11771 11779          argop[0].argop = OP_CPUTFH;
11772 11780          argop[0].nfs_argop4_u.opcputfh.sfh = rp->r_fh;
11773 11781  
11774 11782          /* commit */
11775 11783          argop[1].argop = OP_COMMIT;
11776 11784          argop[1].nfs_argop4_u.opcommit.offset = offset;
11777 11785          argop[1].nfs_argop4_u.opcommit.count = count;
11778 11786  
11779 11787          doqueue = 1;
11780 11788          rfs4call(mi, &args, &res, cred_otw, &doqueue, 0, &e);
11781 11789  
11782 11790          needrecov = nfs4_needs_recovery(&e, FALSE, mi->mi_vfsp);
11783 11791          if (!needrecov && e.error) {
11784 11792                  nfs4_end_fop(VTOMI4(vp), vp, NULL, OH_COMMIT, &recov_state,
11785 11793                      needrecov);
11786 11794                  crfree(cred_otw);
11787 11795                  if (e.error == EACCES && last_time == FALSE)
11788 11796                          goto get_commit_cred;
11789 11797                  if (osp != NULL)
11790 11798                          open_stream_rele(osp, rp);
11791 11799                  return (e.error);
11792 11800          }
11793 11801  
11794 11802          if (needrecov) {
11795 11803                  if (nfs4_start_recovery(&e, VTOMI4(vp), vp, NULL, NULL,
11796 11804                      NULL, OP_COMMIT, NULL, NULL, NULL) == FALSE) {
11797 11805                          nfs4_end_fop(VTOMI4(vp), vp, NULL, OH_COMMIT,
11798 11806                              &recov_state, needrecov);
11799 11807                          if (!e.error)
11800 11808                                  (void) xdr_free(xdr_COMPOUND4res_clnt,
11801 11809                                      (caddr_t)&res);
11802 11810                          goto recov_retry;
11803 11811                  }
11804 11812                  if (e.error) {
11805 11813                          nfs4_end_fop(VTOMI4(vp), vp, NULL, OH_COMMIT,
11806 11814                              &recov_state, needrecov);
11807 11815                          crfree(cred_otw);
11808 11816                          if (osp != NULL)
11809 11817                                  open_stream_rele(osp, rp);
11810 11818                          return (e.error);
11811 11819                  }
11812 11820                  /* fall through for res.status case */
11813 11821          }
11814 11822  
11815 11823          if (res.status) {
11816 11824                  e.error = geterrno4(res.status);
11817 11825                  if (e.error == EACCES && last_time == FALSE) {
11818 11826                          crfree(cred_otw);
11819 11827                          nfs4_end_fop(VTOMI4(vp), vp, NULL, OH_COMMIT,
11820 11828                              &recov_state, needrecov);
11821 11829                          (void) xdr_free(xdr_COMPOUND4res_clnt, (caddr_t)&res);
11822 11830                          goto get_commit_cred;
11823 11831                  }
11824 11832                  /*
11825 11833                   * Can't do a nfs4_purge_stale_fh here because this
11826 11834                   * can cause a deadlock.  nfs4_commit can
11827 11835                   * be called from nfs4_dispose which can be called
11828 11836                   * indirectly via pvn_vplist_dirty.  nfs4_purge_stale_fh
11829 11837                   * can call back to pvn_vplist_dirty.
11830 11838                   */
11831 11839                  if (e.error == ESTALE) {
11832 11840                          mutex_enter(&rp->r_statelock);
11833 11841                          rp->r_flags |= R4STALE;
11834 11842                          if (!rp->r_error)
11835 11843                                  rp->r_error = e.error;
11836 11844                          mutex_exit(&rp->r_statelock);
11837 11845                          PURGE_ATTRCACHE4(vp);
11838 11846                  } else {
11839 11847                          mutex_enter(&rp->r_statelock);
11840 11848                          if (!rp->r_error)
11841 11849                                  rp->r_error = e.error;
11842 11850                          mutex_exit(&rp->r_statelock);
11843 11851                  }
11844 11852          } else {
11845 11853                  ASSERT(rp->r_flags & R4HAVEVERF);
11846 11854                  resop = &res.array[1];  /* commit res */
11847 11855                  cm_res = &resop->nfs_resop4_u.opcommit;
11848 11856                  mutex_enter(&rp->r_statelock);
11849 11857                  if (cm_res->writeverf == rp->r_writeverf) {
11850 11858                          mutex_exit(&rp->r_statelock);
11851 11859                          (void) xdr_free(xdr_COMPOUND4res_clnt, (caddr_t)&res);
11852 11860                          nfs4_end_fop(VTOMI4(vp), vp, NULL, OH_COMMIT,
11853 11861                              &recov_state, needrecov);
11854 11862                          crfree(cred_otw);
11855 11863                          if (osp != NULL)
11856 11864                                  open_stream_rele(osp, rp);
11857 11865                          return (0);
11858 11866                  }
11859 11867                  nfs4_set_mod(vp);
11860 11868                  rp->r_writeverf = cm_res->writeverf;
11861 11869                  mutex_exit(&rp->r_statelock);
11862 11870                  e.error = NFS_VERF_MISMATCH;
11863 11871          }
11864 11872  
11865 11873          (void) xdr_free(xdr_COMPOUND4res_clnt, (caddr_t)&res);
11866 11874          nfs4_end_fop(VTOMI4(vp), vp, NULL, OH_COMMIT, &recov_state, needrecov);
11867 11875          crfree(cred_otw);
11868 11876          if (osp != NULL)
11869 11877                  open_stream_rele(osp, rp);
11870 11878  
11871 11879          return (e.error);
11872 11880  }
11873 11881  
11874 11882  static void
11875 11883  nfs4_set_mod(vnode_t *vp)
11876 11884  {
11877 11885          ASSERT(nfs_zone() == VTOMI4(vp)->mi_zone);
11878 11886  
11879 11887          /* make sure we're looking at the master vnode, not a shadow */
11880 11888          pvn_vplist_setdirty(RTOV4(VTOR4(vp)), nfs_setmod_check);
11881 11889  }
11882 11890  
/*
 * This function is used to gather a page list of the pages which
 * can be committed on the server (clean pages previously written
 * UNSTABLE that still await a COMMIT).
 *
 * The calling thread must have set R4COMMIT.  This bit is used to
 * serialize access to the commit structure in the rnode.  As long
 * as the thread has set R4COMMIT, then it can manipulate the commit
 * structure without requiring any other locks.
 *
 * When this function is called from nfs4_dispose() the page passed
 * into nfs4_dispose() will be SE_EXCL locked, and so this function
 * will skip it. This is not a problem since we initially add the
 * page to the r_commit page list.
 *
 */
static void
nfs4_get_commit(vnode_t *vp)
{
	rnode4_t *rp;
	page_t *pp;
	kmutex_t *vphm;

	rp = VTOR4(vp);

	ASSERT(rp->r_flags & R4COMMIT);

	/* make sure we're looking at the master vnode, not a shadow */

	if (IS_SHADOW(vp, rp))
		vp = RTOV4(rp);

	/* v_pages is protected by the per-vnode page mutex */
	vphm = page_vnode_mutex(vp);
	mutex_enter(vphm);

	/*
	 * If there are no pages associated with this vnode, then
	 * just return.
	 */
	if ((pp = vp->v_pages) == NULL) {
		mutex_exit(vphm);
		return;
	}

	/*
	 * Step through all of the pages associated with this vnode
	 * looking for pages which need to be committed.
	 */
	do {
		/* Skip marker pages. */
		if (pp->p_hash == PVN_VPLIST_HASH_TAG)
			continue;

		/*
		 * First short-cut everything (without the page_lock):
		 * if this page does not need to be committed, or is
		 * modified (and so must be rewritten, not committed),
		 * then we'll just skip it.
		 */
		if (pp->p_fsdata == C_NOCOMMIT || hat_ismod(pp))
			continue;

		/*
		 * Attempt to lock the page.  If we can't, then
		 * someone else is messing with it or we have been
		 * called from nfs4_dispose and this is the page that
		 * nfs4_dispose was called with.. anyway just skip it.
		 */
		if (!page_trylock(pp, SE_EXCL))
			continue;

		/*
		 * Lets check again now that we have the page lock,
		 * since the state could have changed while we raced
		 * for the lock.
		 */
		if (pp->p_fsdata == C_NOCOMMIT || hat_ismod(pp)) {
			page_unlock(pp);
			continue;
		}

		/* this had better not be a free page */
		ASSERT(PP_ISFREE(pp) == 0);

		/*
		 * The page needs to be committed and we locked it.
		 * Update the base and length parameters (growing the
		 * [c_commbase, c_commbase + c_commlen) range downward
		 * or upward as needed to cover this page) and add it
		 * to r_pages.
		 */
		if (rp->r_commit.c_pages == NULL) {
			rp->r_commit.c_commbase = (offset3)pp->p_offset;
			rp->r_commit.c_commlen = PAGESIZE;
		} else if (pp->p_offset < rp->r_commit.c_commbase) {
			/* extend the range downward to this page */
			rp->r_commit.c_commlen = rp->r_commit.c_commbase -
			    (offset3)pp->p_offset + rp->r_commit.c_commlen;
			rp->r_commit.c_commbase = (offset3)pp->p_offset;
		} else if ((rp->r_commit.c_commbase + rp->r_commit.c_commlen)
		    <= pp->p_offset) {
			/* extend the range upward to cover this page */
			rp->r_commit.c_commlen = (offset3)pp->p_offset -
			    rp->r_commit.c_commbase + PAGESIZE;
		}
		page_add(&rp->r_commit.c_pages, pp);
	} while ((pp = pp->p_vpnext) != vp->v_pages);

	mutex_exit(vphm);
}
11985 11993  
/*
 * This routine is used to gather together a page list of the pages
 * within the range [soff, soff + len) which are to be committed on
 * the server.  This routine must not be called if the calling thread
 * holds any locked pages (it takes SE_EXCL page locks itself).
 *
 * The calling thread must have set R4COMMIT.  This bit is used to
 * serialize access to the commit structure in the rnode.  As long
 * as the thread has set R4COMMIT, then it can manipulate the commit
 * structure without requiring any other locks.
 */
static void
nfs4_get_commit_range(vnode_t *vp, u_offset_t soff, size_t len)
{

	rnode4_t *rp;
	page_t *pp;
	u_offset_t end;
	u_offset_t off;
	ASSERT(len != 0);
	rp = VTOR4(vp);
	ASSERT(rp->r_flags & R4COMMIT);

	ASSERT(nfs_zone() == VTOMI4(vp)->mi_zone);

	/* make sure we're looking at the master vnode, not a shadow */

	if (IS_SHADOW(vp, rp))
		vp = RTOV4(rp);

	/*
	 * If there are no pages associated with this vnode, then
	 * just return.
	 */
	if ((pp = vp->v_pages) == NULL)
		return;
	/*
	 * Calculate the ending offset.
	 */
	end = soff + len;
	for (off = soff; off < end; off += PAGESIZE) {
		/*
		 * Lookup each page by vp, offset.  page_lookup_nowait
		 * returns NULL both for absent pages and for pages we
		 * couldn't lock without blocking; skip either way.
		 */
		if ((pp = page_lookup_nowait(vp, off, SE_EXCL)) == NULL)
			continue;
		/*
		 * If this page does not need to be committed or is
		 * modified (and so must be rewritten rather than
		 * committed), then just skip it.
		 */
		if (pp->p_fsdata == C_NOCOMMIT || hat_ismod(pp)) {
			page_unlock(pp);
			continue;
		}

		ASSERT(PP_ISFREE(pp) == 0);
		/*
		 * The page needs to be committed and we locked it.
		 * Update the base and length parameters and add it
		 * to r_pages.  Because we walk offsets in ascending
		 * order, only the first page sets c_commbase; later
		 * pages just extend c_commlen upward.
		 */
		if (rp->r_commit.c_pages == NULL) {
			rp->r_commit.c_commbase = (offset3)pp->p_offset;
			rp->r_commit.c_commlen = PAGESIZE;
		} else {
			rp->r_commit.c_commlen = (offset3)pp->p_offset -
			    rp->r_commit.c_commbase + PAGESIZE;
		}
		page_add(&rp->r_commit.c_pages, pp);
	}
}
12056 12064  
/*
 * Called from nfs4_close(), nfs4_fsync() and nfs4_delmap().
 * Flushes and commits data to the server.
 *
 * Returns 0 on success or an errno on failure.  The flush/commit
 * sequence restarts from the top whenever the server's write
 * verifier changes underneath us (server reboot/state loss), since
 * that means previously UNSTABLE data must be rewritten.
 */
static int
nfs4_putpage_commit(vnode_t *vp, offset_t poff, size_t plen, cred_t *cr)
{
	int error;
	verifier4 write_verf;
	rnode4_t *rp = VTOR4(vp);

	ASSERT(nfs_zone() == VTOMI4(vp)->mi_zone);

	/*
	 * Flush the data portion of the file and then commit any
	 * portions which need to be committed.  This may need to
	 * be done twice if the server has changed state since
	 * data was last written.  The data will need to be
	 * rewritten to the server and then a new commit done.
	 *
	 * In fact, this may need to be done several times if the
	 * server is having problems and crashing while we are
	 * attempting to do this.
	 */

top:
	/*
	 * Do a flush based on the poff and plen arguments.  This
	 * will synchronously write out any modified pages in the
	 * range specified by (poff, plen). This starts all of the
	 * i/o operations which will be waited for in the next
	 * call to nfs4_putpage
	 */

	/* snapshot the verifier so we can detect a server state change */
	mutex_enter(&rp->r_statelock);
	write_verf = rp->r_writeverf;
	mutex_exit(&rp->r_statelock);

	error = nfs4_putpage(vp, poff, plen, B_ASYNC, cr, NULL);
	if (error == EAGAIN)
		error = 0;

	/*
	 * Do a flush based on the poff and plen arguments.  This
	 * will synchronously write out any modified pages in the
	 * range specified by (poff, plen) and wait until all of
	 * the asynchronous i/o's in that range are done as well.
	 */
	if (!error)
		error = nfs4_putpage(vp, poff, plen, 0, cr, NULL);

	if (error)
		return (error);

	/* verifier changed during the flush: rewrite everything */
	mutex_enter(&rp->r_statelock);
	if (rp->r_writeverf != write_verf) {
		mutex_exit(&rp->r_statelock);
		goto top;
	}
	mutex_exit(&rp->r_statelock);

	/*
	 * Now commit any pages which might need to be committed.
	 * If the error, NFS_VERF_MISMATCH, is returned, then
	 * start over with the flush operation.
	 */
	error = nfs4_commit_vp(vp, poff, plen, cr, NFS4_WRITE_WAIT);

	if (error == NFS_VERF_MISMATCH)
		goto top;

	return (error);
}
12130 12138  
/*
 * nfs4_commit_vp()  will wait for other pending commits and
 * will either commit the whole file or a range, plen dictates
 * if we commit whole file. a value of zero indicates the whole
 * file. Called from nfs4_putpage_commit() or nfs4_sync_putapage()
 *
 * wait_on_writes is NFS4_WRITE_WAIT to drain outstanding async
 * writes (r_count) before gathering pages, or NFS4_WRITE_NOWAIT
 * to skip that.  Returns 0 or an errno (including
 * NFS_VERF_MISMATCH from the commit itself).
 */
static int
nfs4_commit_vp(vnode_t *vp, u_offset_t poff, size_t plen,
    cred_t *cr, int wait_on_writes)
{
	rnode4_t *rp;
	page_t *plist;
	offset3 offset;
	count3 len;

	ASSERT(nfs_zone() == VTOMI4(vp)->mi_zone);

	rp = VTOR4(vp);

	/*
	 *  before we gather commitable pages make
	 *  sure there are no outstanding async writes
	 *  (unlocked r_count test is just a fast-path check;
	 *  the authoritative wait is under r_statelock)
	 */
	if (rp->r_count && wait_on_writes == NFS4_WRITE_WAIT) {
		mutex_enter(&rp->r_statelock);
		while (rp->r_count > 0) {
			cv_wait(&rp->r_cv, &rp->r_statelock);
		}
		mutex_exit(&rp->r_statelock);
	}

	/*
	 * Set the `commit inprogress' state bit.  We must
	 * first wait until any current one finishes.  Holding
	 * R4COMMIT gives us exclusive use of rp->r_commit.
	 */
	mutex_enter(&rp->r_statelock);
	while (rp->r_flags & R4COMMIT) {
		rp->r_flags |= R4COMMITWAIT;
		cv_wait(&rp->r_commit.c_cv, &rp->r_statelock);
		rp->r_flags &= ~R4COMMITWAIT;
	}
	rp->r_flags |= R4COMMIT;
	mutex_exit(&rp->r_statelock);

	/*
	 * Gather all of the pages which need to be
	 * committed.  plen == 0 means the whole file.
	 */
	if (plen == 0)
		nfs4_get_commit(vp);
	else
		nfs4_get_commit_range(vp, poff, plen);

	/*
	 * Clear the `commit inprogress' bit and disconnect the
	 * page list which was gathered by nfs4_get_commit.
	 * The pages stay SE_EXCL locked, so it is safe to hand
	 * them off after dropping R4COMMIT.
	 */
	plist = rp->r_commit.c_pages;
	rp->r_commit.c_pages = NULL;
	offset = rp->r_commit.c_commbase;
	len = rp->r_commit.c_commlen;
	mutex_enter(&rp->r_statelock);
	rp->r_flags &= ~R4COMMIT;
	cv_broadcast(&rp->r_commit.c_cv);
	mutex_exit(&rp->r_statelock);

	/*
	 * If any pages need to be committed, commit them and
	 * then unlock them so that they can be freed some
	 * time later.
	 */
	if (plist == NULL)
		return (0);

	/*
	 * No error occurred during the flush portion
	 * of this operation, so now attempt to commit
	 * the data to stable storage on the server.
	 *
	 * This will unlock all of the pages on the list.
	 */
	return (nfs4_sync_commit(vp, plist, offset, len, cr));
}
12214 12222  
12215 12223  static int
12216 12224  nfs4_sync_commit(vnode_t *vp, page_t *plist, offset3 offset, count3 count,
12217 12225      cred_t *cr)
12218 12226  {
12219 12227          int error;
12220 12228          page_t *pp;
12221 12229  
12222 12230          ASSERT(nfs_zone() == VTOMI4(vp)->mi_zone);
12223 12231  
12224 12232          error = nfs4_commit(vp, (offset4)offset, (count3)count, cr);
12225 12233  
12226 12234          /*
12227 12235           * If we got an error, then just unlock all of the pages
12228 12236           * on the list.
12229 12237           */
12230 12238          if (error) {
12231 12239                  while (plist != NULL) {
12232 12240                          pp = plist;
12233 12241                          page_sub(&plist, pp);
12234 12242                          page_unlock(pp);
12235 12243                  }
12236 12244                  return (error);
12237 12245          }
12238 12246          /*
12239 12247           * We've tried as hard as we can to commit the data to stable
12240 12248           * storage on the server.  We just unlock the pages and clear
12241 12249           * the commit required state.  They will get freed later.
12242 12250           */
12243 12251          while (plist != NULL) {
12244 12252                  pp = plist;
12245 12253                  page_sub(&plist, pp);
12246 12254                  pp->p_fsdata = C_NOCOMMIT;
12247 12255                  page_unlock(pp);
12248 12256          }
12249 12257  
12250 12258          return (error);
12251 12259  }
12252 12260  
/*
 * Async-commit worker entry point.  The caller cannot act on a
 * commit failure here, so the result of nfs4_sync_commit() (which
 * also unlocks every page on plist) is deliberately discarded.
 */
static void
do_nfs4_async_commit(vnode_t *vp, page_t *plist, offset3 offset, count3 count,
    cred_t *cr)
{

	(void) nfs4_sync_commit(vp, plist, offset, count, cr);
}
12260 12268  
12261 12269  /*ARGSUSED*/
12262 12270  static int
12263 12271  nfs4_setsecattr(vnode_t *vp, vsecattr_t *vsecattr, int flag, cred_t *cr,
12264 12272      caller_context_t *ct)
12265 12273  {
12266 12274          int             error = 0;
12267 12275          mntinfo4_t      *mi;
12268 12276          vattr_t         va;
12269 12277          vsecattr_t      nfsace4_vsap;
12270 12278  
12271 12279          mi = VTOMI4(vp);
12272 12280          if (nfs_zone() != mi->mi_zone)
12273 12281                  return (EIO);
12274 12282          if (mi->mi_flags & MI4_ACL) {
12275 12283                  /* if we have a delegation, return it */
12276 12284                  if (VTOR4(vp)->r_deleg_type != OPEN_DELEGATE_NONE)
12277 12285                          (void) nfs4delegreturn(VTOR4(vp),
12278 12286                              NFS4_DR_REOPEN|NFS4_DR_PUSH);
12279 12287  
12280 12288                  error = nfs4_is_acl_mask_valid(vsecattr->vsa_mask,
12281 12289                      NFS4_ACL_SET);
12282 12290                  if (error) /* EINVAL */
12283 12291                          return (error);
12284 12292  
12285 12293                  if (vsecattr->vsa_mask & (VSA_ACL | VSA_DFACL)) {
12286 12294                          /*
12287 12295                           * These are aclent_t type entries.
12288 12296                           */
12289 12297                          error = vs_aent_to_ace4(vsecattr, &nfsace4_vsap,
12290 12298                              vp->v_type == VDIR, FALSE);
12291 12299                          if (error)
12292 12300                                  return (error);
12293 12301                  } else {
12294 12302                          /*
12295 12303                           * These are ace_t type entries.
12296 12304                           */
12297 12305                          error = vs_acet_to_ace4(vsecattr, &nfsace4_vsap,
12298 12306                              FALSE);
12299 12307                          if (error)
12300 12308                                  return (error);
12301 12309                  }
12302 12310                  bzero(&va, sizeof (va));
12303 12311                  error = nfs4setattr(vp, &va, flag, cr, &nfsace4_vsap);
12304 12312                  vs_ace4_destroy(&nfsace4_vsap);
12305 12313                  return (error);
12306 12314          }
12307 12315          return (ENOSYS);
12308 12316  }
12309 12317  
/*
 * VOP_GETSECATTR for NFSv4: return the file's ACL, serving from the
 * rnode ACL cache when valid, otherwise fetching it over the wire
 * with the attributes.  Falls back to fs_fab_acl() (a fabricated
 * ACL derived from the mode bits) when the mount or server doesn't
 * do ACLs.
 */
/* ARGSUSED */
int
nfs4_getsecattr(vnode_t *vp, vsecattr_t *vsecattr, int flag, cred_t *cr,
    caller_context_t *ct)
{
	int		error;
	mntinfo4_t	*mi;
	nfs4_ga_res_t	gar;
	rnode4_t	*rp = VTOR4(vp);

	mi = VTOMI4(vp);
	if (nfs_zone() != mi->mi_zone)
		return (EIO);

	bzero(&gar, sizeof (gar));
	gar.n4g_vsa.vsa_mask = vsecattr->vsa_mask;

	/*
	 * vsecattr->vsa_mask holds the original acl request mask.
	 * This is needed when determining what to return.
	 * (See: nfs4_create_getsecattr_return())
	 */
	error = nfs4_is_acl_mask_valid(vsecattr->vsa_mask, NFS4_ACL_GET);
	if (error) /* EINVAL */
		return (error);

	/*
	 * If this is a referral stub, don't try to go OTW for an ACL
	 */
	if (RP_ISSTUB_REFERRAL(VTOR4(vp)))
		return (fs_fab_acl(vp, vsecattr, flag, cr, ct));

	if (mi->mi_flags & MI4_ACL) {
		/*
		 * Check if the data is cached and the cache is valid.  If it
		 * is we don't go over the wire.  The unlocked r_secattr
		 * test is a fast path; it is re-checked under r_statelock.
		 */
		if (rp->r_secattr != NULL && ATTRCACHE4_VALID(vp)) {
			mutex_enter(&rp->r_statelock);
			if (rp->r_secattr != NULL) {
				error = nfs4_create_getsecattr_return(
				    rp->r_secattr, vsecattr, rp->r_attr.va_uid,
				    rp->r_attr.va_gid,
				    vp->v_type == VDIR);
				if (!error) { /* error == 0 - Success! */
					mutex_exit(&rp->r_statelock);
					return (error);
				}
			}
			mutex_exit(&rp->r_statelock);
		}

		/*
		 * The getattr otw call will always get both the acl, in
		 * the form of a list of nfsace4's, and the number of acl
		 * entries; independent of the value of gar.n4g_vsa.vsa_mask.
		 */
		gar.n4g_va.va_mask = AT_ALL;
		error =  nfs4_getattr_otw(vp, &gar, cr, 1);
		if (error) {
			vs_ace4_destroy(&gar.n4g_vsa);
			/* server can't do ACLs: fabricate one from the mode */
			if (error == ENOTSUP || error == EOPNOTSUPP)
				error = fs_fab_acl(vp, vsecattr, flag, cr, ct);
			return (error);
		}

		if (!(gar.n4g_resbmap & FATTR4_ACL_MASK)) {
			/*
			 * No error was returned, but according to the response
			 * bitmap, neither was an acl.
			 */
			vs_ace4_destroy(&gar.n4g_vsa);
			error = fs_fab_acl(vp, vsecattr, flag, cr, ct);
			return (error);
		}

		/*
		 * Update the cache with the ACL.
		 */
		nfs4_acl_fill_cache(rp, &gar.n4g_vsa);

		error = nfs4_create_getsecattr_return(&gar.n4g_vsa,
		    vsecattr, gar.n4g_va.va_uid, gar.n4g_va.va_gid,
		    vp->v_type == VDIR);
		vs_ace4_destroy(&gar.n4g_vsa);
		/*
		 * If translation failed for an aclent-style request
		 * (and it wasn't a permission problem), fall back to
		 * a fabricated ACL rather than failing the call.
		 */
		if ((error) && (vsecattr->vsa_mask &
		    (VSA_ACL | VSA_ACLCNT | VSA_DFACL | VSA_DFACLCNT)) &&
		    (error != EACCES)) {
			error = fs_fab_acl(vp, vsecattr, flag, cr, ct);
		}
		return (error);
	}
	error = fs_fab_acl(vp, vsecattr, flag, cr, ct);
	return (error);
}
12405 12413  
12406 12414  /*
12407 12415   * The function returns:
12408 12416   *      - 0 (zero) if the passed in "acl_mask" is a valid request.
12409 12417   *      - EINVAL if the passed in "acl_mask" is an invalid request.
12410 12418   *
12411 12419   * In the case of getting an acl (op == NFS4_ACL_GET) the mask is invalid if:
12412 12420   * - We have a mixture of ACE and ACL requests (e.g. VSA_ACL | VSA_ACE)
12413 12421   *
12414 12422   * In the case of setting an acl (op == NFS4_ACL_SET) the mask is invalid if:
12415 12423   * - We have a mixture of ACE and ACL requests (e.g. VSA_ACL | VSA_ACE)
12416 12424   * - We have a count field set without the corresponding acl field set. (e.g. -
12417 12425   * VSA_ACECNT is set, but VSA_ACE is not)
12418 12426   */
12419 12427  static int
12420 12428  nfs4_is_acl_mask_valid(uint_t acl_mask, nfs4_acl_op_t op)
12421 12429  {
12422 12430          /* Shortcut the masks that are always valid. */
12423 12431          if (acl_mask == (VSA_ACE | VSA_ACECNT))
12424 12432                  return (0);
12425 12433          if (acl_mask == (VSA_ACL | VSA_ACLCNT | VSA_DFACL | VSA_DFACLCNT))
12426 12434                  return (0);
12427 12435  
12428 12436          if (acl_mask & (VSA_ACE | VSA_ACECNT)) {
12429 12437                  /*
12430 12438                   * We can't have any VSA_ACL type stuff in the mask now.
12431 12439                   */
12432 12440                  if (acl_mask & (VSA_ACL | VSA_ACLCNT | VSA_DFACL |
12433 12441                      VSA_DFACLCNT))
12434 12442                          return (EINVAL);
12435 12443  
12436 12444                  if (op == NFS4_ACL_SET) {
12437 12445                          if ((acl_mask & VSA_ACECNT) && !(acl_mask & VSA_ACE))
12438 12446                                  return (EINVAL);
12439 12447                  }
12440 12448          }
12441 12449  
12442 12450          if (acl_mask & (VSA_ACL | VSA_ACLCNT | VSA_DFACL | VSA_DFACLCNT)) {
12443 12451                  /*
12444 12452                   * We can't have any VSA_ACE type stuff in the mask now.
12445 12453                   */
12446 12454                  if (acl_mask & (VSA_ACE | VSA_ACECNT))
12447 12455                          return (EINVAL);
12448 12456  
12449 12457                  if (op == NFS4_ACL_SET) {
12450 12458                          if ((acl_mask & VSA_ACLCNT) && !(acl_mask & VSA_ACL))
12451 12459                                  return (EINVAL);
12452 12460  
12453 12461                          if ((acl_mask & VSA_DFACLCNT) &&
12454 12462                              !(acl_mask & VSA_DFACL))
12455 12463                                  return (EINVAL);
12456 12464                  }
12457 12465          }
12458 12466          return (0);
12459 12467  }
12460 12468  
12461 12469  /*
12462 12470   * The theory behind creating the correct getsecattr return is simply this:
12463 12471   * "Don't return anything that the caller is not expecting to have to free."
12464 12472   */
12465 12473  static int
12466 12474  nfs4_create_getsecattr_return(vsecattr_t *filled_vsap, vsecattr_t *vsap,
12467 12475      uid_t uid, gid_t gid, int isdir)
12468 12476  {
12469 12477          int error = 0;
12470 12478          /* Save the mask since the translators modify it. */
12471 12479          uint_t  orig_mask = vsap->vsa_mask;
12472 12480  
12473 12481          if (orig_mask & (VSA_ACE | VSA_ACECNT)) {
12474 12482                  error = vs_ace4_to_acet(filled_vsap, vsap, uid, gid, FALSE);
12475 12483  
12476 12484                  if (error)
12477 12485                          return (error);
12478 12486  
12479 12487                  /*
12480 12488                   * If the caller only asked for the ace count (VSA_ACECNT)
12481 12489                   * don't give them the full acl (VSA_ACE), free it.
12482 12490                   */
12483 12491                  if (!orig_mask & VSA_ACE) {
12484 12492                          if (vsap->vsa_aclentp != NULL) {
12485 12493                                  kmem_free(vsap->vsa_aclentp,
12486 12494                                      vsap->vsa_aclcnt * sizeof (ace_t));
12487 12495                                  vsap->vsa_aclentp = NULL;
12488 12496                          }
12489 12497                  }
12490 12498                  vsap->vsa_mask = orig_mask;
12491 12499  
12492 12500          } else if (orig_mask & (VSA_ACL | VSA_ACLCNT | VSA_DFACL |
12493 12501              VSA_DFACLCNT)) {
12494 12502                  error = vs_ace4_to_aent(filled_vsap, vsap, uid, gid,
12495 12503                      isdir, FALSE);
12496 12504  
12497 12505                  if (error)
12498 12506                          return (error);
12499 12507  
12500 12508                  /*
12501 12509                   * If the caller only asked for the acl count (VSA_ACLCNT)
12502 12510                   * and/or the default acl count (VSA_DFACLCNT) don't give them
12503 12511                   * the acl (VSA_ACL) or default acl (VSA_DFACL), free it.
12504 12512                   */
12505 12513                  if (!orig_mask & VSA_ACL) {
12506 12514                          if (vsap->vsa_aclentp != NULL) {
12507 12515                                  kmem_free(vsap->vsa_aclentp,
12508 12516                                      vsap->vsa_aclcnt * sizeof (aclent_t));
12509 12517                                  vsap->vsa_aclentp = NULL;
12510 12518                          }
12511 12519                  }
12512 12520  
12513 12521                  if (!orig_mask & VSA_DFACL) {
12514 12522                          if (vsap->vsa_dfaclentp != NULL) {
12515 12523                                  kmem_free(vsap->vsa_dfaclentp,
12516 12524                                      vsap->vsa_dfaclcnt * sizeof (aclent_t));
12517 12525                                  vsap->vsa_dfaclentp = NULL;
12518 12526                          }
12519 12527                  }
12520 12528                  vsap->vsa_mask = orig_mask;
12521 12529          }
12522 12530          return (0);
12523 12531  }
12524 12532  
12525 12533  /* ARGSUSED */
12526 12534  int
12527 12535  nfs4_shrlock(vnode_t *vp, int cmd, struct shrlock *shr, int flag, cred_t *cr,
12528 12536      caller_context_t *ct)
12529 12537  {
12530 12538          int error;
12531 12539  
12532 12540          if (nfs_zone() != VTOMI4(vp)->mi_zone)
12533 12541                  return (EIO);
12534 12542          /*
12535 12543           * check for valid cmd parameter
12536 12544           */
12537 12545          if (cmd != F_SHARE && cmd != F_UNSHARE && cmd != F_HASREMOTELOCKS)
12538 12546                  return (EINVAL);
12539 12547  
12540 12548          /*
12541 12549           * Check access permissions
12542 12550           */
12543 12551          if ((cmd & F_SHARE) &&
12544 12552              (((shr->s_access & F_RDACC) && (flag & FREAD) == 0) ||
12545 12553              (shr->s_access == F_WRACC && (flag & FWRITE) == 0)))
12546 12554                  return (EBADF);
12547 12555  
12548 12556          /*
12549 12557           * If the filesystem is mounted using local locking, pass the
12550 12558           * request off to the local share code.
12551 12559           */
12552 12560          if (VTOMI4(vp)->mi_flags & MI4_LLOCK)
12553 12561                  return (fs_shrlock(vp, cmd, shr, flag, cr, ct));
12554 12562  
12555 12563          switch (cmd) {
12556 12564          case F_SHARE:
12557 12565          case F_UNSHARE:
12558 12566                  /*
12559 12567                   * This will be properly implemented later,
12560 12568                   * see RFE: 4823948 .
12561 12569                   */
12562 12570                  error = EAGAIN;
12563 12571                  break;
12564 12572  
12565 12573          case F_HASREMOTELOCKS:
12566 12574                  /*
12567 12575                   * NFS client can't store remote locks itself
12568 12576                   */
12569 12577                  shr->s_access = 0;
12570 12578                  error = 0;
12571 12579                  break;
12572 12580  
12573 12581          default:
12574 12582                  error = EINVAL;
12575 12583                  break;
12576 12584          }
12577 12585  
12578 12586          return (error);
12579 12587  }
12580 12588  
12581 12589  /*
12582 12590   * Common code called by directory ops to update the attrcache
12583 12591   */
12584 12592  static int
12585 12593  nfs4_update_attrcache(nfsstat4 status, nfs4_ga_res_t *garp,
12586 12594      hrtime_t t, vnode_t *vp, cred_t *cr)
12587 12595  {
12588 12596          int error = 0;
12589 12597  
12590 12598          ASSERT(nfs_zone() == VTOMI4(vp)->mi_zone);
12591 12599  
12592 12600          if (status != NFS4_OK) {
12593 12601                  /* getattr not done or failed */
12594 12602                  PURGE_ATTRCACHE4(vp);
12595 12603                  return (error);
12596 12604          }
12597 12605  
12598 12606          if (garp) {
12599 12607                  nfs4_attr_cache(vp, garp, t, cr, FALSE, NULL);
12600 12608          } else {
12601 12609                  PURGE_ATTRCACHE4(vp);
12602 12610          }
12603 12611          return (error);
12604 12612  }
12605 12613  
/*
 * Update directory caches for directory modification ops (link, rename, etc.)
 * When dinfo is NULL, manage dircaches in the old way.
 *
 * 'cinfo' is the change_info4 returned by the server for the directory op.
 * 'dvp' is the modified directory, 'vp' (may be NULL) is the affected entry,
 * and 'nm' is that entry's name for DNLC purposes.  When 'dinfo' is non-NULL
 * it carries the post-op directory attributes plus the time/cred of the
 * call, and is used to update the attrcache atomically with the change
 * verification; otherwise the directory attrcache is purged and the DNLC is
 * only updated if the change_info shows no intervening modification.
 */
static void
nfs4_update_dircaches(change_info4 *cinfo, vnode_t *dvp, vnode_t *vp, char *nm,
    dirattr_info_t *dinfo)
{
	rnode4_t	*drp = VTOR4(dvp);

	ASSERT(nfs_zone() == VTOMI4(dvp)->mi_zone);

	/* Purge rddir cache for dir since it changed */
	if (drp->r_dir != NULL)
		nfs4_purge_rddir_cache(dvp);

	/*
	 * If caller provided dinfo, then use it to manage dir caches.
	 */
	if (dinfo != NULL) {
		if (vp != NULL) {
			/*
			 * created_v4 is checked under r_statev4_lock; it is
			 * set when the file was created by this client via
			 * an OPEN that has not yet been confirmed/finished.
			 */
			mutex_enter(&VTOR4(vp)->r_statev4_lock);
			if (!VTOR4(vp)->created_v4) {
				mutex_exit(&VTOR4(vp)->r_statev4_lock);
				dnlc_update(dvp, nm, vp);
			} else {
				/*
				 * XXX don't update if the created_v4 flag is
				 * set
				 */
				mutex_exit(&VTOR4(vp)->r_statev4_lock);
				NFS4_DEBUG(nfs4_client_state_debug,
				    (CE_NOTE, "nfs4_update_dircaches: "
				    "don't update dnlc: created_v4 flag"));
			}
		}

		/* Cache the post-op dir attrs, verifying against cinfo. */
		nfs4_attr_cache(dvp, dinfo->di_garp, dinfo->di_time_call,
		    dinfo->di_cred, FALSE, cinfo);

		return;
	}

	/*
	 * Caller didn't provide dinfo, then check change_info4 to update DNLC.
	 * Since caller modified dir but didn't receive post-dirmod-op dir
	 * attrs, the dir's attrs must be purged.
	 *
	 * XXX this check and dnlc update/purge should really be atomic,
	 * XXX but can't use rnode statelock because it'll deadlock in
	 * XXX dnlc_purge_vp, however, the risk is minimal even if a race
	 * XXX does occur.
	 *
	 * XXX We also may want to check that atomic is true in the
	 * XXX change_info struct. If it is not, the change_info may
	 * XXX reflect changes by more than one clients which means that
	 * XXX our cache may not be valid.
	 */
	PURGE_ATTRCACHE4(dvp);
	if (drp->r_change == cinfo->before) {
		/* no changes took place in the directory prior to our link */
		if (vp != NULL) {
			mutex_enter(&VTOR4(vp)->r_statev4_lock);
			if (!VTOR4(vp)->created_v4) {
				mutex_exit(&VTOR4(vp)->r_statev4_lock);
				dnlc_update(dvp, nm, vp);
			} else {
				/*
				 * XXX dont' update if the created_v4 flag
				 * is set
				 */
				mutex_exit(&VTOR4(vp)->r_statev4_lock);
				NFS4_DEBUG(nfs4_client_state_debug, (CE_NOTE,
				    "nfs4_update_dircaches: don't"
				    " update dnlc: created_v4 flag"));
			}
		}
	} else {
		/* Another client modified directory - purge its dnlc cache */
		dnlc_purge_vp(dvp);
	}
}
12688 12696  
12689 12697  /*
12690 12698   * The OPEN_CONFIRM operation confirms the sequence number used in OPENing a
12691 12699   * file.
12692 12700   *
12693 12701   * The 'reopening_file' boolean should be set to TRUE if we are reopening this
12694 12702   * file (ie: client recovery) and otherwise set to FALSE.
12695 12703   *
12696 12704   * 'nfs4_start/end_op' should have been called by the proper (ie: not recovery
12697 12705   * initiated) calling functions.
12698 12706   *
12699 12707   * 'resend' is set to TRUE if this is a OPEN_CONFIRM issued as a result
12700 12708   * of resending a 'lost' open request.
12701 12709   *
12702 12710   * 'num_bseqid_retryp' makes sure we don't loop forever on a broken
12703 12711   * server that hands out BAD_SEQID on open confirm.
12704 12712   *
12705 12713   * Errors are returned via the nfs4_error_t parameter.
12706 12714   */
12707 12715  void
12708 12716  nfs4open_confirm(vnode_t *vp, seqid4 *seqid, stateid4 *stateid, cred_t *cr,
12709 12717      bool_t reopening_file, bool_t *retry_open, nfs4_open_owner_t *oop,
12710 12718      bool_t resend, nfs4_error_t *ep, int *num_bseqid_retryp)
12711 12719  {
12712 12720          COMPOUND4args_clnt args;
12713 12721          COMPOUND4res_clnt res;
12714 12722          nfs_argop4 argop[2];
12715 12723          nfs_resop4 *resop;
12716 12724          int doqueue = 1;
12717 12725          mntinfo4_t *mi;
12718 12726          OPEN_CONFIRM4args *open_confirm_args;
12719 12727          int needrecov;
12720 12728  
12721 12729          ASSERT(nfs_zone() == VTOMI4(vp)->mi_zone);
12722 12730  #if DEBUG
12723 12731          mutex_enter(&oop->oo_lock);
12724 12732          ASSERT(oop->oo_seqid_inuse);
12725 12733          mutex_exit(&oop->oo_lock);
12726 12734  #endif
12727 12735  
12728 12736  recov_retry_confirm:
12729 12737          nfs4_error_zinit(ep);
12730 12738          *retry_open = FALSE;
12731 12739  
12732 12740          if (resend)
12733 12741                  args.ctag = TAG_OPEN_CONFIRM_LOST;
12734 12742          else
12735 12743                  args.ctag = TAG_OPEN_CONFIRM;
12736 12744  
12737 12745          args.array_len = 2;
12738 12746          args.array = argop;
12739 12747  
12740 12748          /* putfh target fh */
12741 12749          argop[0].argop = OP_CPUTFH;
12742 12750          argop[0].nfs_argop4_u.opcputfh.sfh = VTOR4(vp)->r_fh;
12743 12751  
12744 12752          argop[1].argop = OP_OPEN_CONFIRM;
12745 12753          open_confirm_args = &argop[1].nfs_argop4_u.opopen_confirm;
12746 12754  
12747 12755          (*seqid) += 1;
12748 12756          open_confirm_args->seqid = *seqid;
12749 12757          open_confirm_args->open_stateid = *stateid;
12750 12758  
12751 12759          mi = VTOMI4(vp);
12752 12760  
12753 12761          rfs4call(mi, &args, &res, cr, &doqueue, 0, ep);
12754 12762  
12755 12763          if (!ep->error && nfs4_need_to_bump_seqid(&res)) {
12756 12764                  nfs4_set_open_seqid((*seqid), oop, args.ctag);
12757 12765          }
12758 12766  
12759 12767          needrecov = nfs4_needs_recovery(ep, FALSE, mi->mi_vfsp);
12760 12768          if (!needrecov && ep->error)
12761 12769                  return;
12762 12770  
12763 12771          if (needrecov) {
12764 12772                  bool_t abort = FALSE;
12765 12773  
12766 12774                  if (reopening_file == FALSE) {
12767 12775                          nfs4_bseqid_entry_t *bsep = NULL;
12768 12776  
12769 12777                          if (!ep->error && res.status == NFS4ERR_BAD_SEQID)
12770 12778                                  bsep = nfs4_create_bseqid_entry(oop, NULL,
12771 12779                                      vp, 0, args.ctag,
12772 12780                                      open_confirm_args->seqid);
12773 12781  
12774 12782                          abort = nfs4_start_recovery(ep, VTOMI4(vp), vp, NULL,
12775 12783                              NULL, NULL, OP_OPEN_CONFIRM, bsep, NULL, NULL);
12776 12784                          if (bsep) {
12777 12785                                  kmem_free(bsep, sizeof (*bsep));
12778 12786                                  if (num_bseqid_retryp &&
12779 12787                                      --(*num_bseqid_retryp) == 0)
12780 12788                                          abort = TRUE;
12781 12789                          }
12782 12790                  }
12783 12791                  if ((ep->error == ETIMEDOUT ||
12784 12792                      res.status == NFS4ERR_RESOURCE) &&
12785 12793                      abort == FALSE && resend == FALSE) {
12786 12794                          if (!ep->error)
12787 12795                                  (void) xdr_free(xdr_COMPOUND4res_clnt,
12788 12796                                      (caddr_t)&res);
12789 12797  
12790 12798                          delay(SEC_TO_TICK(confirm_retry_sec));
12791 12799                          goto recov_retry_confirm;
12792 12800                  }
12793 12801                  /* State may have changed so retry the entire OPEN op */
12794 12802                  if (abort == FALSE)
12795 12803                          *retry_open = TRUE;
12796 12804                  else
12797 12805                          *retry_open = FALSE;
12798 12806                  if (!ep->error)
12799 12807                          (void) xdr_free(xdr_COMPOUND4res_clnt, (caddr_t)&res);
12800 12808                  return;
12801 12809          }
12802 12810  
12803 12811          if (res.status) {
12804 12812                  (void) xdr_free(xdr_COMPOUND4res_clnt, (caddr_t)&res);
12805 12813                  return;
12806 12814          }
12807 12815  
12808 12816          resop = &res.array[1];  /* open confirm res */
12809 12817          bcopy(&resop->nfs_resop4_u.opopen_confirm.open_stateid,
12810 12818              stateid, sizeof (*stateid));
12811 12819  
12812 12820          (void) xdr_free(xdr_COMPOUND4res_clnt, (caddr_t)&res);
12813 12821  }
12814 12822  
12815 12823  /*
12816 12824   * Return the credentials associated with a client state object.  The
12817 12825   * caller is responsible for freeing the credentials.
12818 12826   */
12819 12827  
12820 12828  static cred_t *
12821 12829  state_to_cred(nfs4_open_stream_t *osp)
12822 12830  {
12823 12831          cred_t *cr;
12824 12832  
12825 12833          /*
12826 12834           * It's ok to not lock the open stream and open owner to get
12827 12835           * the oo_cred since this is only written once (upon creation)
12828 12836           * and will not change.
12829 12837           */
12830 12838          cr = osp->os_open_owner->oo_cred;
12831 12839          crhold(cr);
12832 12840  
12833 12841          return (cr);
12834 12842  }
12835 12843  
12836 12844  /*
12837 12845   * nfs4_find_sysid
12838 12846   *
12839 12847   * Find the sysid for the knetconfig associated with the given mi.
12840 12848   */
12841 12849  static struct lm_sysid *
12842 12850  nfs4_find_sysid(mntinfo4_t *mi)
12843 12851  {
12844 12852          ASSERT(nfs_zone() == mi->mi_zone);
12845 12853  
12846 12854          /*
12847 12855           * Switch from RDMA knconf to original mount knconf
12848 12856           */
12849 12857          return (lm_get_sysid(ORIG_KNCONF(mi), &mi->mi_curr_serv->sv_addr,
12850 12858              mi->mi_curr_serv->sv_hostname, NULL));
12851 12859  }
12852 12860  
#ifdef DEBUG
/*
 * Return a string version of the call type for easy reading.
 */
static char *
nfs4frlock_get_call_type(nfs4_lock_call_type_t ctype)
{
	if (ctype == NFS4_LCK_CTYPE_NORM)
		return ("NORMAL");
	if (ctype == NFS4_LCK_CTYPE_RECLAIM)
		return ("RECLAIM");
	if (ctype == NFS4_LCK_CTYPE_RESEND)
		return ("RESEND");
	if (ctype == NFS4_LCK_CTYPE_REINSTATE)
		return ("REINSTATE");

	/* any other value is a programming error */
	cmn_err(CE_PANIC, "nfs4frlock_get_call_type: got illegal "
	    "type %d", ctype);
	return ("");
}
#endif
12876 12884  
12877 12885  /*
12878 12886   * Map the frlock cmd and lock type to the NFSv4 over-the-wire lock type
12879 12887   * Unlock requests don't have an over-the-wire locktype, so we just return
12880 12888   * something non-threatening.
12881 12889   */
12882 12890  
12883 12891  static nfs_lock_type4
12884 12892  flk_to_locktype(int cmd, int l_type)
12885 12893  {
12886 12894          ASSERT(l_type == F_RDLCK || l_type == F_WRLCK || l_type == F_UNLCK);
12887 12895  
12888 12896          switch (l_type) {
12889 12897          case F_UNLCK:
12890 12898                  return (READ_LT);
12891 12899          case F_RDLCK:
12892 12900                  if (cmd == F_SETLK)
12893 12901                          return (READ_LT);
12894 12902                  else
12895 12903                          return (READW_LT);
12896 12904          case F_WRLCK:
12897 12905                  if (cmd == F_SETLK)
12898 12906                          return (WRITE_LT);
12899 12907                  else
12900 12908                          return (WRITEW_LT);
12901 12909          }
12902 12910          panic("flk_to_locktype");
12903 12911          /*NOTREACHED*/
12904 12912  }
12905 12913  
12906 12914  /*
12907 12915   * Do some preliminary checks for nfs4frlock.
12908 12916   */
12909 12917  static int
12910 12918  nfs4frlock_validate_args(int cmd, flock64_t *flk, int flag, vnode_t *vp,
12911 12919      u_offset_t offset)
12912 12920  {
12913 12921          int error = 0;
12914 12922  
12915 12923          /*
12916 12924           * If we are setting a lock, check that the file is opened
12917 12925           * with the correct mode.
12918 12926           */
12919 12927          if (cmd == F_SETLK || cmd == F_SETLKW) {
12920 12928                  if ((flk->l_type == F_RDLCK && (flag & FREAD) == 0) ||
12921 12929                      (flk->l_type == F_WRLCK && (flag & FWRITE) == 0)) {
12922 12930                          NFS4_DEBUG(nfs4_client_lock_debug, (CE_NOTE,
12923 12931                              "nfs4frlock_validate_args: file was opened with "
12924 12932                              "incorrect mode"));
12925 12933                          return (EBADF);
12926 12934                  }
12927 12935          }
12928 12936  
12929 12937          /* Convert the offset. It may need to be restored before returning. */
12930 12938          if (error = convoff(vp, flk, 0, offset)) {
12931 12939                  NFS4_DEBUG(nfs4_client_lock_debug, (CE_NOTE,
12932 12940                      "nfs4frlock_validate_args: convoff  =>  error= %d\n",
12933 12941                      error));
12934 12942                  return (error);
12935 12943          }
12936 12944  
12937 12945          return (error);
12938 12946  }
12939 12947  
12940 12948  /*
12941 12949   * Set the flock64's lm_sysid for nfs4frlock.
12942 12950   */
12943 12951  static int
12944 12952  nfs4frlock_get_sysid(struct lm_sysid **lspp, vnode_t *vp, flock64_t *flk)
12945 12953  {
12946 12954          ASSERT(nfs_zone() == VTOMI4(vp)->mi_zone);
12947 12955  
12948 12956          /* Find the lm_sysid */
12949 12957          *lspp = nfs4_find_sysid(VTOMI4(vp));
12950 12958  
12951 12959          if (*lspp == NULL) {
12952 12960                  NFS4_DEBUG(nfs4_client_lock_debug, (CE_NOTE,
12953 12961                      "nfs4frlock_get_sysid: no sysid, return ENOLCK"));
12954 12962                  return (ENOLCK);
12955 12963          }
12956 12964  
12957 12965          flk->l_sysid = lm_sysidt(*lspp);
12958 12966  
12959 12967          return (0);
12960 12968  }
12961 12969  
12962 12970  /*
12963 12971   * Do the remaining preliminary setup for nfs4frlock.
12964 12972   */
12965 12973  static void
12966 12974  nfs4frlock_pre_setup(clock_t *tick_delayp, nfs4_recov_state_t *recov_statep,
12967 12975      flock64_t *flk, short *whencep, vnode_t *vp, cred_t *search_cr,
12968 12976      cred_t **cred_otw)
12969 12977  {
12970 12978          /*
12971 12979           * set tick_delay to the base delay time.
12972 12980           * (NFS4_BASE_WAIT_TIME is in secs)
12973 12981           */
12974 12982  
12975 12983          *tick_delayp = drv_usectohz(NFS4_BASE_WAIT_TIME * 1000 * 1000);
12976 12984  
12977 12985          /*
12978 12986           * If lock is relative to EOF, we need the newest length of the
12979 12987           * file. Therefore invalidate the ATTR_CACHE.
12980 12988           */
12981 12989  
12982 12990          *whencep = flk->l_whence;
12983 12991  
12984 12992          if (*whencep == 2)              /* SEEK_END */
12985 12993                  PURGE_ATTRCACHE4(vp);
12986 12994  
12987 12995          recov_statep->rs_flags = 0;
12988 12996          recov_statep->rs_num_retry_despite_err = 0;
12989 12997          *cred_otw = nfs4_get_otw_cred(search_cr, VTOMI4(vp), NULL);
12990 12998  }
12991 12999  
12992 13000  /*
12993 13001   * Initialize and allocate the data structures necessary for
12994 13002   * the nfs4frlock call.
12995 13003   * Allocates argsp's op array, frees up the saved_rqstpp if there is one.
12996 13004   */
12997 13005  static void
12998 13006  nfs4frlock_call_init(COMPOUND4args_clnt *argsp, COMPOUND4args_clnt **argspp,
12999 13007      nfs_argop4 **argopp, nfs4_op_hint_t *op_hintp, flock64_t *flk, int cmd,
13000 13008      bool_t *retry, bool_t *did_start_fop, COMPOUND4res_clnt **respp,
13001 13009      bool_t *skip_get_err, nfs4_lost_rqst_t *lost_rqstp)
13002 13010  {
13003 13011          int             argoplist_size;
13004 13012          int             num_ops = 2;
13005 13013  
13006 13014          *retry = FALSE;
13007 13015          *did_start_fop = FALSE;
13008 13016          *skip_get_err = FALSE;
13009 13017          lost_rqstp->lr_op = 0;
13010 13018          argoplist_size  = num_ops * sizeof (nfs_argop4);
13011 13019          /* fill array with zero */
13012 13020          *argopp = kmem_zalloc(argoplist_size, KM_SLEEP);
13013 13021  
13014 13022          *argspp = argsp;
13015 13023          *respp = NULL;
13016 13024  
13017 13025          argsp->array_len = num_ops;
13018 13026          argsp->array = *argopp;
13019 13027  
13020 13028          /* initialize in case of error; will get real value down below */
13021 13029          argsp->ctag = TAG_NONE;
13022 13030  
13023 13031          if ((cmd == F_SETLK || cmd == F_SETLKW) && flk->l_type == F_UNLCK)
13024 13032                  *op_hintp = OH_LOCKU;
13025 13033          else
13026 13034                  *op_hintp = OH_OTHER;
13027 13035  }
13028 13036  
13029 13037  /*
13030 13038   * Call the nfs4_start_fop() for nfs4frlock, if necessary.  Assign
13031 13039   * the proper nfs4_server_t for this instance of nfs4frlock.
13032 13040   * Returns 0 (success) or an errno value.
13033 13041   */
13034 13042  static int
13035 13043  nfs4frlock_start_call(nfs4_lock_call_type_t ctype, vnode_t *vp,
13036 13044      nfs4_op_hint_t op_hint, nfs4_recov_state_t *recov_statep,
13037 13045      bool_t *did_start_fop, bool_t *startrecovp)
13038 13046  {
13039 13047          int error = 0;
13040 13048          rnode4_t *rp;
13041 13049  
13042 13050          ASSERT(nfs_zone() == VTOMI4(vp)->mi_zone);
13043 13051  
13044 13052          if (ctype == NFS4_LCK_CTYPE_NORM) {
13045 13053                  error = nfs4_start_fop(VTOMI4(vp), vp, NULL, op_hint,
13046 13054                      recov_statep, startrecovp);
13047 13055                  if (error)
13048 13056                          return (error);
13049 13057                  *did_start_fop = TRUE;
13050 13058          } else {
13051 13059                  *did_start_fop = FALSE;
13052 13060                  *startrecovp = FALSE;
13053 13061          }
13054 13062  
13055 13063          if (!error) {
13056 13064                  rp = VTOR4(vp);
13057 13065  
13058 13066                  /* If the file failed recovery, just quit. */
13059 13067                  mutex_enter(&rp->r_statelock);
13060 13068                  if (rp->r_flags & R4RECOVERR) {
13061 13069                          error = EIO;
13062 13070                  }
13063 13071                  mutex_exit(&rp->r_statelock);
13064 13072          }
13065 13073  
13066 13074          return (error);
13067 13075  }
13068 13076  
/*
 * Setup the LOCK4/LOCKU4 arguments for resending a lost lock request.  A
 * resend nfs4frlock call is initiated by the recovery framework.
 * Acquires the lop and oop seqid synchronization.
 *
 * On return, '*oopp'/'*lopp'/'*ospp' hold references (taken here) on the
 * open owner, lock owner, and open stream from the lost request, and
 * exactly one of '*lock_argsp' or '*locku_argsp' points at the filled-in
 * operation inside 'argop', depending on whether the lost request was an
 * OP_LOCK or an OP_LOCKU.
 */
static void
nfs4frlock_setup_resend_lock_args(nfs4_lost_rqst_t *resend_rqstp,
    COMPOUND4args_clnt *argsp, nfs_argop4 *argop, nfs4_lock_owner_t **lopp,
    nfs4_open_owner_t **oopp, nfs4_open_stream_t **ospp,
    LOCK4args **lock_argsp, LOCKU4args **locku_argsp)
{
	mntinfo4_t *mi = VTOMI4(resend_rqstp->lr_vp);
	int error;

	NFS4_DEBUG((nfs4_lost_rqst_debug || nfs4_client_lock_debug),
	    (CE_NOTE,
	    "nfs4frlock_setup_resend_lock_args: have lost lock to resend"));
	ASSERT(resend_rqstp != NULL);
	ASSERT(resend_rqstp->lr_op == OP_LOCK ||
	    resend_rqstp->lr_op == OP_LOCKU);

	/*
	 * Open-owner seqid sync is acquired before lock-owner seqid sync
	 * below; the lost request may have no open owner (lr_oop == NULL).
	 */
	*oopp = resend_rqstp->lr_oop;
	if (resend_rqstp->lr_oop) {
		open_owner_hold(resend_rqstp->lr_oop);
		error = nfs4_start_open_seqid_sync(resend_rqstp->lr_oop, mi);
		ASSERT(error == 0);	/* recov thread always succeeds */
	}

	/* Must resend this lost lock/locku request. */
	ASSERT(resend_rqstp->lr_lop != NULL);
	*lopp = resend_rqstp->lr_lop;
	lock_owner_hold(resend_rqstp->lr_lop);
	error = nfs4_start_lock_seqid_sync(resend_rqstp->lr_lop, mi);
	ASSERT(error == 0);	/* recov thread always succeeds */

	*ospp = resend_rqstp->lr_osp;
	if (*ospp)
		open_stream_hold(resend_rqstp->lr_osp);

	if (resend_rqstp->lr_op == OP_LOCK) {
		LOCK4args *lock_args;

		argop->argop = OP_LOCK;
		*lock_argsp = lock_args = &argop->nfs_argop4_u.oplock;
		lock_args->locktype = resend_rqstp->lr_locktype;
		lock_args->reclaim =
		    (resend_rqstp->lr_ctype == NFS4_LCK_CTYPE_RECLAIM);
		lock_args->offset = resend_rqstp->lr_flk->l_start;
		lock_args->length = resend_rqstp->lr_flk->l_len;
		/*
		 * A POSIX l_len of 0 means "to EOF"; over the wire that is
		 * represented by the all-ones length (~0).
		 */
		if (lock_args->length == 0)
			lock_args->length = ~lock_args->length;
		nfs4_setup_lock_args(*lopp, *oopp, *ospp,
		    mi2clientid(mi), &lock_args->locker);

		/* tag the compound so the resend type shows up in traces */
		switch (resend_rqstp->lr_ctype) {
		case NFS4_LCK_CTYPE_RESEND:
			argsp->ctag = TAG_LOCK_RESEND;
			break;
		case NFS4_LCK_CTYPE_REINSTATE:
			argsp->ctag = TAG_LOCK_REINSTATE;
			break;
		case NFS4_LCK_CTYPE_RECLAIM:
			argsp->ctag = TAG_LOCK_RECLAIM;
			break;
		default:
			argsp->ctag = TAG_LOCK_UNKNOWN;
			break;
		}
	} else {
		LOCKU4args *locku_args;
		nfs4_lock_owner_t *lop = resend_rqstp->lr_lop;

		argop->argop = OP_LOCKU;
		*locku_argsp = locku_args = &argop->nfs_argop4_u.oplocku;
		/* unlocks have no meaningful OTW locktype; READ_LT is inert */
		locku_args->locktype = READ_LT;
		locku_args->seqid = lop->lock_seqid + 1;
		/* lock_stateid is protected by lo_lock */
		mutex_enter(&lop->lo_lock);
		locku_args->lock_stateid = lop->lock_stateid;
		mutex_exit(&lop->lo_lock);
		locku_args->offset = resend_rqstp->lr_flk->l_start;
		locku_args->length = resend_rqstp->lr_flk->l_len;
		/* l_len of 0 (to EOF) maps to the all-ones OTW length */
		if (locku_args->length == 0)
			locku_args->length = ~locku_args->length;

		switch (resend_rqstp->lr_ctype) {
		case NFS4_LCK_CTYPE_RESEND:
			argsp->ctag = TAG_LOCKU_RESEND;
			break;
		case NFS4_LCK_CTYPE_REINSTATE:
			argsp->ctag = TAG_LOCKU_REINSTATE;
			break;
		default:
			argsp->ctag = TAG_LOCK_UNKNOWN;
			break;
		}
	}
}
13166 13174  
13167 13175  /*
13168 13176   * Setup the LOCKT4 arguments.
13169 13177   */
13170 13178  static void
13171 13179  nfs4frlock_setup_lockt_args(nfs4_lock_call_type_t ctype, nfs_argop4 *argop,
13172 13180      LOCKT4args **lockt_argsp, COMPOUND4args_clnt *argsp, flock64_t *flk,
13173 13181      rnode4_t *rp)
13174 13182  {
13175 13183          LOCKT4args *lockt_args;
13176 13184  
13177 13185          ASSERT(nfs_zone() == VTOMI4(RTOV4(rp))->mi_zone);
13178 13186          ASSERT(ctype == NFS4_LCK_CTYPE_NORM);
13179 13187          argop->argop = OP_LOCKT;
13180 13188          argsp->ctag = TAG_LOCKT;
13181 13189          lockt_args = &argop->nfs_argop4_u.oplockt;
13182 13190  
13183 13191          /*
13184 13192           * The locktype will be READ_LT unless it's
13185 13193           * a write lock. We do this because the Solaris
13186 13194           * system call allows the combination of
13187 13195           * F_UNLCK and F_GETLK* and so in that case the
13188 13196           * unlock is mapped to a read.
13189 13197           */
13190 13198          if (flk->l_type == F_WRLCK)
13191 13199                  lockt_args->locktype = WRITE_LT;
13192 13200          else
13193 13201                  lockt_args->locktype = READ_LT;
13194 13202  
13195 13203          lockt_args->owner.clientid = mi2clientid(VTOMI4(RTOV4(rp)));
13196 13204          /* set the lock owner4 args */
13197 13205          nfs4_setlockowner_args(&lockt_args->owner, rp,
13198 13206              ctype == NFS4_LCK_CTYPE_NORM ? curproc->p_pidp->pid_id :
13199 13207              flk->l_pid);
13200 13208          lockt_args->offset = flk->l_start;
13201 13209          lockt_args->length = flk->l_len;
13202 13210          if (flk->l_len == 0)
13203 13211                  lockt_args->length = ~lockt_args->length;
13204 13212  
13205 13213          *lockt_argsp = lockt_args;
13206 13214  }
13207 13215  
/*
 * If the client is holding a delegation, and the open stream to be used
 * with this lock request is a delegation open stream, then re-open the stream.
 * Sets the nfs4_error_t to all zeros unless the open stream has already
 * failed a reopen or we couldn't find the open stream.  NFS4ERR_DELAY
 * means the caller should retry (like a recovery retry).
 */
static void
nfs4frlock_check_deleg(vnode_t *vp, nfs4_error_t *ep, cred_t *cr, int lt)
{
	open_delegation_type4	dt;
	bool_t			reopen_needed, force;
	nfs4_open_stream_t	*osp;
	open_claim_type4	oclaim;
	rnode4_t		*rp = VTOR4(vp);
	mntinfo4_t		*mi = VTOMI4(vp);

	ASSERT(nfs_zone() == mi->mi_zone);

	nfs4_error_zinit(ep);

	/* Snapshot the delegation type under the rnode state lock. */
	mutex_enter(&rp->r_statev4_lock);
	dt = rp->r_deleg_type;
	mutex_exit(&rp->r_statev4_lock);

	if (dt != OPEN_DELEGATE_NONE) {
		nfs4_open_owner_t	*oop;

		oop = find_open_owner(cr, NFS4_PERM_CREATED, mi);
		if (!oop) {
			/* No open owner for this cred: nothing to reopen. */
			ep->stat = NFS4ERR_IO;
			return;
		}
		/* returns with 'os_sync_lock' held */
		osp = find_open_stream(oop, rp);
		if (!osp) {
			open_owner_rele(oop);
			ep->stat = NFS4ERR_IO;
			return;
		}

		if (osp->os_failed_reopen) {
			/* A previous reopen attempt failed; give up now. */
			NFS4_DEBUG((nfs4_open_stream_debug ||
			    nfs4_client_lock_debug), (CE_NOTE,
			    "nfs4frlock_check_deleg: os_failed_reopen set "
			    "for osp %p, cr %p, rp %s", (void *)osp,
			    (void *)cr, rnode4info(rp)));
			mutex_exit(&osp->os_sync_lock);
			open_stream_rele(osp, rp);
			open_owner_rele(oop);
			ep->stat = NFS4ERR_IO;
			return;
		}

		/*
		 * Determine whether a reopen is needed.  If this
		 * is a delegation open stream, then send the open
		 * to the server to give visibility to the open owner.
		 * Even if it isn't a delegation open stream, we need
		 * to check if the previous open CLAIM_DELEGATE_CUR
		 * was sufficient.
		 */

		reopen_needed = osp->os_delegation ||
		    ((lt == F_RDLCK &&
		    !(osp->os_dc_openacc & OPEN4_SHARE_ACCESS_READ)) ||
		    (lt == F_WRLCK &&
		    !(osp->os_dc_openacc & OPEN4_SHARE_ACCESS_WRITE)));

		mutex_exit(&osp->os_sync_lock);
		open_owner_rele(oop);

		if (reopen_needed) {
			/*
			 * Always use CLAIM_PREVIOUS after server reboot.
			 * The server will reject CLAIM_DELEGATE_CUR if
			 * it is used during the grace period.
			 */
			mutex_enter(&mi->mi_lock);
			if (mi->mi_recovflags & MI4R_SRV_REBOOT) {
				oclaim = CLAIM_PREVIOUS;
				force = TRUE;
			} else {
				oclaim = CLAIM_DELEGATE_CUR;
				force = FALSE;
			}
			mutex_exit(&mi->mi_lock);

			nfs4_reopen(vp, osp, ep, oclaim, force, FALSE);
			if (ep->error == EAGAIN) {
				/* Map EAGAIN to "retry later" for the caller. */
				nfs4_error_zinit(ep);
				ep->stat = NFS4ERR_DELAY;
			}
		}
		open_stream_rele(osp, rp);
		osp = NULL;
	}
}
13306 13314  
/*
 * Setup the LOCKU4 arguments.
 * Returns errors via the nfs4_error_t.
 * NFS4_OK		no problems.  *go_otwp is TRUE if call should go
 *			over-the-wire.  The caller must release the
 *			reference on *lopp.
 * NFS4ERR_DELAY	caller should retry (like recovery retry)
 * (other)		unrecoverable error.
 */
static void
nfs4frlock_setup_locku_args(nfs4_lock_call_type_t ctype, nfs_argop4 *argop,
    LOCKU4args **locku_argsp, flock64_t *flk,
    nfs4_lock_owner_t **lopp, nfs4_error_t *ep, COMPOUND4args_clnt *argsp,
    vnode_t *vp, int flag, u_offset_t offset, cred_t *cr,
    bool_t *skip_get_err, bool_t *go_otwp)
{
	nfs4_lock_owner_t	*lop = NULL;
	LOCKU4args		*locku_args;
	pid_t			pid;
	bool_t			is_spec = FALSE;
	rnode4_t		*rp = VTOR4(vp);

	ASSERT(nfs_zone() == VTOMI4(vp)->mi_zone);
	ASSERT(ctype == NFS4_LCK_CTYPE_NORM);

	/* Re-open a delegation open stream first, if that is needed. */
	nfs4frlock_check_deleg(vp, ep, cr, F_UNLCK);
	if (ep->error || ep->stat)
		return;

	argop->argop = OP_LOCKU;
	if (ctype == NFS4_LCK_CTYPE_REINSTATE)
		argsp->ctag = TAG_LOCKU_REINSTATE;
	else
		argsp->ctag = TAG_LOCKU;
	locku_args = &argop->nfs_argop4_u.oplocku;
	*locku_argsp = locku_args;

	/*
	 * XXX what should locku_args->locktype be?
	 * setting to ALWAYS be READ_LT so at least
	 * it is a valid locktype.
	 */

	locku_args->locktype = READ_LT;

	pid = ctype == NFS4_LCK_CTYPE_NORM ? curproc->p_pidp->pid_id :
	    flk->l_pid;

	/*
	 * Get the lock owner stateid.  If no lock owner
	 * exists, return success.
	 */
	lop = find_lock_owner(rp, pid, LOWN_ANY);
	*lopp = lop;
	if (lop && CLNT_ISSPECIAL(&lop->lock_stateid))
		is_spec = TRUE;
	if (!lop || is_spec) {
		/*
		 * No lock owner so no locks to unlock.
		 * Return success.  If there was a failed
		 * reclaim earlier, the lock might still be
		 * registered with the local locking code,
		 * so notify it of the unlock.
		 *
		 * If the lockowner is using a special stateid,
		 * then the original lock request (that created
		 * this lockowner) was never successful, so we
		 * have no lock to undo OTW.
		 */
		NFS4_DEBUG(nfs4_client_lock_debug, (CE_NOTE,
		    "nfs4frlock_setup_locku_args: LOCKU: no lock owner "
		    "(%ld) so return success", (long)pid));

		if (ctype == NFS4_LCK_CTYPE_NORM)
			flk->l_pid = curproc->p_pid;
		nfs4_register_lock_locally(vp, flk, flag, offset);
		/*
		 * Release our hold and NULL out so final_cleanup
		 * doesn't try to end a lock seqid sync we
		 * never started.
		 */
		if (is_spec) {
			lock_owner_rele(lop);
			*lopp = NULL;
		}
		*skip_get_err = TRUE;
		*go_otwp = FALSE;
		return;
	}

	/* Serialize against other users of this lock owner's seqid. */
	ep->error = nfs4_start_lock_seqid_sync(lop, VTOMI4(vp));
	if (ep->error == EAGAIN) {
		lock_owner_rele(lop);
		*lopp = NULL;
		return;
	}

	mutex_enter(&lop->lo_lock);
	locku_args->lock_stateid = lop->lock_stateid;
	mutex_exit(&lop->lo_lock);
	locku_args->seqid = lop->lock_seqid + 1;

	/* leave the ref count on lop, rele after RPC call */

	locku_args->offset = flk->l_start;
	locku_args->length = flk->l_len;
	/* l_len of zero means "to EOF"; NFSv4 encodes that as max length */
	if (flk->l_len == 0)
		locku_args->length = ~locku_args->length;

	*go_otwp = TRUE;
}
13418 13426  
/*
 * Setup the LOCK4 arguments.
 *
 * Returns errors via the nfs4_error_t.
 * NFS4_OK		no problems
 * NFS4ERR_DELAY	caller should retry (like recovery retry)
 * (other)		unrecoverable error
 */
static void
nfs4frlock_setup_lock_args(nfs4_lock_call_type_t ctype, LOCK4args **lock_argsp,
    nfs4_open_owner_t **oopp, nfs4_open_stream_t **ospp,
    nfs4_lock_owner_t **lopp, nfs_argop4 *argop, COMPOUND4args_clnt *argsp,
    flock64_t *flk, int cmd, vnode_t *vp, cred_t *cr, nfs4_error_t *ep)
{
	LOCK4args		*lock_args;
	nfs4_open_owner_t	*oop = NULL;
	nfs4_open_stream_t	*osp = NULL;
	nfs4_lock_owner_t	*lop = NULL;
	pid_t			pid;
	rnode4_t		*rp = VTOR4(vp);

	ASSERT(nfs_zone() == VTOMI4(vp)->mi_zone);

	/* Re-open a delegation open stream first, if that is needed. */
	nfs4frlock_check_deleg(vp, ep, cr, flk->l_type);
	if (ep->error || ep->stat != NFS4_OK)
		return;

	argop->argop = OP_LOCK;
	if (ctype == NFS4_LCK_CTYPE_NORM)
		argsp->ctag = TAG_LOCK;
	else if (ctype == NFS4_LCK_CTYPE_RECLAIM)
		argsp->ctag = TAG_RELOCK;
	else
		argsp->ctag = TAG_LOCK_REINSTATE;
	lock_args = &argop->nfs_argop4_u.oplock;
	lock_args->locktype = flk_to_locktype(cmd, flk->l_type);
	lock_args->reclaim = ctype == NFS4_LCK_CTYPE_RECLAIM ? 1 : 0;
	/*
	 * Get the lock owner.  If no lock owner exists,
	 * create a 'temporary' one and grab the open seqid
	 * synchronization (which puts a hold on the open
	 * owner and open stream).
	 * This also grabs the lock seqid synchronization.
	 */
	pid = ctype == NFS4_LCK_CTYPE_NORM ? curproc->p_pid : flk->l_pid;
	ep->stat =
	    nfs4_find_or_create_lock_owner(pid, rp, cr, &oop, &osp, &lop);

	if (ep->stat != NFS4_OK)
		goto out;

	nfs4_setup_lock_args(lop, oop, osp, mi2clientid(VTOMI4(vp)),
	    &lock_args->locker);

	lock_args->offset = flk->l_start;
	lock_args->length = flk->l_len;
	/* l_len of zero means "to EOF"; NFSv4 encodes that as max length */
	if (flk->l_len == 0)
		lock_args->length = ~lock_args->length;
	*lock_argsp = lock_args;
out:
	/*
	 * Hand the holds acquired above back to the caller (even on
	 * failure) so the caller can release them.
	 */
	*oopp = oop;
	*ospp = osp;
	*lopp = lop;
}
13483 13491  
/*
 * After we get the reply from the server, record the proper information
 * for possible resend lock requests.
 *
 * Allocates memory for the saved_rqstp if we have a lost lock to save.
 */
static void
nfs4frlock_save_lost_rqst(nfs4_lock_call_type_t ctype, int error,
    nfs_lock_type4 locktype, nfs4_open_owner_t *oop,
    nfs4_open_stream_t *osp, nfs4_lock_owner_t *lop, flock64_t *flk,
    nfs4_lost_rqst_t *lost_rqstp, cred_t *cr, vnode_t *vp)
{
	bool_t unlock = (flk->l_type == F_UNLCK);

	ASSERT(nfs_zone() == VTOMI4(vp)->mi_zone);
	ASSERT(ctype == NFS4_LCK_CTYPE_NORM ||
	    ctype == NFS4_LCK_CTYPE_REINSTATE);

	if (error != 0 && !unlock) {
		/*
		 * A failed LOCK leaves this owner with a request that
		 * may need to be resent; note that on the lock owner.
		 */
		NFS4_DEBUG((nfs4_lost_rqst_debug ||
		    nfs4_client_lock_debug), (CE_NOTE,
		    "nfs4frlock_save_lost_rqst: set lo_pending_rqsts to 1 "
		    " for lop %p", (void *)lop));
		ASSERT(lop != NULL);
		mutex_enter(&lop->lo_lock);
		lop->lo_pending_rqsts = 1;
		mutex_exit(&lop->lo_lock);
	}

	lost_rqstp->lr_putfirst = FALSE;
	lost_rqstp->lr_op = 0;

	/*
	 * For lock/locku requests, we treat EINTR as ETIMEDOUT for
	 * recovery purposes so that the lock request that was sent
	 * can be saved and re-issued later.  Ditto for EIO from a forced
	 * unmount.  This is done to have the client's local locking state
	 * match the v4 server's state; that is, the request was
	 * potentially received and accepted by the server but the client
	 * thinks it was not.
	 */
	if (error == ETIMEDOUT || error == EINTR ||
	    NFS4_FRC_UNMT_ERR(error, vp->v_vfsp)) {
		NFS4_DEBUG((nfs4_lost_rqst_debug ||
		    nfs4_client_lock_debug), (CE_NOTE,
		    "nfs4frlock_save_lost_rqst: got a lost %s lock for "
		    "lop %p oop %p osp %p", unlock ? "LOCKU" : "LOCK",
		    (void *)lop, (void *)oop, (void *)osp));
		if (unlock)
			lost_rqstp->lr_op = OP_LOCKU;
		else {
			lost_rqstp->lr_op = OP_LOCK;
			lost_rqstp->lr_locktype = locktype;
		}
		/*
		 * Objects are held and rele'd via the recovery code.
		 * See nfs4_save_lost_rqst.
		 */
		lost_rqstp->lr_vp = vp;
		lost_rqstp->lr_dvp = NULL;
		lost_rqstp->lr_oop = oop;
		lost_rqstp->lr_osp = osp;
		lost_rqstp->lr_lop = lop;
		lost_rqstp->lr_cr = cr;
		switch (ctype) {
		case NFS4_LCK_CTYPE_NORM:
			flk->l_pid = ttoproc(curthread)->p_pid;
			lost_rqstp->lr_ctype = NFS4_LCK_CTYPE_RESEND;
			break;
		case NFS4_LCK_CTYPE_REINSTATE:
			/* Reinstated requests go to the head of the queue. */
			lost_rqstp->lr_putfirst = TRUE;
			lost_rqstp->lr_ctype = ctype;
			break;
		default:
			break;
		}
		lost_rqstp->lr_flk = flk;
	}
}
13563 13571  
13564 13572  /*
13565 13573   * Update lop's seqid.  Also update the seqid stored in a resend request,
13566 13574   * if any.  (Some recovery errors increment the seqid, and we may have to
13567 13575   * send the resend request again.)
13568 13576   */
13569 13577  
13570 13578  static void
13571 13579  nfs4frlock_bump_seqid(LOCK4args *lock_args, LOCKU4args *locku_args,
13572 13580      nfs4_open_owner_t *oop, nfs4_lock_owner_t *lop, nfs4_tag_type_t tag_type)
13573 13581  {
13574 13582          if (lock_args) {
13575 13583                  if (lock_args->locker.new_lock_owner == TRUE)
13576 13584                          nfs4_get_and_set_next_open_seqid(oop, tag_type);
13577 13585                  else {
13578 13586                          ASSERT(lop->lo_flags & NFS4_LOCK_SEQID_INUSE);
13579 13587                          nfs4_set_lock_seqid(lop->lock_seqid + 1, lop);
13580 13588                  }
13581 13589          } else if (locku_args) {
13582 13590                  ASSERT(lop->lo_flags & NFS4_LOCK_SEQID_INUSE);
13583 13591                  nfs4_set_lock_seqid(lop->lock_seqid +1, lop);
13584 13592          }
13585 13593  }
13586 13594  
/*
 * Calls nfs4_end_fop, drops the seqid syncs, and frees up the
 * COMPOUND4 args/res for calls that need to retry.
 * Switches the *cred_otwp to base_cr.
 */
static void
nfs4frlock_check_access(vnode_t *vp, nfs4_op_hint_t op_hint,
    nfs4_recov_state_t *recov_statep, int needrecov, bool_t *did_start_fop,
    COMPOUND4args_clnt **argspp, COMPOUND4res_clnt **respp, int error,
    nfs4_lock_owner_t **lopp, nfs4_open_owner_t **oopp,
    nfs4_open_stream_t **ospp, cred_t *base_cr, cred_t **cred_otwp)
{
	nfs4_open_owner_t	*oop = *oopp;
	nfs4_open_stream_t	*osp = *ospp;
	nfs4_lock_owner_t	*lop = *lopp;
	nfs_argop4		*argop = (*argspp)->array;

	if (*did_start_fop) {
		nfs4_end_fop(VTOMI4(vp), vp, NULL, op_hint, recov_statep,
		    needrecov);
		*did_start_fop = FALSE;
	}
	/* Free the compound args (and results, if the call succeeded). */
	ASSERT((*argspp)->array_len == 2);
	if (argop[1].argop == OP_LOCK)
		nfs4args_lock_free(&argop[1]);
	else if (argop[1].argop == OP_LOCKT)
		nfs4args_lockt_free(&argop[1]);
	kmem_free(argop, 2 * sizeof (nfs_argop4));
	if (!error)
		(void) xdr_free(xdr_COMPOUND4res_clnt, (caddr_t)*respp);
	*argspp = NULL;
	*respp = NULL;

	if (lop) {
		nfs4_end_lock_seqid_sync(lop);
		lock_owner_rele(lop);
		*lopp = NULL;
	}

	/* need to free up the reference on osp for lock args */
	if (osp != NULL) {
		open_stream_rele(osp, VTOR4(vp));
		*ospp = NULL;
	}

	/* need to free up the reference on oop for lock args */
	if (oop != NULL) {
		nfs4_end_open_seqid_sync(oop);
		open_owner_rele(oop);
		*oopp = NULL;
	}

	/* Retry the call over-the-wire with base_cr instead. */
	crfree(*cred_otwp);
	*cred_otwp = base_cr;
	crhold(*cred_otwp);
}
13643 13651  
/*
 * Function to process the client's recovery for nfs4frlock.
 * Returns TRUE if we should retry the lock request; FALSE otherwise.
 *
 * Calls nfs4_end_fop, drops the seqid syncs, and frees up the
 * COMPOUND4 args/res for calls that need to retry.
 *
 * Note: the rp's r_lkserlock is *not* dropped during this path.
 */
static bool_t
nfs4frlock_recovery(int needrecov, nfs4_error_t *ep,
    COMPOUND4args_clnt **argspp, COMPOUND4res_clnt **respp,
    LOCK4args *lock_args, LOCKU4args *locku_args,
    nfs4_open_owner_t **oopp, nfs4_open_stream_t **ospp,
    nfs4_lock_owner_t **lopp, rnode4_t *rp, vnode_t *vp,
    nfs4_recov_state_t *recov_statep, nfs4_op_hint_t op_hint,
    bool_t *did_start_fop, nfs4_lost_rqst_t *lost_rqstp, flock64_t *flk)
{
	nfs4_open_owner_t	*oop = *oopp;
	nfs4_open_stream_t	*osp = *ospp;
	nfs4_lock_owner_t	*lop = *lopp;

	bool_t abort, retry;

	ASSERT(nfs_zone() == VTOMI4(vp)->mi_zone);
	ASSERT((*argspp) != NULL);
	ASSERT((*respp) != NULL);
	if (lock_args || locku_args)
		ASSERT(lop != NULL);

	NFS4_DEBUG((nfs4_client_lock_debug || nfs4_client_recov_debug),
	    (CE_NOTE, "nfs4frlock_recovery: initiating recovery\n"));

	retry = TRUE;
	abort = FALSE;
	if (needrecov) {
		nfs4_bseqid_entry_t *bsep = NULL;
		nfs_opnum4 op;

		/* Which lock op failed determines the recovery op. */
		op = lock_args ? OP_LOCK : locku_args ? OP_LOCKU : OP_LOCKT;

		if (!ep->error && ep->stat == NFS4ERR_BAD_SEQID) {
			seqid4 seqid;

			/*
			 * Record the seqid the server rejected so recovery
			 * can resynchronize the owner's seqid state.
			 */
			if (lock_args) {
				if (lock_args->locker.new_lock_owner == TRUE)
					seqid = lock_args->locker.locker4_u.
					    open_owner.open_seqid;
				else
					seqid = lock_args->locker.locker4_u.
					    lock_owner.lock_seqid;
			} else if (locku_args) {
				seqid = locku_args->seqid;
			} else {
				seqid = 0;
			}

			bsep = nfs4_create_bseqid_entry(oop, lop, vp,
			    flk->l_pid, (*argspp)->ctag, seqid);
		}

		abort = nfs4_start_recovery(ep, VTOMI4(vp), vp, NULL, NULL,
		    (lost_rqstp && (lost_rqstp->lr_op == OP_LOCK ||
		    lost_rqstp->lr_op == OP_LOCKU)) ? lost_rqstp :
		    NULL, op, bsep, NULL, NULL);

		if (bsep)
			kmem_free(bsep, sizeof (*bsep));
	}

	/*
	 * Return that we do not want to retry the request for 3 cases:
	 * 1. If we received EINTR or are bailing out because of a forced
	 *    unmount, we came into this code path just for the sake of
	 *    initiating recovery, we now need to return the error.
	 * 2. If we have aborted recovery.
	 * 3. We received NFS4ERR_BAD_SEQID.
	 */
	if (ep->error == EINTR || NFS4_FRC_UNMT_ERR(ep->error, vp->v_vfsp) ||
	    abort == TRUE || (ep->error == 0 && ep->stat == NFS4ERR_BAD_SEQID))
		retry = FALSE;

	if (*did_start_fop == TRUE) {
		nfs4_end_fop(VTOMI4(vp), vp, NULL, op_hint, recov_statep,
		    needrecov);
		*did_start_fop = FALSE;
	}

	if (retry == TRUE) {
		/* Free the compound args/res so the caller can rebuild them. */
		nfs_argop4	*argop;

		argop = (*argspp)->array;
		ASSERT((*argspp)->array_len == 2);

		if (argop[1].argop == OP_LOCK)
			nfs4args_lock_free(&argop[1]);
		else if (argop[1].argop == OP_LOCKT)
			nfs4args_lockt_free(&argop[1]);
		kmem_free(argop, 2 * sizeof (nfs_argop4));
		if (!ep->error)
			(void) xdr_free(xdr_COMPOUND4res_clnt, (caddr_t)*respp);
		*respp = NULL;
		*argspp = NULL;
	}

	if (lop != NULL) {
		nfs4_end_lock_seqid_sync(lop);
		lock_owner_rele(lop);
	}

	*lopp = NULL;

	/* need to free up the reference on osp for lock args */
	if (osp != NULL) {
		open_stream_rele(osp, rp);
		*ospp = NULL;
	}

	/* need to free up the reference on oop for lock args */
	if (oop != NULL) {
		nfs4_end_open_seqid_sync(oop);
		open_owner_rele(oop);
		*oopp = NULL;
	}

	return (retry);
}
13771 13779  
13772 13780  /*
13773 13781   * Handles the successful reply from the server for nfs4frlock.
13774 13782   */
13775 13783  static void
13776 13784  nfs4frlock_results_ok(nfs4_lock_call_type_t ctype, int cmd, flock64_t *flk,
13777 13785      vnode_t *vp, int flag, u_offset_t offset,
13778 13786      nfs4_lost_rqst_t *resend_rqstp)
13779 13787  {
13780 13788          ASSERT(nfs_zone() == VTOMI4(vp)->mi_zone);
             /*
              * Only a successful F_SETLK/F_SETLKW that actually acquired a
              * read or write lock needs to be registered with the client's
              * local lock manager; GETLK probes and unlocks are handled on
              * other paths.
              */
13781 13789          if ((cmd == F_SETLK || cmd == F_SETLKW) &&
13782 13790              (flk->l_type == F_RDLCK || flk->l_type == F_WRLCK)) {
13783 13791                  if (ctype == NFS4_LCK_CTYPE_NORM) {
                                 /* Normal request: record the caller's pid. */
13784 13792                          flk->l_pid = ttoproc(curthread)->p_pid;
13785 13793                          /*
13786 13794                           * We do not register lost locks locally in
13787 13795                           * the 'resend' case since the user/application
13788 13796                           * doesn't think we have the lock.
13789 13797                           */
13790 13798                          ASSERT(!resend_rqstp);
13791 13799                          nfs4_register_lock_locally(vp, flk, flag, offset);
13792 13800                  }
13793 13801          }
13794 13802  }
13795 13803  
13796 13804  /*
13797 13805   * Handle the DENIED reply from the server for nfs4frlock.
13798 13806   * Returns TRUE if we should retry the request; FALSE otherwise.
13799 13807   *
13800 13808   * Calls nfs4_end_fop, drops the seqid syncs, and frees up the
13801 13809   * COMPOUND4 args/res for calls that need to retry.  Can also
13802 13810   * drop and regrab the r_lkserlock.
13803 13811   */
13804 13812  static bool_t
13805 13813  nfs4frlock_results_denied(nfs4_lock_call_type_t ctype, LOCK4args *lock_args,
13806 13814      LOCKT4args *lockt_args, nfs4_open_owner_t **oopp,
13807 13815      nfs4_open_stream_t **ospp, nfs4_lock_owner_t **lopp, int cmd,
13808 13816      vnode_t *vp, flock64_t *flk, nfs4_op_hint_t op_hint,
13809 13817      nfs4_recov_state_t *recov_statep, int needrecov,
13810 13818      COMPOUND4args_clnt **argspp, COMPOUND4res_clnt **respp,
13811 13819      clock_t *tick_delayp, short *whencep, int *errorp,
13812 13820      nfs_resop4 *resop, cred_t *cr, bool_t *did_start_fop,
13813 13821      bool_t *skip_get_err)
13814 13822  {
13815 13823          ASSERT(nfs_zone() == VTOMI4(vp)->mi_zone);
13816 13824  
13817 13825          if (lock_args) {
13818 13826                  nfs4_open_owner_t       *oop = *oopp;
13819 13827                  nfs4_open_stream_t      *osp = *ospp;
13820 13828                  nfs4_lock_owner_t       *lop = *lopp;
13821 13829                  int                     intr;
13822 13830  
13823 13831                  /*
13824 13832                   * Blocking lock needs to sleep and retry from the request.
13825 13833                   *
13826 13834                   * Do not block and wait for 'resend' or 'reinstate'
13827 13835                   * lock requests, just return the error.
13828 13836                   *
13829 13837                   * Note: reclaim requests have cmd == F_SETLK, not F_SETLKW.
13830 13838                   */
13831 13839                  if (cmd == F_SETLKW) {
13832 13840                          rnode4_t *rp = VTOR4(vp);
13833 13841                          nfs_argop4 *argop = (*argspp)->array;
13834 13842  
13835 13843                          ASSERT(ctype == NFS4_LCK_CTYPE_NORM);
13836 13844  
                                 /*
                                  * Before sleeping, release everything the
                                  * in-flight call held: end the fop, free the
                                  * two-op (PUTFH + LOCK/LOCKT) compound
                                  * args/res, and drop the seqid syncs and
                                  * references so recovery and other lockers
                                  * are not blocked while we wait.
                                  */
13837 13845                          nfs4_end_fop(VTOMI4(vp), vp, NULL, op_hint,
13838 13846                              recov_statep, needrecov);
13839 13847                          *did_start_fop = FALSE;
13840 13848                          ASSERT((*argspp)->array_len == 2);
13841 13849                          if (argop[1].argop == OP_LOCK)
13842 13850                                  nfs4args_lock_free(&argop[1]);
13843 13851                          else if (argop[1].argop == OP_LOCKT)
13844 13852                                  nfs4args_lockt_free(&argop[1]);
13845 13853                          kmem_free(argop, 2 * sizeof (nfs_argop4));
13846 13854                          if (*respp)
13847 13855                                  (void) xdr_free(xdr_COMPOUND4res_clnt,
13848 13856                                      (caddr_t)*respp);
13849 13857                          *argspp = NULL;
13850 13858                          *respp = NULL;
13851 13859                          nfs4_end_lock_seqid_sync(lop);
13852 13860                          lock_owner_rele(lop);
13853 13861                          *lopp = NULL;
13854 13862                          if (osp != NULL) {
13855 13863                                  open_stream_rele(osp, rp);
13856 13864                                  *ospp = NULL;
13857 13865                          }
13858 13866                          if (oop != NULL) {
13859 13867                                  nfs4_end_open_seqid_sync(oop);
13860 13868                                  open_owner_rele(oop);
13861 13869                                  *oopp = NULL;
13862 13870                          }
13863 13871  
                                 /*
                                  * Drop r_lkserlock while blocked so other
                                  * lock traffic on this rnode can proceed.
                                  */
13864 13872                          nfs_rw_exit(&rp->r_lkserlock);
13865 13873  
13866 13874                          intr = nfs4_block_and_wait(tick_delayp, rp);
13867 13875  
                                 /*
                                  * Interrupted by a signal: re-acquire the
                                  * serialization lock (caller expects it
                                  * held) and fail with EINTR.
                                  */
13868 13876                          if (intr) {
13869 13877                                  (void) nfs_rw_enter_sig(&rp->r_lkserlock,
13870 13878                                      RW_WRITER, FALSE);
13871 13879                                  *errorp = EINTR;
13872 13880                                  return (FALSE);
13873 13881                          }
13874 13882  
13875 13883                          (void) nfs_rw_enter_sig(&rp->r_lkserlock,
13876 13884                              RW_WRITER, FALSE);
13877 13885  
13878 13886                          /*
13879 13887                           * Make sure we are still safe to lock with
13880 13888                           * regards to mmapping.
13881 13889                           */
13882 13890                          if (!nfs4_safelock(vp, flk, cr)) {
13883 13891                                  *errorp = EAGAIN;
13884 13892                                  return (FALSE);
13885 13893                          }
13886 13894  
                                 /* TRUE tells the caller to retry the lock. */
13887 13895                          return (TRUE);
13888 13896                  }
                         /*
                          * Non-blocking request was denied: report EAGAIN for
                          * normal calls; recovery-driven calls keep the raw
                          * status (skip_get_err suppresses errno mapping).
                          */
13889 13897                  if (ctype == NFS4_LCK_CTYPE_NORM)
13890 13898                          *errorp = EAGAIN;
13891 13899                  *skip_get_err = TRUE;
13892 13900                  flk->l_whence = 0;
13893 13901                  *whencep = 0;
13894 13902                  return (FALSE);
13895 13903          } else if (lockt_args) {
13896 13904                  NFS4_DEBUG(nfs4_client_lock_debug, (CE_NOTE,
13897 13905                      "nfs4frlock_results_denied: OP_LOCKT DENIED"));
13898 13906  
                         /*
                          * LOCKT (GETLK probe): translate the server's DENIED
                          * info into the caller's flock64 so it can see who
                          * holds the conflicting lock.
                          */
13899 13907                  denied_to_flk(&resop->nfs_resop4_u.oplockt.denied,
13900 13908                      flk, lockt_args);
13901 13909  
13902 13910                  /* according to NLM code */
13903 13911                  *errorp = 0;
13904 13912                  *whencep = 0;
13905 13913                  *skip_get_err = TRUE;
13906 13914                  return (FALSE);
13907 13915          }
13908 13916          return (FALSE);
13909 13917  }
13910 13918  
13911 13919  /*
13912 13920   * Handles all NFS4 errors besides NFS4_OK and NFS4ERR_DENIED for nfs4frlock.
13913 13921   */
13914 13922  static void
13915 13923  nfs4frlock_results_default(COMPOUND4res_clnt *resp, int *errorp)
13916 13924  {
13917 13925          switch (resp->status) {
             /*
              * Recognized lock-related failure statuses: take no action here;
              * *errorp keeps its prior value and the caller maps/handles
              * resp->status itself.
              */
13918 13926          case NFS4ERR_ACCESS:
13919 13927          case NFS4ERR_ADMIN_REVOKED:
13920 13928          case NFS4ERR_BADHANDLE:
13921 13929          case NFS4ERR_BAD_RANGE:
13922 13930          case NFS4ERR_BAD_SEQID:
13923 13931          case NFS4ERR_BAD_STATEID:
13924 13932          case NFS4ERR_BADXDR:
13925 13933          case NFS4ERR_DEADLOCK:
13926 13934          case NFS4ERR_DELAY:
13927 13935          case NFS4ERR_EXPIRED:
13928 13936          case NFS4ERR_FHEXPIRED:
13929 13937          case NFS4ERR_GRACE:
13930 13938          case NFS4ERR_INVAL:
13931 13939          case NFS4ERR_ISDIR:
13932 13940          case NFS4ERR_LEASE_MOVED:
13933 13941          case NFS4ERR_LOCK_NOTSUPP:
13934 13942          case NFS4ERR_LOCK_RANGE:
13935 13943          case NFS4ERR_MOVED:
13936 13944          case NFS4ERR_NOFILEHANDLE:
13937 13945          case NFS4ERR_NO_GRACE:
13938 13946          case NFS4ERR_OLD_STATEID:
13939 13947          case NFS4ERR_OPENMODE:
13940 13948          case NFS4ERR_RECLAIM_BAD:
13941 13949          case NFS4ERR_RECLAIM_CONFLICT:
13942 13950          case NFS4ERR_RESOURCE:
13943 13951          case NFS4ERR_SERVERFAULT:
13944 13952          case NFS4ERR_STALE:
13945 13953          case NFS4ERR_STALE_CLIENTID:
13946 13954          case NFS4ERR_STALE_STATEID:
13947 13955                  return;
13948 13956          default:
13949 13957                  NFS4_DEBUG(nfs4_client_lock_debug, (CE_NOTE,
13950 13958                      "nfs4frlock_results_default: got unrecognizable "
13951 13959                      "res.status %d", resp->status));
                         /*
                          * NOTE(review): stores an NFS4 status constant into an
                          * errno variable; presumably intentional since
                          * NFS4ERR_INVAL has the same value as EINVAL, but
                          * confirm — geterrno4() would state the intent.
                          */
13952 13960                  *errorp = NFS4ERR_INVAL;
13953 13961          }
13954 13962  }
13955 13963  
13956 13964  /*
13957 13965   * The lock request was successful, so update the client's state.
13958 13966   */
13959 13967  static void
13960 13968  nfs4frlock_update_state(LOCK4args *lock_args, LOCKU4args *locku_args,
13961 13969      LOCKT4args *lockt_args, nfs_resop4 *resop, nfs4_lock_owner_t *lop,
13962 13970      vnode_t *vp, flock64_t *flk, cred_t *cr,
13963 13971      nfs4_lost_rqst_t *resend_rqstp)
13964 13972  {
13965 13973          ASSERT(nfs_zone() == VTOMI4(vp)->mi_zone);
13966 13974  
13967 13975          if (lock_args) {
13968 13976                  LOCK4res *lock_res;
13969 13977  
13970 13978                  lock_res = &resop->nfs_resop4_u.oplock;
13971 13979                  /* update the stateid with server's response */
13972 13980  
                         /*
                          * First successful lock by a newly created lock
                          * owner: promote it to permanently created.
                          */
13973 13981                  if (lock_args->locker.new_lock_owner == TRUE) {
13974 13982                          mutex_enter(&lop->lo_lock);
13975 13983                          lop->lo_just_created = NFS4_PERM_CREATED;
13976 13984                          mutex_exit(&lop->lo_lock);
13977 13985                  }
13978 13986  
13979 13987                  nfs4_set_lock_stateid(lop, lock_res->LOCK4res_u.lock_stateid);
13980 13988  
13981 13989                  /*
13982 13990                   * If the lock was the result of a resending a lost
13983 13991                   * request, we've synched up the stateid and seqid
13984 13992                   * with the server, but now the server might be out of sync
13985 13993                   * with what the application thinks it has for locks.
13986 13994                   * Clean that up here.  It's unclear whether we should do
13987 13995                   * this even if the filesystem has been forcibly unmounted.
13988 13996                   * For most servers, it's probably wasted effort, but
13989 13997                   * RFC3530 lets servers require that unlocks exactly match
13990 13998                   * the locks that are held.
13991 13999                   */
13992 14000                  if (resend_rqstp != NULL &&
13993 14001                      resend_rqstp->lr_ctype != NFS4_LCK_CTYPE_REINSTATE) {
13994 14002                          nfs4_reinstitute_local_lock_state(vp, flk, cr, lop);
13995 14003                  } else {
13996 14004                          flk->l_whence = 0;
13997 14005                  }
13998 14006          } else if (locku_args) {
13999 14007                  LOCKU4res *locku_res;
14000 14008  
14001 14009                  locku_res = &resop->nfs_resop4_u.oplocku;
14002 14010  
14003 14011                  /* Update the stateid with the server's response */
14004 14012                  nfs4_set_lock_stateid(lop, locku_res->lock_stateid);
14005 14013          } else if (lockt_args) {
14006 14014                  /* Switch the lock type to express success, see fcntl */
14007 14015                  flk->l_type = F_UNLCK;
14008 14016                  flk->l_whence = 0;
14009 14017          }
14010 14018  }
14011 14019  
14012 14020  /*
14013 14021   * Do final cleanup before exiting nfs4frlock.
14014 14022   * Calls nfs4_end_fop, drops the seqid syncs, and frees up the
14015 14023   * COMPOUND4 args/res for calls that haven't already.
14016 14024   */
14017 14025  static void
14018 14026  nfs4frlock_final_cleanup(nfs4_lock_call_type_t ctype, COMPOUND4args_clnt *argsp,
14019 14027      COMPOUND4res_clnt *resp, vnode_t *vp, nfs4_op_hint_t op_hint,
14020 14028      nfs4_recov_state_t *recov_statep, int needrecov, nfs4_open_owner_t *oop,
14021 14029      nfs4_open_stream_t *osp, nfs4_lock_owner_t *lop, flock64_t *flk,
14022 14030      short whence, u_offset_t offset, struct lm_sysid *ls,
14023 14031      int *errorp, LOCK4args *lock_args, LOCKU4args *locku_args,
14024 14032      bool_t did_start_fop, bool_t skip_get_err,
14025 14033      cred_t *cred_otw, cred_t *cred)
14026 14034  {
14027 14035          mntinfo4_t      *mi = VTOMI4(vp);
14028 14036          rnode4_t        *rp = VTOR4(vp);
14029 14037          int             error = *errorp;
14030 14038          nfs_argop4      *argop;
14031 14039          int     do_flush_pages = 0;
14032 14040  
14033 14041          ASSERT(nfs_zone() == mi->mi_zone);
14034 14042          /*
14035 14043           * The client recovery code wants the raw status information,
14036 14044           * so don't map the NFS status code to an errno value for
14037 14045           * non-normal call types.
14038 14046           */
14039 14047          if (ctype == NFS4_LCK_CTYPE_NORM) {
14040 14048                  if (*errorp == 0 && resp != NULL && skip_get_err == FALSE)
14041 14049                          *errorp = geterrno4(resp->status);
14042 14050                  if (did_start_fop == TRUE)
14043 14051                          nfs4_end_fop(mi, vp, NULL, op_hint, recov_statep,
14044 14052                              needrecov);
14045 14053  
14046 14054                  /*
14047 14055                   * We've established a new lock on the server, so invalidate
14048 14056                   * the pages associated with the vnode to get the most up to
14049 14057                   * date pages from the server after acquiring the lock. We
14050 14058                   * want to be sure that the read operation gets the newest data.
14051 14059                   * N.B.
14052 14060                   * We used to do this in nfs4frlock_results_ok but that doesn't
14053 14061                   * work since VOP_PUTPAGE can call nfs4_commit which calls
14054 14062                   * nfs4_start_fop. We flush the pages below after calling
14055 14063                   * nfs4_end_fop above
14056 14064                   * The flush of the page cache must be done after
14057 14065                   * nfs4_end_open_seqid_sync() to avoid a 4-way hang.
14058 14066                   */
14059 14067                  if (!error && resp && resp->status == NFS4_OK)
14060 14068                          do_flush_pages = 1;
14061 14069          }
             /*
              * Free the two-op compound (PUTFH + LOCK/LOCKT) and its reply if
              * an earlier path (e.g. the blocking-lock retry) hasn't already.
              */
14062 14070          if (argsp) {
14063 14071                  ASSERT(argsp->array_len == 2);
14064 14072                  argop = argsp->array;
14065 14073                  if (argop[1].argop == OP_LOCK)
14066 14074                          nfs4args_lock_free(&argop[1]);
14067 14075                  else if (argop[1].argop == OP_LOCKT)
14068 14076                          nfs4args_lockt_free(&argop[1]);
14069 14077                  kmem_free(argop, 2 * sizeof (nfs_argop4));
14070 14078                  if (resp)
14071 14079                          (void) xdr_free(xdr_COMPOUND4res_clnt, (caddr_t)resp);
14072 14080          }
14073 14081  
14074 14082          /* free the reference on the lock owner */
14075 14083          if (lop != NULL) {
14076 14084                  nfs4_end_lock_seqid_sync(lop);
14077 14085                  lock_owner_rele(lop);
14078 14086          }
14079 14087  
14080 14088          /* need to free up the reference on osp for lock args */
14081 14089          if (osp != NULL)
14082 14090                  open_stream_rele(osp, rp);
14083 14091  
14084 14092          /* need to free up the reference on oop for lock args */
14085 14093          if (oop != NULL) {
14086 14094                  nfs4_end_open_seqid_sync(oop);
14087 14095                  open_owner_rele(oop);
14088 14096          }
14089 14097  
             /* Deferred page flush; must follow the seqid syncs (see above). */
14090 14098          if (do_flush_pages)
14091 14099                  nfs4_flush_pages(vp, cred);
14092 14100  
             /*
              * Convert the lock range back to the caller's original
              * whence/offset representation.
              */
14093 14101          (void) convoff(vp, flk, whence, offset);
14094 14102  
             /* Release the sysid reference obtained for this request. */
14095 14103          lm_rel_sysid(ls);
14096 14104  
14097 14105          /*
14098 14106           * Record debug information in the event we get EINVAL.
14099 14107           */
14100 14108          mutex_enter(&mi->mi_lock);
14101 14109          if (*errorp == EINVAL && (lock_args || locku_args) &&
14102 14110              (!(mi->mi_flags & MI4_POSIX_LOCK))) {
                     /* Warn only once per mount (MI4_LOCK_DEBUG latches). */
14103 14111                  if (!(mi->mi_flags & MI4_LOCK_DEBUG)) {
14104 14112                          zcmn_err(getzoneid(), CE_NOTE,
14105 14113                              "%s operation failed with "
14106 14114                              "EINVAL probably since the server, %s,"
14107 14115                              " doesn't support POSIX style locking",
14108 14116                              lock_args ? "LOCK" : "LOCKU",
14109 14117                              mi->mi_curr_serv->sv_hostname);
14110 14118                          mi->mi_flags |= MI4_LOCK_DEBUG;
14111 14119                  }
14112 14120          }
14113 14121          mutex_exit(&mi->mi_lock);
14114 14122  
14115 14123          if (cred_otw)
14116 14124                  crfree(cred_otw);
14117 14125  }
14118 14126  
14119 14127  /*
14120 14128   * This calls the server and the local locking code.
14121 14129   *
14122 14130   * Client locks are registered locally by OR-ing the sysid with
14123 14131   * LM_SYSID_CLIENT. The server registers locks locally using just the sysid.
14124 14132   * We need to distinguish between the two to avoid collision in case one
14125 14133   * machine is used as both client and server.
14126 14134   *
14127 14135   * Blocking lock requests will continually retry to acquire the lock
14128 14136   * forever.
14129 14137   *
14130 14138   * The ctype is defined as follows:
14131 14139   * NFS4_LCK_CTYPE_NORM: normal lock request.
14132 14140   *
14133 14141   * NFS4_LCK_CTYPE_RECLAIM:  bypass the usual calls for synchronizing with client
14134 14142   * recovery, get the pid from flk instead of curproc, and don't reregister
14135 14143   * the lock locally.
14136 14144   *
14137 14145   * NFS4_LCK_CTYPE_RESEND: same as NFS4_LCK_CTYPE_RECLAIM, with the addition
14138 14146   * that we will use the information passed in via resend_rqstp to setup the
14139 14147   * lock/locku request.  This resend is the exact same request as the 'lost
14140 14148   * lock', and is initiated by the recovery framework. A successful resend
14141 14149   * request can initiate one or more reinstate requests.
14142 14150   *
14143 14151   * NFS4_LCK_CTYPE_REINSTATE: same as NFS4_LCK_CTYPE_RESEND, except that it
14144 14152   * does not trigger additional reinstate requests.  This lock call type is
14145 14153   * set for setting the v4 server's locking state back to match what the
14146 14154   * client's local locking state is in the event of a received 'lost lock'.
14147 14155   *
14148 14156   * Errors are returned via the nfs4_error_t parameter.
14149 14157   */
14150 14158  void
14151 14159  nfs4frlock(nfs4_lock_call_type_t ctype, vnode_t *vp, int cmd, flock64_t *flk,
14152 14160      int flag, u_offset_t offset, cred_t *cr, nfs4_error_t *ep,
14153 14161      nfs4_lost_rqst_t *resend_rqstp, int *did_reclaimp)
14154 14162  {
14155 14163          COMPOUND4args_clnt      args, *argsp = NULL;
14156 14164          COMPOUND4res_clnt       res, *resp = NULL;
14157 14165          nfs_argop4      *argop;
14158 14166          nfs_resop4      *resop;
14159 14167          rnode4_t        *rp;
14160 14168          int             doqueue = 1;
14161 14169          clock_t         tick_delay;  /* delay in clock ticks */
14162 14170          struct lm_sysid *ls;
14163 14171          LOCK4args       *lock_args = NULL;
14164 14172          LOCKU4args      *locku_args = NULL;
14165 14173          LOCKT4args      *lockt_args = NULL;
14166 14174          nfs4_open_owner_t *oop = NULL;
14167 14175          nfs4_open_stream_t *osp = NULL;
14168 14176          nfs4_lock_owner_t *lop = NULL;
14169 14177          bool_t          needrecov = FALSE;
14170 14178          nfs4_recov_state_t recov_state;
14171 14179          short           whence;
14172 14180          nfs4_op_hint_t  op_hint;
14173 14181          nfs4_lost_rqst_t lost_rqst;
14174 14182          bool_t          retry = FALSE;
14175 14183          bool_t          did_start_fop = FALSE;
14176 14184          bool_t          skip_get_err = FALSE;
14177 14185          cred_t          *cred_otw = NULL;
14178 14186          bool_t          recovonly;      /* just queue request */
14179 14187          int             frc_no_reclaim = 0;
14180 14188  #ifdef DEBUG
14181 14189          char *name;
14182 14190  #endif
14183 14191  
14184 14192          ASSERT(nfs_zone() == VTOMI4(vp)->mi_zone);
14185 14193  
14186 14194  #ifdef DEBUG
14187 14195          name = fn_name(VTOSV(vp)->sv_name);
14188 14196          NFS4_DEBUG(nfs4_client_lock_debug, (CE_NOTE, "nfs4frlock: "
14189 14197              "%s: cmd %d, type %d, offset %llu, start %"PRIx64", "
14190 14198              "length %"PRIu64", pid %d, sysid %d, call type %s, "
14191 14199              "resend request %s", name, cmd, flk->l_type, offset, flk->l_start,
14192 14200              flk->l_len, ctype == NFS4_LCK_CTYPE_NORM ? curproc->p_pid :
14193 14201              flk->l_pid, flk->l_sysid, nfs4frlock_get_call_type(ctype),
14194 14202              resend_rqstp ? "TRUE" : "FALSE"));
14195 14203          kmem_free(name, MAXNAMELEN);
14196 14204  #endif
14197 14205  
14198 14206          nfs4_error_zinit(ep);
14199 14207          ep->error = nfs4frlock_validate_args(cmd, flk, flag, vp, offset);
14200 14208          if (ep->error)
14201 14209                  return;
14202 14210          ep->error = nfs4frlock_get_sysid(&ls, vp, flk);
14203 14211          if (ep->error)
14204 14212                  return;
14205 14213          nfs4frlock_pre_setup(&tick_delay, &recov_state, flk, &whence,
14206 14214              vp, cr, &cred_otw);
14207 14215  
14208 14216  recov_retry:
14209 14217          nfs4frlock_call_init(&args, &argsp, &argop, &op_hint, flk, cmd,
14210 14218              &retry, &did_start_fop, &resp, &skip_get_err, &lost_rqst);
14211 14219          rp = VTOR4(vp);
14212 14220  
14213 14221          ep->error = nfs4frlock_start_call(ctype, vp, op_hint, &recov_state,
14214 14222              &did_start_fop, &recovonly);
14215 14223  
14216 14224          if (ep->error)
14217 14225                  goto out;
14218 14226  
14219 14227          if (recovonly) {
14220 14228                  /*
14221 14229                   * Leave the request for the recovery system to deal with.
14222 14230                   */
14223 14231                  ASSERT(ctype == NFS4_LCK_CTYPE_NORM);
14224 14232                  ASSERT(cmd != F_GETLK);
14225 14233                  ASSERT(flk->l_type == F_UNLCK);
14226 14234  
14227 14235                  nfs4_error_init(ep, EINTR);
14228 14236                  needrecov = TRUE;
14229 14237                  lop = find_lock_owner(rp, curproc->p_pid, LOWN_ANY);
14230 14238                  if (lop != NULL) {
14231 14239                          nfs4frlock_save_lost_rqst(ctype, ep->error, READ_LT,
14232 14240                              NULL, NULL, lop, flk, &lost_rqst, cr, vp);
14233 14241                          (void) nfs4_start_recovery(ep,
14234 14242                              VTOMI4(vp), vp, NULL, NULL,
14235 14243                              (lost_rqst.lr_op == OP_LOCK ||
14236 14244                              lost_rqst.lr_op == OP_LOCKU) ?
14237 14245                              &lost_rqst : NULL, OP_LOCKU, NULL, NULL, NULL);
14238 14246                          lock_owner_rele(lop);
14239 14247                          lop = NULL;
14240 14248                  }
14241 14249                  flk->l_pid = curproc->p_pid;
14242 14250                  nfs4_register_lock_locally(vp, flk, flag, offset);
14243 14251                  goto out;
14244 14252          }
14245 14253  
14246 14254          /* putfh directory fh */
14247 14255          argop[0].argop = OP_CPUTFH;
14248 14256          argop[0].nfs_argop4_u.opcputfh.sfh = rp->r_fh;
14249 14257  
14250 14258          /*
14251 14259           * Set up the over-the-wire arguments and get references to the
14252 14260           * open owner, etc.
14253 14261           */
14254 14262  
14255 14263          if (ctype == NFS4_LCK_CTYPE_RESEND ||
14256 14264              ctype == NFS4_LCK_CTYPE_REINSTATE) {
14257 14265                  nfs4frlock_setup_resend_lock_args(resend_rqstp, argsp,
14258 14266                      &argop[1], &lop, &oop, &osp, &lock_args, &locku_args);
14259 14267          } else {
14260 14268                  bool_t go_otw = TRUE;
14261 14269  
14262 14270                  ASSERT(resend_rqstp == NULL);
14263 14271  
14264 14272                  switch (cmd) {
14265 14273                  case F_GETLK:
14266 14274                  case F_O_GETLK:
14267 14275                          nfs4frlock_setup_lockt_args(ctype, &argop[1],
14268 14276                              &lockt_args, argsp, flk, rp);
14269 14277                          break;
14270 14278                  case F_SETLKW:
14271 14279                  case F_SETLK:
14272 14280                          if (flk->l_type == F_UNLCK)
14273 14281                                  nfs4frlock_setup_locku_args(ctype,
14274 14282                                      &argop[1], &locku_args, flk,
14275 14283                                      &lop, ep, argsp,
14276 14284                                      vp, flag, offset, cr,
14277 14285                                      &skip_get_err, &go_otw);
14278 14286                          else
14279 14287                                  nfs4frlock_setup_lock_args(ctype,
14280 14288                                      &lock_args, &oop, &osp, &lop, &argop[1],
14281 14289                                      argsp, flk, cmd, vp, cr, ep);
14282 14290  
14283 14291                          if (ep->error)
14284 14292                                  goto out;
14285 14293  
14286 14294                          switch (ep->stat) {
14287 14295                          case NFS4_OK:
14288 14296                                  break;
14289 14297                          case NFS4ERR_DELAY:
14290 14298                                  /* recov thread never gets this error */
14291 14299                                  ASSERT(resend_rqstp == NULL);
14292 14300                                  ASSERT(did_start_fop);
14293 14301  
14294 14302                                  nfs4_end_fop(VTOMI4(vp), vp, NULL, op_hint,
14295 14303                                      &recov_state, TRUE);
14296 14304                                  did_start_fop = FALSE;
14297 14305                                  if (argop[1].argop == OP_LOCK)
14298 14306                                          nfs4args_lock_free(&argop[1]);
14299 14307                                  else if (argop[1].argop == OP_LOCKT)
14300 14308                                          nfs4args_lockt_free(&argop[1]);
14301 14309                                  kmem_free(argop, 2 * sizeof (nfs_argop4));
14302 14310                                  argsp = NULL;
14303 14311                                  goto recov_retry;
14304 14312                          default:
14305 14313                                  ep->error = EIO;
14306 14314                                  goto out;
14307 14315                          }
14308 14316                          break;
14309 14317                  default:
14310 14318                          NFS4_DEBUG(nfs4_client_lock_debug, (CE_NOTE,
14311 14319                              "nfs4_frlock: invalid cmd %d", cmd));
14312 14320                          ep->error = EINVAL;
14313 14321                          goto out;
14314 14322                  }
14315 14323  
14316 14324                  if (!go_otw)
14317 14325                          goto out;
14318 14326          }
14319 14327  
14320 14328          /* XXX should we use the local reclock as a cache ? */
14321 14329          /*
14322 14330           * Unregister the lock with the local locking code before
14323 14331           * contacting the server.  This avoids a potential race where
14324 14332           * another process gets notified that it has been granted a lock
14325 14333           * before we can unregister ourselves locally.
14326 14334           */
14327 14335          if ((cmd == F_SETLK || cmd == F_SETLKW) && flk->l_type == F_UNLCK) {
14328 14336                  if (ctype == NFS4_LCK_CTYPE_NORM)
14329 14337                          flk->l_pid = ttoproc(curthread)->p_pid;
14330 14338                  nfs4_register_lock_locally(vp, flk, flag, offset);
14331 14339          }
14332 14340  
14333 14341          /*
14334 14342           * Send the server the lock request.  Continually loop with a delay
14335 14343           * if get error NFS4ERR_DENIED (for blocking locks) or NFS4ERR_GRACE.
14336 14344           */
14337 14345          resp = &res;
14338 14346  
14339 14347          NFS4_DEBUG((nfs4_client_call_debug || nfs4_client_lock_debug),
14340 14348              (CE_NOTE,
14341 14349              "nfs4frlock: %s call, rp %s", needrecov ? "recov" : "first",
14342 14350              rnode4info(rp)));
14343 14351  
14344 14352          if (lock_args && frc_no_reclaim) {
14345 14353                  ASSERT(ctype == NFS4_LCK_CTYPE_RECLAIM);
14346 14354                  NFS4_DEBUG(nfs4_client_lock_debug, (CE_NOTE,
14347 14355                      "nfs4frlock: frc_no_reclaim: clearing reclaim"));
14348 14356                  lock_args->reclaim = FALSE;
14349 14357                  if (did_reclaimp)
14350 14358                          *did_reclaimp = 0;
14351 14359          }
14352 14360  
14353 14361          /*
14354 14362           * Do the OTW call.
14355 14363           */
14356 14364          rfs4call(VTOMI4(vp), argsp, resp, cred_otw, &doqueue, 0, ep);
14357 14365  
14358 14366          NFS4_DEBUG(nfs4_client_lock_debug, (CE_NOTE,
14359 14367              "nfs4frlock: error %d, status %d", ep->error, resp->status));
14360 14368  
14361 14369          needrecov = nfs4_needs_recovery(ep, TRUE, vp->v_vfsp);
14362 14370          NFS4_DEBUG(nfs4_client_lock_debug, (CE_NOTE,
14363 14371              "nfs4frlock: needrecov %d", needrecov));
14364 14372  
14365 14373          if (ep->error == 0 && nfs4_need_to_bump_seqid(resp))
14366 14374                  nfs4frlock_bump_seqid(lock_args, locku_args, oop, lop,
14367 14375                      args.ctag);
14368 14376  
14369 14377          /*
14370 14378           * Check if one of these mutually exclusive error cases has
14371 14379           * happened:
14372 14380           *   need to swap credentials due to access error
14373 14381           *   recovery is needed
14374 14382           *   different error (only known case is missing Kerberos ticket)
14375 14383           */
14376 14384  
14377 14385          if ((ep->error == EACCES ||
14378 14386              (ep->error == 0 && resp->status == NFS4ERR_ACCESS)) &&
14379 14387              cred_otw != cr) {
14380 14388                  nfs4frlock_check_access(vp, op_hint, &recov_state, needrecov,
14381 14389                      &did_start_fop, &argsp, &resp, ep->error, &lop, &oop, &osp,
14382 14390                      cr, &cred_otw);
14383 14391                  goto recov_retry;
14384 14392          }
14385 14393  
14386 14394          if (needrecov) {
14387 14395                  /*
14388 14396                   * LOCKT requests don't need to recover from lost
14389 14397                   * requests since they don't create/modify state.
14390 14398                   */
14391 14399                  if ((ep->error == EINTR ||
14392 14400                      NFS4_FRC_UNMT_ERR(ep->error, vp->v_vfsp)) &&
14393 14401                      lockt_args)
14394 14402                          goto out;
14395 14403                  /*
14396 14404                   * Do not attempt recovery for requests initiated by
14397 14405                   * the recovery framework.  Let the framework redrive them.
14398 14406                   */
14399 14407                  if (ctype != NFS4_LCK_CTYPE_NORM)
14400 14408                          goto out;
14401 14409                  else {
14402 14410                          ASSERT(resend_rqstp == NULL);
14403 14411                  }
14404 14412  
14405 14413                  nfs4frlock_save_lost_rqst(ctype, ep->error,
14406 14414                      flk_to_locktype(cmd, flk->l_type),
14407 14415                      oop, osp, lop, flk, &lost_rqst, cred_otw, vp);
14408 14416  
14409 14417                  retry = nfs4frlock_recovery(needrecov, ep, &argsp,
14410 14418                      &resp, lock_args, locku_args, &oop, &osp, &lop,
14411 14419                      rp, vp, &recov_state, op_hint, &did_start_fop,
14412 14420                      cmd != F_GETLK ? &lost_rqst : NULL, flk);
14413 14421  
14414 14422                  if (retry) {
14415 14423                          ASSERT(oop == NULL);
14416 14424                          ASSERT(osp == NULL);
14417 14425                          ASSERT(lop == NULL);
14418 14426                          goto recov_retry;
14419 14427                  }
14420 14428                  goto out;
14421 14429          }
14422 14430  
14423 14431          /*
14424 14432           * Bail out if have reached this point with ep->error set. Can
14425 14433           * happen if (ep->error == EACCES && !needrecov && cred_otw == cr).
14426 14434           * This happens if Kerberos ticket has expired or has been
14427 14435           * destroyed.
14428 14436           */
14429 14437          if (ep->error != 0)
14430 14438                  goto out;
14431 14439  
14432 14440          /*
14433 14441           * Process the reply.
14434 14442           */
14435 14443          switch (resp->status) {
14436 14444          case NFS4_OK:
14437 14445                  resop = &resp->array[1];
14438 14446                  nfs4frlock_results_ok(ctype, cmd, flk, vp, flag, offset,
14439 14447                      resend_rqstp);
14440 14448                  /*
14441 14449                   * Have a successful lock operation, now update state.
14442 14450                   */
14443 14451                  nfs4frlock_update_state(lock_args, locku_args, lockt_args,
14444 14452                      resop, lop, vp, flk, cr, resend_rqstp);
14445 14453                  break;
14446 14454  
14447 14455          case NFS4ERR_DENIED:
14448 14456                  resop = &resp->array[1];
14449 14457                  retry = nfs4frlock_results_denied(ctype, lock_args, lockt_args,
14450 14458                      &oop, &osp, &lop, cmd, vp, flk, op_hint,
14451 14459                      &recov_state, needrecov, &argsp, &resp,
14452 14460                      &tick_delay, &whence, &ep->error, resop, cr,
14453 14461                      &did_start_fop, &skip_get_err);
14454 14462  
14455 14463                  if (retry) {
14456 14464                          ASSERT(oop == NULL);
14457 14465                          ASSERT(osp == NULL);
14458 14466                          ASSERT(lop == NULL);
14459 14467                          goto recov_retry;
14460 14468                  }
14461 14469                  break;
14462 14470          /*
14463 14471           * If the server won't let us reclaim, fall-back to trying to lock
14464 14472           * the file from scratch. Code elsewhere will check the changeinfo
14465 14473           * to ensure the file hasn't been changed.
14466 14474           */
14467 14475          case NFS4ERR_NO_GRACE:
14468 14476                  if (lock_args && lock_args->reclaim == TRUE) {
14469 14477                          ASSERT(ctype == NFS4_LCK_CTYPE_RECLAIM);
14470 14478                          NFS4_DEBUG(nfs4_client_lock_debug, (CE_NOTE,
14471 14479                              "nfs4frlock: reclaim: NFS4ERR_NO_GRACE"));
14472 14480                          frc_no_reclaim = 1;
14473 14481                          /* clean up before retrying */
14474 14482                          needrecov = 0;
14475 14483                          (void) nfs4frlock_recovery(needrecov, ep, &argsp, &resp,
14476 14484                              lock_args, locku_args, &oop, &osp, &lop, rp, vp,
14477 14485                              &recov_state, op_hint, &did_start_fop, NULL, flk);
14478 14486                          goto recov_retry;
14479 14487                  }
14480 14488                  /* FALLTHROUGH */
14481 14489  
14482 14490          default:
14483 14491                  nfs4frlock_results_default(resp, &ep->error);
14484 14492                  break;
14485 14493          }
14486 14494  out:
14487 14495          /*
14488 14496           * Process and cleanup from error.  Make interrupted unlock
14489 14497           * requests look successful, since they will be handled by the
14490 14498           * client recovery code.
14491 14499           */
14492 14500          nfs4frlock_final_cleanup(ctype, argsp, resp, vp, op_hint, &recov_state,
14493 14501              needrecov, oop, osp, lop, flk, whence, offset, ls, &ep->error,
14494 14502              lock_args, locku_args, did_start_fop,
14495 14503              skip_get_err, cred_otw, cr);
14496 14504  
14497 14505          if (ep->error == EINTR && flk->l_type == F_UNLCK &&
14498 14506              (cmd == F_SETLK || cmd == F_SETLKW))
14499 14507                  ep->error = 0;
14500 14508  }
14501 14509  
14502 14510  /*
14503 14511   * nfs4_safelock:
14504 14512   *
14505 14513   * Return non-zero if the given lock request can be handled without
14506 14514   * violating the constraints on concurrent mapping and locking.
14507 14515   */
14508 14516  
14509 14517  static int
14510 14518  nfs4_safelock(vnode_t *vp, const struct flock64 *bfp, cred_t *cr)
14511 14519  {
14512 14520          rnode4_t *rp = VTOR4(vp);
14513 14521          struct vattr va;
14514 14522          int error;
14515 14523  
14516 14524          ASSERT(nfs_zone() == VTOMI4(vp)->mi_zone);
14517 14525          ASSERT(rp->r_mapcnt >= 0);
14518 14526          NFS4_DEBUG(nfs4_client_lock_debug, (CE_NOTE, "nfs4_safelock %s: "
14519 14527              "(%"PRIx64", %"PRIx64"); mapcnt = %ld", bfp->l_type == F_WRLCK ?
14520 14528              "write" : bfp->l_type == F_RDLCK ? "read" : "unlock",
14521 14529              bfp->l_start, bfp->l_len, rp->r_mapcnt));
14522 14530  
14523 14531          if (rp->r_mapcnt == 0)
14524 14532                  return (1);             /* always safe if not mapped */
14525 14533  
14526 14534          /*
14527 14535           * If the file is already mapped and there are locks, then they
14528 14536           * should be all safe locks.  So adding or removing a lock is safe
14529 14537           * as long as the new request is safe (i.e., whole-file, meaning
14530 14538           * length and starting offset are both zero).
14531 14539           */
14532 14540  
14533 14541          if (bfp->l_start != 0 || bfp->l_len != 0) {
14534 14542                  NFS4_DEBUG(nfs4_client_lock_debug, (CE_NOTE, "nfs4_safelock: "
14535 14543                      "cannot lock a memory mapped file unless locking the "
14536 14544                      "entire file: start %"PRIx64", len %"PRIx64,
14537 14545                      bfp->l_start, bfp->l_len));
14538 14546                  return (0);
14539 14547          }
14540 14548  
14541 14549          /* mandatory locking and mapping don't mix */
14542 14550          va.va_mask = AT_MODE;
14543 14551          error = VOP_GETATTR(vp, &va, 0, cr, NULL);
14544 14552          if (error != 0) {
14545 14553                  NFS4_DEBUG(nfs4_client_lock_debug, (CE_NOTE, "nfs4_safelock: "
14546 14554                      "getattr error %d", error));
14547 14555                  return (0);             /* treat errors conservatively */
14548 14556          }
14549 14557          if (MANDLOCK(vp, va.va_mode)) {
14550 14558                  NFS4_DEBUG(nfs4_client_lock_debug, (CE_NOTE, "nfs4_safelock: "
14551 14559                      "cannot mandatory lock and mmap a file"));
14552 14560                  return (0);
14553 14561          }
14554 14562  
14555 14563          return (1);
14556 14564  }
14557 14565  
14558 14566  
14559 14567  /*
14560 14568   * Register the lock locally within Solaris.
14561 14569   * As the client, we "or" the sysid with LM_SYSID_CLIENT when
14562 14570   * recording locks locally.
14563 14571   *
14564 14572   * This should handle conflicts/cooperation with NFS v2/v3 since all locks
14565 14573   * are registered locally.
14566 14574   */
14567 14575  void
14568 14576  nfs4_register_lock_locally(vnode_t *vp, struct flock64 *flk, int flag,
14569 14577      u_offset_t offset)
14570 14578  {
14571 14579          int oldsysid;
14572 14580          int error;
14573 14581  #ifdef DEBUG
14574 14582          char *name;
14575 14583  #endif
14576 14584  
14577 14585          ASSERT(nfs_zone() == VTOMI4(vp)->mi_zone);
14578 14586  
14579 14587  #ifdef DEBUG
14580 14588          name = fn_name(VTOSV(vp)->sv_name);
14581 14589          NFS4_DEBUG(nfs4_client_lock_debug,
14582 14590              (CE_NOTE, "nfs4_register_lock_locally: %s: type %d, "
14583 14591              "start %"PRIx64", length %"PRIx64", pid %ld, sysid %d",
14584 14592              name, flk->l_type, flk->l_start, flk->l_len, (long)flk->l_pid,
14585 14593              flk->l_sysid));
14586 14594          kmem_free(name, MAXNAMELEN);
14587 14595  #endif
14588 14596  
14589 14597          /* register the lock with local locking */
14590 14598          oldsysid = flk->l_sysid;
14591 14599          flk->l_sysid |= LM_SYSID_CLIENT;
14592 14600          error = reclock(vp, flk, SETFLCK, flag, offset, NULL);
14593 14601  #ifdef DEBUG
14594 14602          if (error != 0) {
14595 14603                  NFS4_DEBUG(nfs4_client_lock_debug, (CE_NOTE,
14596 14604                      "nfs4_register_lock_locally: could not register with"
14597 14605                      " local locking"));
14598 14606                  NFS4_DEBUG(nfs4_client_lock_debug, (CE_CONT,
14599 14607                      "error %d, vp 0x%p, pid %d, sysid 0x%x",
14600 14608                      error, (void *)vp, flk->l_pid, flk->l_sysid));
14601 14609                  NFS4_DEBUG(nfs4_client_lock_debug, (CE_CONT,
14602 14610                      "type %d off 0x%" PRIx64 " len 0x%" PRIx64,
14603 14611                      flk->l_type, flk->l_start, flk->l_len));
14604 14612                  (void) reclock(vp, flk, 0, flag, offset, NULL);
14605 14613                  NFS4_DEBUG(nfs4_client_lock_debug, (CE_CONT,
14606 14614                      "blocked by pid %d sysid 0x%x type %d "
14607 14615                      "off 0x%" PRIx64 " len 0x%" PRIx64,
14608 14616                      flk->l_pid, flk->l_sysid, flk->l_type, flk->l_start,
14609 14617                      flk->l_len));
14610 14618          }
14611 14619  #endif
14612 14620          flk->l_sysid = oldsysid;
14613 14621  }
14614 14622  
14615 14623  /*
14616 14624   * nfs4_lockrelease:
14617 14625   *
14618 14626   * Release any locks on the given vnode that are held by the current
14619 14627   * process.  Also removes the lock owner (if one exists) from the rnode's
14620 14628   * list.
14621 14629   */
static int
nfs4_lockrelease(vnode_t *vp, int flag, offset_t offset, cred_t *cr)
{
	flock64_t ld;
	int ret, error;
	rnode4_t *rp;
	nfs4_lock_owner_t *lop;
	nfs4_recov_state_t recov_state;
	mntinfo4_t *mi;
	bool_t possible_orphan = FALSE;
	bool_t recovonly;

	ASSERT((uintptr_t)vp > KERNELBASE);
	ASSERT(nfs_zone() == VTOMI4(vp)->mi_zone);

	rp = VTOR4(vp);
	mi = VTOMI4(vp);

	/*
	 * If we have not locked anything then we can
	 * just return since we have no work to do.
	 */
	if (rp->r_lo_head.lo_next_rnode == &rp->r_lo_head) {
		return (0);
	}

	/*
	 * We need to comprehend that another thread may
	 * kick off recovery and the lock_owner we have stashed
	 * in lop might be invalid so we should NOT cache it
	 * locally!
	 */
	recov_state.rs_flags = 0;
	recov_state.rs_num_retry_despite_err = 0;
	error = nfs4_start_fop(mi, vp, NULL, OH_LOCKU, &recov_state,
	    &recovonly);
	if (error) {
		/*
		 * Could not serialize with recovery; record (via
		 * R4LODANGLERS) that this rnode's lock-owner cleanup
		 * did not complete before bailing out.
		 */
		mutex_enter(&rp->r_statelock);
		rp->r_flags |= R4LODANGLERS;
		mutex_exit(&rp->r_statelock);
		return (error);
	}

	lop = find_lock_owner(rp, curproc->p_pid, LOWN_ANY);

	/*
	 * Check if the lock owner might have a lock (request was sent but
	 * no response was received).  Also check if there are any remote
	 * locks on the file.  (In theory we shouldn't have to make this
	 * second check if there's no lock owner, but for now we'll be
	 * conservative and do it anyway.)  If either condition is true,
	 * send an unlock for the entire file to the server.
	 *
	 * Note that no explicit synchronization is needed here.  At worst,
	 * flk_has_remote_locks() will return a false positive, in which case
	 * the unlock call wastes time but doesn't harm correctness.
	 */

	if (lop) {
		mutex_enter(&lop->lo_lock);
		possible_orphan = lop->lo_pending_rqsts;
		mutex_exit(&lop->lo_lock);
		lock_owner_rele(lop);
	}

	nfs4_end_fop(mi, vp, NULL, OH_LOCKU, &recov_state, 0);

	NFS4_DEBUG(nfs4_client_lock_debug, (CE_NOTE,
	    "nfs4_lockrelease: possible orphan %d, remote locks %d, for "
	    "lop %p.", possible_orphan, flk_has_remote_locks(vp),
	    (void *)lop));

	if (possible_orphan || flk_has_remote_locks(vp)) {
		ld.l_type = F_UNLCK;	/* set to unlock entire file */
		ld.l_whence = 0;	/* unlock from start of file */
		ld.l_start = 0;
		ld.l_len = 0;		/* do entire file */

		/* whole-file unlock on behalf of the current process */
		ret = VOP_FRLOCK(vp, F_SETLK, &ld, flag, offset, NULL,
		    cr, NULL);

		if (ret != 0) {
			/*
			 * If VOP_FRLOCK fails, make sure we unregister
			 * local locks before we continue.
			 */
			ld.l_pid = ttoproc(curthread)->p_pid;
			nfs4_register_lock_locally(vp, &ld, flag, offset);
			NFS4_DEBUG(nfs4_client_lock_debug, (CE_NOTE,
			    "nfs4_lockrelease: lock release error on vp"
			    " %p: error %d.\n", (void *)vp, ret));
		}
	}

	/*
	 * Re-enter the start_fop/end_fop window before touching the
	 * lock-owner list again; recovery may have run while the OTW
	 * unlock above was outstanding.
	 */
	recov_state.rs_flags = 0;
	recov_state.rs_num_retry_despite_err = 0;
	error = nfs4_start_fop(mi, vp, NULL, OH_LOCKU, &recov_state,
	    &recovonly);
	if (error) {
		mutex_enter(&rp->r_statelock);
		rp->r_flags |= R4LODANGLERS;
		mutex_exit(&rp->r_statelock);
		return (error);
	}

	/*
	 * So, here we're going to need to retrieve the lock-owner
	 * again (in case recovery has done a switch-a-roo) and
	 * remove it because we can.
	 */
	lop = find_lock_owner(rp, curproc->p_pid, LOWN_ANY);

	if (lop) {
		nfs4_rnode_remove_lock_owner(rp, lop);
		lock_owner_rele(lop);
	}

	nfs4_end_fop(mi, vp, NULL, OH_LOCKU, &recov_state, 0);
	return (0);
}
14742 14750  
14743 14751  /*
14744 14752   * Wait for 'tick_delay' clock ticks.
14745 14753   * Implement exponential backoff until hit the lease_time of this nfs4_server.
14746 14754   * NOTE: lock_lease_time is in seconds.
14747 14755   *
14748 14756   * XXX For future improvements, should implement a waiting queue scheme.
14749 14757   */
14750 14758  static int
14751 14759  nfs4_block_and_wait(clock_t *tick_delay, rnode4_t *rp)
14752 14760  {
14753 14761          long milliseconds_delay;
14754 14762          time_t lock_lease_time;
14755 14763  
14756 14764          /* wait tick_delay clock ticks or siginteruptus */
14757 14765          if (delay_sig(*tick_delay)) {
14758 14766                  return (EINTR);
14759 14767          }
14760 14768          NFS4_DEBUG(nfs4_client_lock_debug, (CE_NOTE, "nfs4_block_and_wait: "
14761 14769              "reissue the lock request: blocked for %ld clock ticks: %ld "
14762 14770              "milliseconds", *tick_delay, drv_hztousec(*tick_delay) / 1000));
14763 14771  
14764 14772          /* get the lease time */
14765 14773          lock_lease_time = r2lease_time(rp);
14766 14774  
14767 14775          /* drv_hztousec converts ticks to microseconds */
14768 14776          milliseconds_delay = drv_hztousec(*tick_delay) / 1000;
14769 14777          if (milliseconds_delay < lock_lease_time * 1000) {
14770 14778                  *tick_delay = 2 * *tick_delay;
14771 14779                  if (drv_hztousec(*tick_delay) > lock_lease_time * 1000 * 1000)
14772 14780                          *tick_delay = drv_usectohz(lock_lease_time*1000*1000);
14773 14781          }
14774 14782          return (0);
14775 14783  }
14776 14784  
14777 14785  
/*
 * Module initialization hook for the NFSv4 vnode operations;
 * there is currently nothing to set up.
 */
void
nfs4_vnops_init(void)
{
}
14782 14790  
/*
 * Module teardown hook for the NFSv4 vnode operations;
 * there is currently nothing to tear down.
 */
void
nfs4_vnops_fini(void)
{
}
14787 14795  
14788 14796  /*
14789 14797   * Return a reference to the directory (parent) vnode for a given vnode,
14790 14798   * using the saved pathname information and the directory file handle.  The
14791 14799   * caller is responsible for disposing of the reference.
14792 14800   * Returns zero or an errno value.
14793 14801   *
14794 14802   * Caller should set need_start_op to FALSE if it is the recovery
14795 14803   * thread, or if a start_fop has already been done.  Otherwise, TRUE.
14796 14804   */
int
vtodv(vnode_t *vp, vnode_t **dvpp, cred_t *cr, bool_t need_start_op)
{
	svnode_t *svnp;
	vnode_t *dvp = NULL;
	servinfo4_t *svp;
	nfs4_fname_t *mfname;
	int error;

	ASSERT(nfs_zone() == VTOMI4(vp)->mi_zone);

	if (vp->v_flag & VROOT) {
		/*
		 * VROOT on a regular file means this is a "file" mount:
		 * there is no shadow-vnode parent, so construct the
		 * directory vnode from the parent file handle stashed
		 * in the current servinfo4.
		 */
		nfs4_sharedfh_t *sfh;
		nfs_fh4 fh;
		mntinfo4_t *mi;

		ASSERT(vp->v_type == VREG);

		mi = VTOMI4(vp);
		svp = mi->mi_curr_serv;
		/* sv_lock protects sv_pfhandle while we copy it */
		(void) nfs_rw_enter_sig(&svp->sv_lock, RW_READER, 0);
		fh.nfs_fh4_len = svp->sv_pfhandle.fh_len;
		fh.nfs_fh4_val = svp->sv_pfhandle.fh_buf;
		sfh = sfh4_get(&fh, VTOMI4(vp));
		nfs_rw_exit(&svp->sv_lock);
		mfname = mi->mi_fname;
		/*
		 * Take a hold on the fname; makenfs4node_by_fh receives
		 * &mfname and presumably consumes the hold — confirm
		 * against its definition if changing this.
		 */
		fn_hold(mfname);
		dvp = makenfs4node_by_fh(sfh, NULL, &mfname, NULL, mi, cr, 0);
		sfh4_rele(&sfh);

		if (dvp->v_type == VNON)
			dvp->v_type = VDIR;
		*dvpp = dvp;
		return (0);
	}

	svnp = VTOSV(vp);

	if (svnp == NULL) {
		NFS4_DEBUG(nfs4_client_shadow_debug, (CE_NOTE, "vtodv: "
		    "shadow node is NULL"));
		return (EINVAL);
	}

	if (svnp->sv_name == NULL || svnp->sv_dfh == NULL) {
		NFS4_DEBUG(nfs4_client_shadow_debug, (CE_NOTE, "vtodv: "
		    "shadow node name or dfh val == NULL"));
		return (EINVAL);
	}

	/* look the parent up by "..", using the saved directory handle */
	error = nfs4_make_dotdot(svnp->sv_dfh, 0, vp, cr, &dvp,
	    (int)need_start_op);
	if (error != 0) {
		NFS4_DEBUG(nfs4_client_shadow_debug, (CE_NOTE, "vtodv: "
		    "nfs4_make_dotdot returned %d", error));
		return (error);
	}
	if (!dvp) {
		NFS4_DEBUG(nfs4_client_shadow_debug, (CE_NOTE, "vtodv: "
		    "nfs4_make_dotdot returned a NULL dvp"));
		return (EIO);
	}
	if (dvp->v_type == VNON)
		dvp->v_type = VDIR;
	ASSERT(dvp->v_type == VDIR);
	/* the parent of an extended-attribute file is an XATTR directory */
	if (VTOR4(vp)->r_flags & R4ISXATTR) {
		mutex_enter(&dvp->v_lock);
		dvp->v_flag |= V_XATTRDIR;
		mutex_exit(&dvp->v_lock);
	}
	*dvpp = dvp;
	return (0);
}
14870 14878  
14871 14879  /*
14872 14880   * Copy the (final) component name of vp to fnamep.  maxlen is the maximum
14873 14881   * length that fnamep can accept, including the trailing null.
14874 14882   * Returns 0 if okay, returns an errno value if there was a problem.
14875 14883   */
14876 14884  
14877 14885  int
14878 14886  vtoname(vnode_t *vp, char *fnamep, ssize_t maxlen)
14879 14887  {
14880 14888          char *fn;
14881 14889          int err = 0;
14882 14890          servinfo4_t *svp;
14883 14891          svnode_t *shvp;
14884 14892  
14885 14893          /*
14886 14894           * If the file being opened has VROOT set, then this is
14887 14895           * a "file" mount.  sv_name will not be interesting, so
14888 14896           * go back to the servinfo4 to get the original mount
14889 14897           * path and strip off all but the final edge.  Otherwise
14890 14898           * just return the name from the shadow vnode.
14891 14899           */
14892 14900  
14893 14901          if (vp->v_flag & VROOT) {
14894 14902  
14895 14903                  svp = VTOMI4(vp)->mi_curr_serv;
14896 14904                  (void) nfs_rw_enter_sig(&svp->sv_lock, RW_READER, 0);
14897 14905  
14898 14906                  fn = strrchr(svp->sv_path, '/');
14899 14907                  if (fn == NULL)
14900 14908                          err = EINVAL;
14901 14909                  else
14902 14910                          fn++;
14903 14911          } else {
14904 14912                  shvp = VTOSV(vp);
14905 14913                  fn = fn_name(shvp->sv_name);
14906 14914          }
14907 14915  
14908 14916          if (err == 0)
14909 14917                  if (strlen(fn) < maxlen)
14910 14918                          (void) strcpy(fnamep, fn);
14911 14919                  else
14912 14920                          err = ENAMETOOLONG;
14913 14921  
14914 14922          if (vp->v_flag & VROOT)
14915 14923                  nfs_rw_exit(&svp->sv_lock);
14916 14924          else
14917 14925                  kmem_free(fn, MAXNAMELEN);
14918 14926  
14919 14927          return (err);
14920 14928  }
14921 14929  
14922 14930  /*
14923 14931   * Bookkeeping for a close that doesn't need to go over the wire.
14924 14932   * *have_lockp is set to 0 if 'os_sync_lock' is released; otherwise
14925 14933   * it is left at 1.
14926 14934   */
void
nfs4close_notw(vnode_t *vp, nfs4_open_stream_t *osp, int *have_lockp)
{
	rnode4_t		*rp;
	mntinfo4_t		*mi;

	mi = VTOMI4(vp);
	rp = VTOR4(vp);

	NFS4_DEBUG(nfs4close_notw_debug, (CE_NOTE, "nfs4close_notw: "
	    "rp=%p osp=%p", (void *)rp, (void *)osp));
	ASSERT(nfs_zone() == mi->mi_zone);
	ASSERT(mutex_owned(&osp->os_sync_lock));
	ASSERT(*have_lockp);

	/*
	 * Nothing to do if the stream is already invalid or is still
	 * in use (open references or active mappings remain); the
	 * caller keeps os_sync_lock in this case (*have_lockp stays 1).
	 */
	if (!osp->os_valid ||
	    osp->os_open_ref_count > 0 || osp->os_mapcnt > 0) {
		return;
	}

	/*
	 * This removes the reference obtained at OPEN; ie,
	 * when the open stream structure was created.
	 *
	 * We don't have to worry about calling 'open_stream_rele'
	 * since we are currently holding a reference to this
	 * open stream which means the count can not go to 0 with
	 * this decrement.
	 */
	ASSERT(osp->os_ref_count >= 2);
	osp->os_ref_count--;
	osp->os_valid = 0;
	/* drop the lock on the caller's behalf and tell it so */
	mutex_exit(&osp->os_sync_lock);
	*have_lockp = 0;

	/* decrement the mount's count of state-holding opens */
	nfs4_dec_state_ref_count(mi);
}
14964 14972  
14965 14973  /*
14966 14974   * Close all remaining open streams on the rnode.  These open streams
14967 14975   * could be here because:
14968 14976   * - The close attempted at either close or delmap failed
14969 14977   * - Some kernel entity did VOP_OPEN but never did VOP_CLOSE
14970 14978   * - Someone did mknod on a regular file but never opened it
14971 14979   */
int
nfs4close_all(vnode_t *vp, cred_t *cr)
{
	nfs4_open_stream_t *osp;
	int error;
	nfs4_error_t e = { 0, NFS4_OK, RPC_SUCCESS };
	rnode4_t *rp;

	ASSERT(nfs_zone() == VTOMI4(vp)->mi_zone);

	/* first failure wins; later failures are dropped (see below) */
	error = 0;
	rp = VTOR4(vp);

	/*
	 * At this point, all we know is that the last time
	 * someone called vn_rele, the count was 1.  Since then,
	 * the vnode could have been re-activated.  We want to
	 * loop through the open streams and close each one, but
	 * we have to be careful since once we release the rnode
	 * hash bucket lock, someone else is free to come in and
	 * re-activate the rnode and add new open streams.  The
	 * strategy is take the rnode hash bucket lock, verify that
	 * the count is still 1, grab the open stream off the
	 * head of the list and mark it invalid, then release the
	 * rnode hash bucket lock and proceed with that open stream.
	 * This is ok because nfs4close_one() will acquire the proper
	 * open/create to close/destroy synchronization for open
	 * streams, and will ensure that if someone has reopened
	 * the open stream after we've dropped the hash bucket lock
	 * then we'll just simply return without destroying the
	 * open stream.
	 * Repeat until the list is empty.
	 */

	for (;;) {

		/* make sure vnode hasn't been reactivated */
		rw_enter(&rp->r_hashq->r_lock, RW_READER);
		mutex_enter(&vp->v_lock);
		if (vp->v_count > 1) {
			mutex_exit(&vp->v_lock);
			rw_exit(&rp->r_hashq->r_lock);
			break;
		}
		/*
		 * Grabbing r_os_lock before releasing v_lock prevents
		 * a window where the rnode/open stream could get
		 * reactivated (and os_force_close set to 0) before we
		 * had a chance to set os_force_close to 1.
		 */
		mutex_enter(&rp->r_os_lock);
		mutex_exit(&vp->v_lock);

		osp = list_head(&rp->r_open_streams);
		if (!osp) {
			/* nothing left to CLOSE OTW, so return */
			mutex_exit(&rp->r_os_lock);
			rw_exit(&rp->r_hashq->r_lock);
			break;
		}

		mutex_enter(&rp->r_statev4_lock);
		/* the file can't still be mem mapped */
		ASSERT(rp->r_mapcnt == 0);
		if (rp->created_v4)
			rp->created_v4 = 0;
		mutex_exit(&rp->r_statev4_lock);

		/*
		 * Grab a ref on this open stream; nfs4close_one
		 * will mark it as invalid
		 */
		mutex_enter(&osp->os_sync_lock);
		osp->os_ref_count++;
		osp->os_force_close = 1;
		mutex_exit(&osp->os_sync_lock);
		mutex_exit(&rp->r_os_lock);
		rw_exit(&rp->r_hashq->r_lock);

		/* all locks dropped: safe to go over the wire */
		nfs4close_one(vp, osp, cr, 0, NULL, &e, CLOSE_FORCE, 0, 0, 0);

		/* Update error if it isn't already non-zero */
		if (error == 0) {
			if (e.error)
				error = e.error;
			else if (e.stat)
				error = geterrno4(e.stat);
		}

#ifdef	DEBUG
		nfs4close_all_cnt++;
#endif
		/* Release the ref on osp acquired above. */
		open_stream_rele(osp, rp);

		/* Proceed to the next open stream, if any */
	}
	return (error);
}
15071 15079  
15072 15080  /*
15073 15081   * nfs4close_one - close one open stream for a file if needed.
15074 15082   *
15075 15083   * "close_type" indicates which close path this is:
15076 15084   * CLOSE_NORM: close initiated via VOP_CLOSE.
15077 15085   * CLOSE_DELMAP: close initiated via VOP_DELMAP.
15078 15086   * CLOSE_FORCE: close initiated via VOP_INACTIVE.  This path forces
15079 15087   *      the close and release of client state for this open stream
15080 15088   *      (unless someone else has the open stream open).
15081 15089   * CLOSE_RESEND: indicates the request is a replay of an earlier request
15082 15090   *      (e.g., due to abort because of a signal).
15083 15091   * CLOSE_AFTER_RESEND: close initiated to "undo" a successful resent OPEN.
15084 15092   *
15085 15093   * CLOSE_RESEND and CLOSE_AFTER_RESEND will not attempt to retry after client
15086 15094   * recovery.  Instead, the caller is expected to deal with retries.
15087 15095   *
15088 15096   * The caller can either pass in the osp ('provided_osp') or not.
15089 15097   *
15090 15098   * 'access_bits' represents the access we are closing/downgrading.
15091 15099   *
15092 15100   * 'len', 'maxprot', and 'mmap_flags' are used for CLOSE_DELMAP.  'len' is the
15093 15101   * number of bytes we are unmapping, 'maxprot' is the mmap protection, and
15094 15102   * 'mmap_flags' tells us the type of sharing (MAP_PRIVATE or MAP_SHARED).
15095 15103   *
15096 15104   * Errors are returned via the nfs4_error_t.
15097 15105   */
void
nfs4close_one(vnode_t *vp, nfs4_open_stream_t *provided_osp, cred_t *cr,
    int access_bits, nfs4_lost_rqst_t *lrp, nfs4_error_t *ep,
    nfs4_close_type_t close_type, size_t len, uint_t maxprot,
    uint_t mmap_flags)
{
        nfs4_open_owner_t *oop;
        nfs4_open_stream_t *osp = NULL;
        int retry = 0;
        int num_retries = NFS4_NUM_RECOV_RETRIES;
        rnode4_t *rp;
        mntinfo4_t *mi;
        nfs4_recov_state_t recov_state;
        cred_t *cred_otw = NULL;
        bool_t recovonly = FALSE;
        int isrecov;                    /* replay path: no recovery retries */
        int force_close;
        int close_failed = 0;           /* skip/abandon the OTW CLOSE */
        int did_dec_count = 0;          /* open/mmap counts decremented once */
        int did_start_op = 0;           /* nfs4_start_fop() succeeded */
        int did_force_recovlock = 0;    /* holding mi_recovlock (CLOSE_FORCE) */
        int did_start_seqid_sync = 0;   /* holding open owner seqid sync */
        int have_sync_lock = 0;         /* holding osp->os_sync_lock */

        ASSERT(nfs_zone() == VTOMI4(vp)->mi_zone);

        NFS4_DEBUG(nfs4close_one_debug, (CE_NOTE, "closing vp %p osp %p, "
            "lrp %p, close type %d len %ld prot %x mmap flags %x bits %x",
            (void *)vp, (void *)provided_osp, (void *)lrp, close_type,
            len, maxprot, mmap_flags, access_bits));

        nfs4_error_zinit(ep);
        rp = VTOR4(vp);
        mi = VTOMI4(vp);
        isrecov = (close_type == CLOSE_RESEND ||
            close_type == CLOSE_AFTER_RESEND);

        /*
         * First get the open owner.
         */
        if (!provided_osp) {
                oop = find_open_owner(cr, NFS4_PERM_CREATED, mi);
        } else {
                oop = provided_osp->os_open_owner;
                ASSERT(oop != NULL);
                open_owner_hold(oop);
        }

        if (!oop) {
                NFS4_DEBUG(nfs4_client_recov_debug, (CE_NOTE,
                    "nfs4close_one: no oop, rp %p, mi %p, cr %p, osp %p, "
                    "close type %d", (void *)rp, (void *)mi, (void *)cr,
                    (void *)provided_osp, close_type));
                ep->error = EIO;
                goto out;
        }

        cred_otw = nfs4_get_otw_cred(cr, mi, oop);
recov_retry:
        /*
         * Reset the per-attempt state; each retry starts from scratch.
         * Note that did_dec_count is deliberately NOT reset: the open/mmap
         * reference counts must only be decremented once across retries.
         */
        osp = NULL;
        close_failed = 0;
        force_close = (close_type == CLOSE_FORCE);
        retry = 0;
        did_start_op = 0;
        did_force_recovlock = 0;
        did_start_seqid_sync = 0;
        have_sync_lock = 0;
        recovonly = FALSE;
        recov_state.rs_flags = 0;
        recov_state.rs_num_retry_despite_err = 0;

        /*
         * Second synchronize with recovery.
         */
        if (!isrecov) {
                ep->error = nfs4_start_fop(mi, vp, NULL, OH_CLOSE,
                    &recov_state, &recovonly);
                if (!ep->error) {
                        did_start_op = 1;
                } else {
                        close_failed = 1;
                        /*
                         * If we couldn't get start_fop, but have to
                         * cleanup state, then at least acquire the
                         * mi_recovlock so we can synchronize with
                         * recovery.
                         */
                        if (close_type == CLOSE_FORCE) {
                                (void) nfs_rw_enter_sig(&mi->mi_recovlock,
                                    RW_READER, FALSE);
                                did_force_recovlock = 1;
                        } else
                                goto out;
                }
        }

        /*
         * We cannot attempt to get the open seqid sync if nfs4_start_fop
         * set 'recovonly' to TRUE since most likely this is due to
         * recovery being active (MI4_RECOV_ACTIV).  If recovery is active,
         * nfs4_start_open_seqid_sync() will fail with EAGAIN asking us
         * to retry, causing us to loop until recovery finishes.  Plus we
         * don't need protection over the open seqid since we're not going
         * OTW, hence don't need to use the seqid.
         */
        if (recovonly == FALSE) {
                /* need to grab the open owner sync before 'os_sync_lock' */
                ep->error = nfs4_start_open_seqid_sync(oop, mi);
                if (ep->error == EAGAIN) {
                        ASSERT(!isrecov);
                        if (did_start_op)
                                nfs4_end_fop(mi, vp, NULL, OH_CLOSE,
                                    &recov_state, TRUE);
                        if (did_force_recovlock)
                                nfs_rw_exit(&mi->mi_recovlock);
                        goto recov_retry;
                }
                did_start_seqid_sync = 1;
        }

        /*
         * Third get an open stream and acquire 'os_sync_lock' to
         * synchronize the opening/creating of an open stream with the
         * closing/destroying of an open stream.
         */
        if (!provided_osp) {
                /* returns with 'os_sync_lock' held */
                osp = find_open_stream(oop, rp);
                if (!osp) {
                        ep->error = EIO;
                        goto out;
                }
        } else {
                osp = provided_osp;
                open_stream_hold(osp);
                mutex_enter(&osp->os_sync_lock);
        }
        have_sync_lock = 1;

        ASSERT(oop == osp->os_open_owner);

        /*
         * Fourth, do any special pre-OTW CLOSE processing
         * based on the specific close type.
         */
        if ((close_type == CLOSE_NORM || close_type == CLOSE_AFTER_RESEND) &&
            !did_dec_count) {
                ASSERT(osp->os_open_ref_count > 0);
                osp->os_open_ref_count--;
                did_dec_count = 1;
                if (osp->os_open_ref_count == 0)
                        osp->os_final_close = 1;
        }

        if (close_type == CLOSE_FORCE) {
                /* see if somebody reopened the open stream. */
                if (!osp->os_force_close) {
                        NFS4_DEBUG(nfs4close_one_debug, (CE_NOTE,
                            "nfs4close_one: skip CLOSE_FORCE as osp %p "
                            "was reopened, vp %p", (void *)osp, (void *)vp));
                        ep->error = 0;
                        ep->stat = NFS4_OK;
                        goto out;
                }

                if (!osp->os_final_close && !did_dec_count) {
                        osp->os_open_ref_count--;
                        did_dec_count = 1;
                }

                /*
                 * We can't depend on os_open_ref_count being 0 due to the
                 * way executables are opened (VN_RELE to match a VOP_OPEN).
                 */
#ifdef  NOTYET
                ASSERT(osp->os_open_ref_count == 0);
#endif
                if (osp->os_open_ref_count != 0) {
                        NFS4_DEBUG(nfs4close_one_debug, (CE_NOTE,
                            "nfs4close_one: should panic here on an "
                            "ASSERT(osp->os_open_ref_count == 0). Ignoring "
                            "since this is probably the exec problem."));

                        osp->os_open_ref_count = 0;
                }

                /*
                 * There is the possibility that nfs4close_one()
                 * for close_type == CLOSE_DELMAP couldn't find the
                 * open stream, thus couldn't decrement its os_mapcnt;
                 * therefore we can't use this ASSERT yet.
                 */
#ifdef  NOTYET
                ASSERT(osp->os_mapcnt == 0);
#endif
                osp->os_mapcnt = 0;
        }

        if (close_type == CLOSE_DELMAP && !did_dec_count) {
                ASSERT(osp->os_mapcnt >= btopr(len));

                if ((mmap_flags & MAP_SHARED) && (maxprot & PROT_WRITE))
                        osp->os_mmap_write -= btopr(len);
                if (maxprot & PROT_READ)
                        osp->os_mmap_read -= btopr(len);
                if (maxprot & PROT_EXEC)
                        osp->os_mmap_read -= btopr(len);
                /* mirror the PROT_NONE check in nfs4_addmap() */
                if (!(maxprot & PROT_READ) && !(maxprot & PROT_WRITE) &&
                    !(maxprot & PROT_EXEC))
                        osp->os_mmap_read -= btopr(len);
                osp->os_mapcnt -= btopr(len);
                did_dec_count = 1;
        }

        if (recovonly) {
                nfs4_lost_rqst_t lost_rqst;

                /* request should not already be in recovery queue */
                ASSERT(lrp == NULL);
                nfs4_error_init(ep, EINTR);
                nfs4close_save_lost_rqst(ep->error, &lost_rqst, oop,
                    osp, cred_otw, vp);
                mutex_exit(&osp->os_sync_lock);
                have_sync_lock = 0;
                (void) nfs4_start_recovery(ep, mi, vp, NULL, NULL,
                    lost_rqst.lr_op == OP_CLOSE ?
                    &lost_rqst : NULL, OP_CLOSE, NULL, NULL, NULL);
                close_failed = 1;
                force_close = 0;
                goto close_cleanup;
        }

        /*
         * If a previous OTW call got NFS4ERR_BAD_SEQID, then
         * we stopped operating on the open owner's <old oo_name, old seqid>
         * space, which means we stopped operating on the open stream
         * too.  So don't go OTW (as the seqid is likely bad, and the
         * stateid could be stale, potentially triggering a false
         * setclientid), and just clean up the client's internal state.
         */
        if (osp->os_orig_oo_name != oop->oo_name) {
                NFS4_DEBUG(nfs4close_one_debug || nfs4_client_recov_debug,
                    (CE_NOTE, "nfs4close_one: skip OTW close for osp %p "
                    "oop %p due to bad seqid (orig oo_name %" PRIx64 " current "
                    "oo_name %" PRIx64")",
                    (void *)osp, (void *)oop, osp->os_orig_oo_name,
                    oop->oo_name));
                close_failed = 1;
        }

        /* If the file failed recovery, just quit. */
        mutex_enter(&rp->r_statelock);
        if (rp->r_flags & R4RECOVERR) {
                close_failed = 1;
        }
        mutex_exit(&rp->r_statelock);

        /*
         * If the force close path failed to obtain start_fop
         * then skip the OTW close and just remove the state.
         */
        if (close_failed)
                goto close_cleanup;

        /*
         * Fifth, check to see if there are still mapped pages or other
         * opens using this open stream.  If there are then we can't
         * close yet but we can see if an OPEN_DOWNGRADE is necessary.
         */
        if (osp->os_open_ref_count > 0 || osp->os_mapcnt > 0) {
                nfs4_lost_rqst_t        new_lost_rqst;
                bool_t                  needrecov = FALSE;
                cred_t                  *odg_cred_otw = NULL;
                seqid4                  open_dg_seqid = 0;

                if (osp->os_delegation) {
                        /*
                         * If this open stream was never OPENed OTW then we
                         * surely can't DOWNGRADE it (especially since the
                         * osp->open_stateid is really a delegation stateid
                         * when os_delegation is 1).
                         */
                        if (access_bits & FREAD)
                                osp->os_share_acc_read--;
                        if (access_bits & FWRITE)
                                osp->os_share_acc_write--;
                        osp->os_share_deny_none--;
                        nfs4_error_zinit(ep);
                        goto out;
                }
                nfs4_open_downgrade(access_bits, 0, oop, osp, vp, cr,
                    lrp, ep, &odg_cred_otw, &open_dg_seqid);
                needrecov = nfs4_needs_recovery(ep, TRUE, mi->mi_vfsp);
                if (needrecov && !isrecov) {
                        bool_t abort;
                        nfs4_bseqid_entry_t *bsep = NULL;

                        if (!ep->error && ep->stat == NFS4ERR_BAD_SEQID)
                                bsep = nfs4_create_bseqid_entry(oop, NULL,
                                    vp, 0,
                                    lrp ? TAG_OPEN_DG_LOST : TAG_OPEN_DG,
                                    open_dg_seqid);

                        nfs4open_dg_save_lost_rqst(ep->error, &new_lost_rqst,
                            oop, osp, odg_cred_otw, vp, access_bits, 0);
                        mutex_exit(&osp->os_sync_lock);
                        have_sync_lock = 0;
                        abort = nfs4_start_recovery(ep, mi, vp, NULL, NULL,
                            new_lost_rqst.lr_op == OP_OPEN_DOWNGRADE ?
                            &new_lost_rqst : NULL, OP_OPEN_DOWNGRADE,
                            bsep, NULL, NULL);
                        if (odg_cred_otw)
                                crfree(odg_cred_otw);
                        if (bsep)
                                kmem_free(bsep, sizeof (*bsep));

                        if (abort == TRUE)
                                goto out;

                        /*
                         * Drop everything acquired this attempt (in
                         * reverse order) before retrying from scratch.
                         */
                        if (did_start_seqid_sync) {
                                nfs4_end_open_seqid_sync(oop);
                                did_start_seqid_sync = 0;
                        }
                        open_stream_rele(osp, rp);

                        if (did_start_op)
                                nfs4_end_fop(mi, vp, NULL, OH_CLOSE,
                                    &recov_state, FALSE);
                        if (did_force_recovlock)
                                nfs_rw_exit(&mi->mi_recovlock);

                        goto recov_retry;
                } else {
                        if (odg_cred_otw)
                                crfree(odg_cred_otw);
                }
                goto out;
        }

        /*
         * If this open stream was created as the results of an open
         * while holding a delegation, then just release it; no need
         * to do an OTW close.  Otherwise do a "normal" OTW close.
         */
        if (osp->os_delegation) {
                nfs4close_notw(vp, osp, &have_sync_lock);
                nfs4_error_zinit(ep);
                goto out;
        }

        /*
         * If this stream is not valid, we're done.
         */
        if (!osp->os_valid) {
                nfs4_error_zinit(ep);
                goto out;
        }

        /*
         * Last open or mmap ref has vanished, need to do an OTW close.
         * First check to see if a close is still necessary.
         */
        if (osp->os_failed_reopen) {
                NFS4_DEBUG(nfs4_client_recov_debug, (CE_NOTE,
                    "don't close OTW osp %p since reopen failed.",
                    (void *)osp));
                /*
                 * Reopen of the open stream failed, hence the
                 * stateid of the open stream is invalid/stale, and
                 * sending this OTW would incorrectly cause another
                 * round of recovery.  In this case, we need to set
                 * the 'os_valid' bit to 0 so another thread doesn't
                 * come in and re-open this open stream before
                 * this "closing" thread cleans up state (decrementing
                 * the nfs4_server_t's state_ref_count and decrementing
                 * the os_ref_count).
                 */
                osp->os_valid = 0;
                /*
                 * This removes the reference obtained at OPEN; ie,
                 * when the open stream structure was created.
                 *
                 * We don't have to worry about calling 'open_stream_rele'
                 * since we are currently holding a reference to this
                 * open stream which means the count can not go to 0 with
                 * this decrement.
                 */
                ASSERT(osp->os_ref_count >= 2);
                osp->os_ref_count--;
                nfs4_error_zinit(ep);
                close_failed = 0;
                goto close_cleanup;
        }

        ASSERT(osp->os_ref_count > 1);

        /*
         * Sixth, try the CLOSE OTW.
         */
        nfs4close_otw(rp, cred_otw, oop, osp, &retry, &did_start_seqid_sync,
            close_type, ep, &have_sync_lock);

        if (ep->error == EINTR || NFS4_FRC_UNMT_ERR(ep->error, vp->v_vfsp)) {
                /*
                 * Let the recovery thread be responsible for
                 * removing the state for CLOSE.
                 */
                close_failed = 1;
                force_close = 0;
                retry = 0;
        }

        /* See if we need to retry with a different cred */
        if ((ep->error == EACCES ||
            (ep->error == 0 && ep->stat == NFS4ERR_ACCESS)) &&
            cred_otw != cr) {
                crfree(cred_otw);
                cred_otw = cr;
                crhold(cred_otw);
                retry = 1;
        }

        if (ep->error || ep->stat)
                close_failed = 1;

        if (retry && !isrecov && num_retries-- > 0) {
                /*
                 * Release everything held this attempt before looping
                 * back; recov_retry reacquires it all from scratch.
                 */
                if (have_sync_lock) {
                        mutex_exit(&osp->os_sync_lock);
                        have_sync_lock = 0;
                }
                if (did_start_seqid_sync) {
                        nfs4_end_open_seqid_sync(oop);
                        did_start_seqid_sync = 0;
                }
                open_stream_rele(osp, rp);

                if (did_start_op)
                        nfs4_end_fop(mi, vp, NULL, OH_CLOSE,
                            &recov_state, FALSE);
                if (did_force_recovlock)
                        nfs_rw_exit(&mi->mi_recovlock);
                NFS4_DEBUG(nfs4_client_recov_debug, (CE_NOTE,
                    "nfs4close_one: need to retry the close "
                    "operation"));
                goto recov_retry;
        }
close_cleanup:
        /*
         * Seventh and lastly, process our results.
         */
        if (close_failed && force_close) {
                /*
                 * It's ok to drop and regrab the 'os_sync_lock' since
                 * nfs4close_notw() will recheck to make sure the
                 * "close"/removal of state should happen.
                 */
                if (!have_sync_lock) {
                        mutex_enter(&osp->os_sync_lock);
                        have_sync_lock = 1;
                }
                /*
                 * This is last call, remove the ref on the open
                 * stream created by open and clean everything up.
                 */
                osp->os_pending_close = 0;
                nfs4close_notw(vp, osp, &have_sync_lock);
                nfs4_error_zinit(ep);
        }

        if (!close_failed) {
                if (have_sync_lock) {
                        osp->os_pending_close = 0;
                        mutex_exit(&osp->os_sync_lock);
                        have_sync_lock = 0;
                } else {
                        mutex_enter(&osp->os_sync_lock);
                        osp->os_pending_close = 0;
                        mutex_exit(&osp->os_sync_lock);
                }
                if (did_start_op && recov_state.rs_sp != NULL) {
                        mutex_enter(&recov_state.rs_sp->s_lock);
                        nfs4_dec_state_ref_count_nolock(recov_state.rs_sp, mi);
                        mutex_exit(&recov_state.rs_sp->s_lock);
                } else {
                        nfs4_dec_state_ref_count(mi);
                }
                nfs4_error_zinit(ep);
        }

out:
        /* Common exit: release locks and references in reverse order. */
        if (have_sync_lock)
                mutex_exit(&osp->os_sync_lock);
        if (did_start_op)
                nfs4_end_fop(mi, vp, NULL, OH_CLOSE, &recov_state,
                    recovonly ? TRUE : FALSE);
        if (did_force_recovlock)
                nfs_rw_exit(&mi->mi_recovlock);
        if (cred_otw)
                crfree(cred_otw);
        if (osp)
                open_stream_rele(osp, rp);
        if (oop) {
                if (did_start_seqid_sync)
                        nfs4_end_open_seqid_sync(oop);
                open_owner_rele(oop);
        }
}
15606 15614  
15607 15615  /*
15608 15616   * Convert information returned by the server in the LOCK4denied
15609 15617   * structure to the form required by fcntl.
15610 15618   */
15611 15619  static void
15612 15620  denied_to_flk(LOCK4denied *lockt_denied, flock64_t *flk, LOCKT4args *lockt_args)
15613 15621  {
15614 15622          nfs4_lo_name_t *lo;
15615 15623  
15616 15624  #ifdef  DEBUG
15617 15625          if (denied_to_flk_debug) {
15618 15626                  lockt_denied_debug = lockt_denied;
15619 15627                  debug_enter("lockt_denied");
15620 15628          }
15621 15629  #endif
15622 15630  
15623 15631          flk->l_type = lockt_denied->locktype == READ_LT ? F_RDLCK : F_WRLCK;
15624 15632          flk->l_whence = 0;      /* aka SEEK_SET */
15625 15633          flk->l_start = lockt_denied->offset;
15626 15634          flk->l_len = lockt_denied->length;
15627 15635  
15628 15636          /*
15629 15637           * If the blocking clientid matches our client id, then we can
15630 15638           * interpret the lockowner (since we built it).  If not, then
15631 15639           * fabricate a sysid and pid.  Note that the l_sysid field
15632 15640           * in *flk already has the local sysid.
15633 15641           */
15634 15642  
15635 15643          if (lockt_denied->owner.clientid == lockt_args->owner.clientid) {
15636 15644  
15637 15645                  if (lockt_denied->owner.owner_len == sizeof (*lo)) {
15638 15646                          lo = (nfs4_lo_name_t *)
15639 15647                              lockt_denied->owner.owner_val;
15640 15648  
15641 15649                          flk->l_pid = lo->ln_pid;
15642 15650                  } else {
15643 15651                          NFS4_DEBUG(nfs4_client_lock_debug, (CE_NOTE,
15644 15652                              "denied_to_flk: bad lock owner length\n"));
15645 15653  
15646 15654                          flk->l_pid = lo_to_pid(&lockt_denied->owner);
15647 15655                  }
15648 15656          } else {
15649 15657                  NFS4_DEBUG(nfs4_client_lock_debug, (CE_NOTE,
15650 15658                  "denied_to_flk: foreign clientid\n"));
15651 15659  
15652 15660                  /*
15653 15661                   * Construct a new sysid which should be different from
15654 15662                   * sysids of other systems.
15655 15663                   */
15656 15664  
15657 15665                  flk->l_sysid++;
15658 15666                  flk->l_pid = lo_to_pid(&lockt_denied->owner);
15659 15667          }
15660 15668  }
15661 15669  
15662 15670  static pid_t
15663 15671  lo_to_pid(lock_owner4 *lop)
15664 15672  {
15665 15673          pid_t pid = 0;
15666 15674          uchar_t *cp;
15667 15675          int i;
15668 15676  
15669 15677          cp = (uchar_t *)&lop->clientid;
15670 15678  
15671 15679          for (i = 0; i < sizeof (lop->clientid); i++)
15672 15680                  pid += (pid_t)*cp++;
15673 15681  
15674 15682          cp = (uchar_t *)lop->owner_val;
15675 15683  
15676 15684          for (i = 0; i < lop->owner_len; i++)
15677 15685                  pid += (pid_t)*cp++;
15678 15686  
15679 15687          return (pid);
15680 15688  }
15681 15689  
15682 15690  /*
15683 15691   * Given a lock pointer, returns the length of that lock.
15684 15692   * "end" is the last locked offset the "l_len" covers from
15685 15693   * the start of the lock.
15686 15694   */
15687 15695  static off64_t
15688 15696  lock_to_end(flock64_t *lock)
15689 15697  {
15690 15698          off64_t lock_end;
15691 15699  
15692 15700          if (lock->l_len == 0)
15693 15701                  lock_end = (off64_t)MAXEND;
15694 15702          else
15695 15703                  lock_end = lock->l_start + lock->l_len - 1;
15696 15704  
15697 15705          return (lock_end);
15698 15706  }
15699 15707  
15700 15708  /*
15701 15709   * Given the end of a lock, it will return you the length "l_len" for that lock.
15702 15710   */
15703 15711  static off64_t
15704 15712  end_to_len(off64_t start, off64_t end)
15705 15713  {
15706 15714          off64_t lock_len;
15707 15715  
15708 15716          ASSERT(end >= start);
15709 15717          if (end == MAXEND)
15710 15718                  lock_len = 0;
15711 15719          else
15712 15720                  lock_len = end - start + 1;
15713 15721  
15714 15722          return (lock_len);
15715 15723  }
15716 15724  
15717 15725  /*
15718 15726   * On given end for a lock it determines if it is the last locked offset
15719 15727   * or not, if so keeps it as is, else adds one to return the length for
15720 15728   * valid start.
15721 15729   */
15722 15730  static off64_t
15723 15731  start_check(off64_t x)
15724 15732  {
15725 15733          if (x == MAXEND)
15726 15734                  return (x);
15727 15735          else
15728 15736                  return (x + 1);
15729 15737  }
15730 15738  
15731 15739  /*
15732 15740   * See if these two locks overlap, and if so return 1;
15733 15741   * otherwise, return 0.
15734 15742   */
15735 15743  static int
15736 15744  locks_intersect(flock64_t *llfp, flock64_t *curfp)
15737 15745  {
15738 15746          off64_t llfp_end, curfp_end;
15739 15747  
15740 15748          llfp_end = lock_to_end(llfp);
15741 15749          curfp_end = lock_to_end(curfp);
15742 15750  
15743 15751          if (((llfp_end >= curfp->l_start) &&
15744 15752              (llfp->l_start <= curfp->l_start)) ||
15745 15753              ((curfp->l_start <= llfp->l_start) && (curfp_end >= llfp->l_start)))
15746 15754                  return (1);
15747 15755          return (0);
15748 15756  }
15749 15757  
/*
 * Determine the region where the lost lock 'lost_flp' and the local
 * lock 'local_flp' intersect, and add that region (carrying the local
 * lock's type and owner identity) to the 'nl_llpp' locklist in
 * increasing order (by l_start).  If the two locks do not overlap this
 * is a no-op.
 */
static void
nfs4_add_lock_range(flock64_t *lost_flp, flock64_t *local_flp,
    locklist_t **nl_llpp, vnode_t *vp)
{
        locklist_t *intersect_llp, *tmp_fllp, *cur_fllp;
        off64_t lost_flp_end, local_flp_end, len, start;

        NFS4_DEBUG(nfs4_lost_rqst_debug, (CE_NOTE, "nfs4_add_lock_range:"));

        if (!locks_intersect(lost_flp, local_flp))
                return;

        NFS4_DEBUG(nfs4_lost_rqst_debug, (CE_NOTE, "nfs4_add_lock_range: "
            "locks intersect"));

        lost_flp_end = lock_to_end(lost_flp);
        local_flp_end = lock_to_end(local_flp);

        /* Find the starting point of the intersecting region */
        if (local_flp->l_start > lost_flp->l_start)
                start = local_flp->l_start;
        else
                start = lost_flp->l_start;

        /* Find the length of the intersecting region */
        if (lost_flp_end < local_flp_end)
                len = end_to_len(start, lost_flp_end);
        else
                len = end_to_len(start, local_flp_end);

        /*
         * Prepare the flock structure for the intersection found and insert
         * it into the new list in increasing l_start order. This list contains
         * intersections of locks registered by the client with the local host
         * and the lost lock.
         * The lock type of this lock is the same as that of the local_flp.
         */
        intersect_llp = (locklist_t *)kmem_alloc(sizeof (locklist_t), KM_SLEEP);
        intersect_llp->ll_flock.l_start = start;
        intersect_llp->ll_flock.l_len = len;
        intersect_llp->ll_flock.l_type = local_flp->l_type;
        intersect_llp->ll_flock.l_pid = local_flp->l_pid;
        intersect_llp->ll_flock.l_sysid = local_flp->l_sysid;
        intersect_llp->ll_flock.l_whence = 0;   /* aka SEEK_SET */
        intersect_llp->ll_vp = vp;

        /* Walk forward to the insertion point that keeps l_start order. */
        tmp_fllp = *nl_llpp;
        cur_fllp = NULL;
        while (tmp_fllp != NULL && tmp_fllp->ll_flock.l_start <
            intersect_llp->ll_flock.l_start) {
                        cur_fllp = tmp_fllp;
                        tmp_fllp = tmp_fllp->ll_next;
        }
        if (cur_fllp == NULL) {
                /* first on the list */
                intersect_llp->ll_next = *nl_llpp;
                *nl_llpp = intersect_llp;
        } else {
                intersect_llp->ll_next = cur_fllp->ll_next;
                cur_fllp->ll_next = intersect_llp;
        }

        NFS4_DEBUG(nfs4_lost_rqst_debug, (CE_NOTE, "nfs4_add_lock_range: "
            "created lock region: start %"PRIx64" end %"PRIx64" : %s\n",
            intersect_llp->ll_flock.l_start,
            intersect_llp->ll_flock.l_start + intersect_llp->ll_flock.l_len,
            intersect_llp->ll_flock.l_type == F_RDLCK ? "READ" : "WRITE"));
}
15822 15830  
/*
 * Our local locking current state is potentially different than
 * what the NFSv4 server thinks we have due to a lost lock that was
 * resent and then received.  We need to reset our "NFSv4" locking
 * state to match the current local locking state for this pid since
 * that is what the user/application sees as what the world is.
 *
 * We cannot afford to drop the open/lock seqid sync since then we can
 * get confused about what the current local locking state "is" versus
 * "was".
 *
 * If we are unable to fix up the locks, we send SIGLOST to the affected
 * process.  This is not done if the filesystem has been forcibly
 * unmounted, in case the process has already exited and a new process
 * exists with the same pid.
 */
static void
nfs4_reinstitute_local_lock_state(vnode_t *vp, flock64_t *lost_flp, cred_t *cr,
    nfs4_lock_owner_t *lop)
{
        locklist_t *locks, *llp, *ri_llp, *tmp_llp;
        mntinfo4_t *mi = VTOMI4(vp);
        const int cmd = F_SETLK;
        off64_t cur_start, llp_ll_flock_end, lost_flp_end;
        flock64_t ul_fl;

        NFS4_DEBUG(nfs4_lost_rqst_debug, (CE_NOTE,
            "nfs4_reinstitute_local_lock_state"));

        /*
         * Find active locks for this vp from the local locking code.
         * Scan through this list and find out the locks that intersect with
         * the lost lock. Once we find the lock that intersects, add the
         * intersection area as a new lock to a new list "ri_llp". The lock
         * type of the intersection region lock added to ri_llp is the same
         * as that found in the active lock list, "list". The intersecting
         * region locks are added to ri_llp in increasing l_start order.
         */
        ASSERT(nfs_zone() == mi->mi_zone);

        locks = flk_active_locks_for_vp(vp);
        ri_llp = NULL;

        for (llp = locks; llp != NULL; llp = llp->ll_next) {
                ASSERT(llp->ll_vp == vp);
                /*
                 * Pick locks that belong to this pid/lockowner
                 */
                if (llp->ll_flock.l_pid != lost_flp->l_pid)
                        continue;

                nfs4_add_lock_range(lost_flp, &llp->ll_flock, &ri_llp, vp);
        }

        /*
         * Now we have the list of intersections with the lost lock. These are
         * the locks that were/are active before the server replied to the
         * last/lost lock. Issue these locks to the server here. Playing these
         * locks to the server will re-establish our current local locking state
         * with the v4 server.
         * If we get an error, send SIGLOST to the application for that lock.
         */

        for (llp = ri_llp; llp != NULL; llp = llp->ll_next) {
                NFS4_DEBUG(nfs4_lost_rqst_debug, (CE_NOTE,
                    "nfs4_reinstitute_local_lock_state: need to issue "
                    "flock: [%"PRIx64" - %"PRIx64"] : %s",
                    llp->ll_flock.l_start,
                    llp->ll_flock.l_start + llp->ll_flock.l_len,
                    llp->ll_flock.l_type == F_RDLCK ? "READ" :
                    llp->ll_flock.l_type == F_WRLCK ? "WRITE" : "INVALID"));
                /*
                 * No need to relock what we already have
                 */
                if (llp->ll_flock.l_type == lost_flp->l_type)
                        continue;

                push_reinstate(vp, cmd, &llp->ll_flock, cr, lop);
        }

        /*
         * Now keeping the start of the lost lock as our reference parse the
         * newly created ri_llp locklist to find the ranges that we have locked
         * with the v4 server but not in the current local locking. We need
         * to unlock these ranges.
         * These ranges can also be referred to as those ranges, where the lost
         * lock does not overlap with the locks in the ri_llp but are locked
         * since the server replied to the lost lock.
         */
        cur_start = lost_flp->l_start;
        lost_flp_end = lock_to_end(lost_flp);

        /* Template unlock request; l_start/l_len filled in per gap below. */
        ul_fl.l_type = F_UNLCK;
        ul_fl.l_whence = 0;     /* aka SEEK_SET */
        ul_fl.l_sysid = lost_flp->l_sysid;
        ul_fl.l_pid = lost_flp->l_pid;

        for (llp = ri_llp; llp != NULL; llp = llp->ll_next) {
                llp_ll_flock_end = lock_to_end(&llp->ll_flock);

                /* No gap before this intersection; advance past it. */
                if (llp->ll_flock.l_start <= cur_start) {
                        cur_start = start_check(llp_ll_flock_end);
                        continue;
                }
                NFS4_DEBUG(nfs4_lost_rqst_debug, (CE_NOTE,
                    "nfs4_reinstitute_local_lock_state: "
                    "UNLOCK [%"PRIx64" - %"PRIx64"]",
                    cur_start, llp->ll_flock.l_start));

                ul_fl.l_start = cur_start;
                ul_fl.l_len = end_to_len(cur_start,
                    (llp->ll_flock.l_start - 1));

                push_reinstate(vp, cmd, &ul_fl, cr, lop);
                cur_start = start_check(llp_ll_flock_end);
        }

        /*
         * In the case where the lost lock ends after all intersecting locks,
         * unlock the last part of the lost lock range.
         */
        if (cur_start != start_check(lost_flp_end)) {
                NFS4_DEBUG(nfs4_lost_rqst_debug, (CE_NOTE,
                    "nfs4_reinstitute_local_lock_state: UNLOCK end of the "
                    "lost lock region [%"PRIx64" - %"PRIx64"]",
                    cur_start, lost_flp->l_start + lost_flp->l_len));

                ul_fl.l_start = cur_start;
                /*
                 * Is it an to-EOF lock? if so unlock till the end
                 */
                if (lost_flp->l_len == 0)
                        ul_fl.l_len = 0;
                else
                        ul_fl.l_len = start_check(lost_flp_end) - cur_start;

                push_reinstate(vp, cmd, &ul_fl, cr, lop);
        }

        if (locks != NULL)
                flk_free_locklist(locks);

        /* Free up our newly created locklist */
        for (llp = ri_llp; llp != NULL; ) {
                tmp_llp = llp->ll_next;
                kmem_free(llp, sizeof (locklist_t));
                llp = tmp_llp;
        }

        /*
         * Now return back to the original calling nfs4frlock()
         * and let us naturally drop our seqid syncs.
         */
}
15977 15985  
/*
 * Create a lost state record for the given lock reinstantiation request
 * and push it onto the lost state queue, kicking off recovery so the
 * request is replayed to the server.
 */
static void
push_reinstate(vnode_t *vp, int cmd, flock64_t *flk, cred_t *cr,
    nfs4_lock_owner_t *lop)
{
        nfs4_lost_rqst_t req;
        nfs_lock_type4 locktype;
        nfs4_error_t e = { EINTR, NFS4_OK, RPC_SUCCESS };

        ASSERT(nfs_zone() == VTOMI4(vp)->mi_zone);

        locktype = flk_to_locktype(cmd, flk->l_type);
        nfs4frlock_save_lost_rqst(NFS4_LCK_CTYPE_REINSTATE, EINTR, locktype,
            NULL, NULL, lop, flk, &req, cr, vp);
        /*
         * Only hand the lost request to recovery if saving it produced a
         * LOCK/LOCKU operation; the recovery op itself is chosen from the
         * request's lock type (F_UNLCK -> OP_LOCKU, otherwise OP_LOCK).
         */
        (void) nfs4_start_recovery(&e, VTOMI4(vp), vp, NULL, NULL,
            (req.lr_op == OP_LOCK || req.lr_op == OP_LOCKU) ?
            &req : NULL, flk->l_type == F_UNLCK ? OP_LOCKU : OP_LOCK,
            NULL, NULL, NULL);
}
  
    | 
      ↓ open down ↓ | 
    4983 lines elided | 
    
      ↑ open up ↑ | 
  
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX