1 /*
   2  * CDDL HEADER START
   3  *
   4  * The contents of this file are subject to the terms of the
   5  * Common Development and Distribution License (the "License").
   6  * You may not use this file except in compliance with the License.
   7  *
   8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9  * or http://www.opensolaris.org/os/licensing.
  10  * See the License for the specific language governing permissions
  11  * and limitations under the License.
  12  *
  13  * When distributing Covered Code, include this CDDL HEADER in each
  14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15  * If applicable, add the following below this CDDL HEADER, with the
  16  * fields enclosed by brackets "[]" replaced with your own identifying
  17  * information: Portions Copyright [yyyy] [name of copyright owner]
  18  *
  19  * CDDL HEADER END
  20  */
  21 
  22 /*
  23  * Copyright (c) 1984, 2010, Oracle and/or its affiliates. All rights reserved.
  24  * Copyright 2016, Joyent, Inc.
  25  */
  26 
  27 /*      Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T */
  28 /*        All Rights Reserved   */
  29 
  30 /*
  31  * Portions of this source code were derived from Berkeley 4.3 BSD
  32  * under license from the Regents of the University of California.
  33  */
  34 
  35 #include <sys/types.h>
  36 #include <sys/t_lock.h>
  37 #include <sys/ksynch.h>
  38 #include <sys/param.h>
  39 #include <sys/time.h>
  40 #include <sys/systm.h>
  41 #include <sys/sysmacros.h>
  42 #include <sys/resource.h>
  43 #include <sys/signal.h>
  44 #include <sys/cred.h>
  45 #include <sys/user.h>
  46 #include <sys/buf.h>
  47 #include <sys/vfs.h>
  48 #include <sys/vfs_opreg.h>
  49 #include <sys/vnode.h>
  50 #include <sys/proc.h>
  51 #include <sys/disp.h>
  52 #include <sys/file.h>
  53 #include <sys/fcntl.h>
  54 #include <sys/flock.h>
  55 #include <sys/atomic.h>
  56 #include <sys/kmem.h>
  57 #include <sys/uio.h>
  58 #include <sys/dnlc.h>
  59 #include <sys/conf.h>
  60 #include <sys/mman.h>
  61 #include <sys/pathname.h>
  62 #include <sys/debug.h>
  63 #include <sys/vmsystm.h>
  64 #include <sys/cmn_err.h>
  65 #include <sys/filio.h>
  66 #include <sys/policy.h>
  67 
  68 #include <sys/fs/ufs_fs.h>
  69 #include <sys/fs/ufs_lockfs.h>
  70 #include <sys/fs/ufs_filio.h>
  71 #include <sys/fs/ufs_inode.h>
  72 #include <sys/fs/ufs_fsdir.h>
  73 #include <sys/fs/ufs_quota.h>
  74 #include <sys/fs/ufs_log.h>
  75 #include <sys/fs/ufs_snap.h>
  76 #include <sys/fs/ufs_trans.h>
  77 #include <sys/fs/ufs_panic.h>
  78 #include <sys/fs/ufs_bio.h>
  79 #include <sys/dirent.h>           /* must be AFTER <sys/fs/fsdir.h>! */
  80 #include <sys/errno.h>
  81 #include <sys/fssnap_if.h>
  82 #include <sys/unistd.h>
  83 #include <sys/sunddi.h>
  84 
  85 #include <sys/filio.h>            /* _FIOIO */
  86 
  87 #include <vm/hat.h>
  88 #include <vm/page.h>
  89 #include <vm/pvn.h>
  90 #include <vm/as.h>
  91 #include <vm/seg.h>
  92 #include <vm/seg_map.h>
  93 #include <vm/seg_vn.h>
  94 #include <vm/seg_kmem.h>
  95 #include <vm/rm.h>
  96 #include <sys/swap.h>
  97 
  98 #include <fs/fs_subr.h>
  99 
 100 #include <sys/fs/decomp.h>
 101 
 102 static struct instats ins;
 103 
 104 static  int ufs_getpage_ra(struct vnode *, u_offset_t, struct seg *, caddr_t);
 105 static  int ufs_getpage_miss(struct vnode *, u_offset_t, size_t, struct seg *,
 106                 caddr_t, struct page **, size_t, enum seg_rw, int);
 107 static  int ufs_open(struct vnode **, int, struct cred *, caller_context_t *);
 108 static  int ufs_close(struct vnode *, int, int, offset_t, struct cred *,
 109                 caller_context_t *);
 110 static  int ufs_read(struct vnode *, struct uio *, int, struct cred *,
 111                 struct caller_context *);
 112 static  int ufs_write(struct vnode *, struct uio *, int, struct cred *,
 113                 struct caller_context *);
 114 static  int ufs_ioctl(struct vnode *, int, intptr_t, int, struct cred *,
 115                 int *, caller_context_t *);
 116 static  int ufs_getattr(struct vnode *, struct vattr *, int, struct cred *,
 117                 caller_context_t *);
 118 static  int ufs_setattr(struct vnode *, struct vattr *, int, struct cred *,
 119                 caller_context_t *);
 120 static  int ufs_access(struct vnode *, int, int, struct cred *,
 121                 caller_context_t *);
 122 static  int ufs_lookup(struct vnode *, char *, struct vnode **,
 123                 struct pathname *, int, struct vnode *, struct cred *,
 124                 caller_context_t *, int *, pathname_t *);
 125 static  int ufs_create(struct vnode *, char *, struct vattr *, enum vcexcl,
 126                 int, struct vnode **, struct cred *, int,
 127                 caller_context_t *, vsecattr_t  *);
 128 static  int ufs_remove(struct vnode *, char *, struct cred *,
 129                 caller_context_t *, int);
 130 static  int ufs_link(struct vnode *, struct vnode *, char *, struct cred *,
 131                 caller_context_t *, int);
 132 static  int ufs_rename(struct vnode *, char *, struct vnode *, char *,
 133                 struct cred *, caller_context_t *, int);
 134 static  int ufs_mkdir(struct vnode *, char *, struct vattr *, struct vnode **,
 135                 struct cred *, caller_context_t *, int, vsecattr_t *);
 136 static  int ufs_rmdir(struct vnode *, char *, struct vnode *, struct cred *,
 137                 caller_context_t *, int);
 138 static  int ufs_readdir(struct vnode *, struct uio *, struct cred *, int *,
 139                 caller_context_t *, int);
 140 static  int ufs_symlink(struct vnode *, char *, struct vattr *, char *,
 141                 struct cred *, caller_context_t *, int);
 142 static  int ufs_readlink(struct vnode *, struct uio *, struct cred *,
 143                 caller_context_t *);
 144 static  int ufs_fsync(struct vnode *, int, struct cred *, caller_context_t *);
 145 static  void ufs_inactive(struct vnode *, struct cred *, caller_context_t *);
 146 static  int ufs_fid(struct vnode *, struct fid *, caller_context_t *);
 147 static  int ufs_rwlock(struct vnode *, int, caller_context_t *);
 148 static  void ufs_rwunlock(struct vnode *, int, caller_context_t *);
 149 static  int ufs_seek(struct vnode *, offset_t, offset_t *, caller_context_t *);
 150 static  int ufs_frlock(struct vnode *, int, struct flock64 *, int, offset_t,
 151                 struct flk_callback *, struct cred *,
 152                 caller_context_t *);
 153 static  int ufs_space(struct vnode *, int, struct flock64 *, int, offset_t,
 154                 cred_t *, caller_context_t *);
 155 static  int ufs_getpage(struct vnode *, offset_t, size_t, uint_t *,
 156                 struct page **, size_t, struct seg *, caddr_t,
 157                 enum seg_rw, struct cred *, caller_context_t *);
 158 static  int ufs_putpage(struct vnode *, offset_t, size_t, int, struct cred *,
 159                 caller_context_t *);
 160 static  int ufs_putpages(struct vnode *, offset_t, size_t, int, struct cred *);
 161 static  int ufs_map(struct vnode *, offset_t, struct as *, caddr_t *, size_t,
 162                 uchar_t, uchar_t, uint_t, struct cred *, caller_context_t *);
 163 static  int ufs_addmap(struct vnode *, offset_t, struct as *, caddr_t,  size_t,
 164                 uchar_t, uchar_t, uint_t, struct cred *, caller_context_t *);
 165 static  int ufs_delmap(struct vnode *, offset_t, struct as *, caddr_t,  size_t,
 166                 uint_t, uint_t, uint_t, struct cred *, caller_context_t *);
 167 static  int ufs_poll(vnode_t *, short, int, short *, struct pollhead **,
 168                 caller_context_t *);
 169 static  int ufs_dump(vnode_t *, caddr_t, offset_t, offset_t,
 170     caller_context_t *);
 171 static  int ufs_l_pathconf(struct vnode *, int, ulong_t *, struct cred *,
 172                 caller_context_t *);
 173 static  int ufs_pageio(struct vnode *, struct page *, u_offset_t, size_t, int,
 174                 struct cred *, caller_context_t *);
 175 static  int ufs_dumpctl(vnode_t *, int, offset_t *, caller_context_t *);
 176 static  daddr32_t *save_dblks(struct inode *, struct ufsvfs *, daddr32_t *,
 177                 daddr32_t *, int, int);
 178 static  int ufs_getsecattr(struct vnode *, vsecattr_t *, int, struct cred *,
 179                 caller_context_t *);
 180 static  int ufs_setsecattr(struct vnode *, vsecattr_t *, int, struct cred *,
 181                 caller_context_t *);
 182 static  int ufs_priv_access(void *, int, struct cred *);
 183 static  int ufs_eventlookup(struct vnode *, char *, struct cred *,
 184     struct vnode **);
 185 extern int as_map_locked(struct as *, caddr_t, size_t, int ((*)()), void *);
 186 
 187 /*
 188  * For lockfs: ulockfs begin/end is now inlined in the ufs_xxx functions.
 189  *
 190  * XXX - ULOCKFS in fs_pathconf and ufs_ioctl is not inlined yet.
 191  */
 192 struct vnodeops *ufs_vnodeops;
 193 
 194 /* NOTE: "not blkd" below  means that the operation isn't blocked by lockfs */
 195 const fs_operation_def_t ufs_vnodeops_template[] = {
 196         VOPNAME_OPEN,           { .vop_open = ufs_open },       /* not blkd */
 197         VOPNAME_CLOSE,          { .vop_close = ufs_close },     /* not blkd */
 198         VOPNAME_READ,           { .vop_read = ufs_read },
 199         VOPNAME_WRITE,          { .vop_write = ufs_write },
 200         VOPNAME_IOCTL,          { .vop_ioctl = ufs_ioctl },
 201         VOPNAME_GETATTR,        { .vop_getattr = ufs_getattr },
 202         VOPNAME_SETATTR,        { .vop_setattr = ufs_setattr },
 203         VOPNAME_ACCESS,         { .vop_access = ufs_access },
 204         VOPNAME_LOOKUP,         { .vop_lookup = ufs_lookup },
 205         VOPNAME_CREATE,         { .vop_create = ufs_create },
 206         VOPNAME_REMOVE,         { .vop_remove = ufs_remove },
 207         VOPNAME_LINK,           { .vop_link = ufs_link },
 208         VOPNAME_RENAME,         { .vop_rename = ufs_rename },
 209         VOPNAME_MKDIR,          { .vop_mkdir = ufs_mkdir },
 210         VOPNAME_RMDIR,          { .vop_rmdir = ufs_rmdir },
 211         VOPNAME_READDIR,        { .vop_readdir = ufs_readdir },
 212         VOPNAME_SYMLINK,        { .vop_symlink = ufs_symlink },
 213         VOPNAME_READLINK,       { .vop_readlink = ufs_readlink },
 214         VOPNAME_FSYNC,          { .vop_fsync = ufs_fsync },
 215         VOPNAME_INACTIVE,       { .vop_inactive = ufs_inactive }, /* not blkd */
 216         VOPNAME_FID,            { .vop_fid = ufs_fid },
 217         VOPNAME_RWLOCK,         { .vop_rwlock = ufs_rwlock },   /* not blkd */
 218         VOPNAME_RWUNLOCK,       { .vop_rwunlock = ufs_rwunlock }, /* not blkd */
 219         VOPNAME_SEEK,           { .vop_seek = ufs_seek },
 220         VOPNAME_FRLOCK,         { .vop_frlock = ufs_frlock },
 221         VOPNAME_SPACE,          { .vop_space = ufs_space },
 222         VOPNAME_GETPAGE,        { .vop_getpage = ufs_getpage },
 223         VOPNAME_PUTPAGE,        { .vop_putpage = ufs_putpage },
 224         VOPNAME_MAP,            { .vop_map = ufs_map },
 225         VOPNAME_ADDMAP,         { .vop_addmap = ufs_addmap },   /* not blkd */
 226         VOPNAME_DELMAP,         { .vop_delmap = ufs_delmap },   /* not blkd */
 227         VOPNAME_POLL,           { .vop_poll = ufs_poll },       /* not blkd */
 228         VOPNAME_DUMP,           { .vop_dump = ufs_dump },
 229         VOPNAME_PATHCONF,       { .vop_pathconf = ufs_l_pathconf },
 230         VOPNAME_PAGEIO,         { .vop_pageio = ufs_pageio },
 231         VOPNAME_DUMPCTL,        { .vop_dumpctl = ufs_dumpctl },
 232         VOPNAME_GETSECATTR,     { .vop_getsecattr = ufs_getsecattr },
 233         VOPNAME_SETSECATTR,     { .vop_setsecattr = ufs_setsecattr },
 234         VOPNAME_VNEVENT,        { .vop_vnevent = fs_vnevent_support },
 235         NULL,                   NULL
 236 };
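
     /*
      * Illustrative sketch (an assumption, not code from this file): the
      * template above is typically registered during file system
      * initialization with vn_make_ops(), which fills in the ufs_vnodeops
      * pointer declared earlier, along the lines of:
      *
      *	error = vn_make_ops("ufs", ufs_vnodeops_template, &ufs_vnodeops);
      *	if (error != 0)
      *		cmn_err(CE_WARN, "ufs: bad vnode ops template");
      */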
 237 
 238 #define MAX_BACKFILE_COUNT      9999
 239 
 240 /*
 241  * Created by ufs_dumpctl() to store a file's disk block info into memory.
 242  * Used by ufs_dump() to dump data to disk directly.
 243  */
 244 struct dump {
 245         struct inode    *ip;            /* the file we contain */
 246         daddr_t         fsbs;           /* number of blocks stored */
 247         struct timeval32 time;          /* time stamp for the struct */
 248         daddr32_t       dblk[1];        /* place holder for block info */
 249 };
 250 
 251 static struct dump *dump_info = NULL;
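
     /*
      * Illustrative sketch (an assumption, not code from this file): since
      * dblk[1] above is an old-style variable-length trailing array, the
      * allocation made by ufs_dumpctl() must be sized for the actual number
      * of block entries, roughly:
      *
      *	dump_info = kmem_alloc(sizeof (struct dump) +
      *	    (nblks - 1) * sizeof (daddr32_t), KM_NOSLEEP);
      *
      * where nblks (a hypothetical name here) is the number of entries
      * to be stored.
      */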
 252 
 253 /*
 254  * Previously there was no special action required for ordinary files.
 255  * (Devices are handled through the device file system.)
 256  * Now that we support large files, the large file API requires open
 257  * to fail if the file is large.
 258  * We could guard against data corruption by doing an atomic check of
 259  * the size and truncating if the file is opened with the FTRUNC flag
 260  * set, but traditionally this is done by the vfs/vnode layers.  Taking
 261  * care of truncation here would therefore change the existing
 262  * semantics of VOP_OPEN, so we chose not to implement anything here.
 263  * The check for a file size > 2GB is done at the vfs layer in
 264  * routine vn_open().
 265  */
 266 
 267 /* ARGSUSED */
 268 static int
 269 ufs_open(struct vnode **vpp, int flag, struct cred *cr, caller_context_t *ct)
 270 {
 271         return (0);
 272 }
 273 
 274 /*ARGSUSED*/
 275 static int
 276 ufs_close(struct vnode *vp, int flag, int count, offset_t offset,
 277         struct cred *cr, caller_context_t *ct)
 278 {
 279         cleanlocks(vp, ttoproc(curthread)->p_pid, 0);
 280         cleanshares(vp, ttoproc(curthread)->p_pid);
 281 
 282         /*
 283          * Push partially filled cluster at last close.
 284          * ``last close'' is approximated because the dnlc
 285          * may have a hold on the vnode.
 286          * Checking for VBAD here will also act as a forced umount check.
 287          */
 288         if (vp->v_count <= 2 && vp->v_type != VBAD) {
 289                 struct inode *ip = VTOI(vp);
 290                 if (ip->i_delaylen) {
 291                         ins.in_poc.value.ul++;
 292                         (void) ufs_putpages(vp, ip->i_delayoff, ip->i_delaylen,
 293                             B_ASYNC | B_FREE, cr);
 294                         ip->i_delaylen = 0;
 295                 }
 296         }
 297 
 298         return (0);
 299 }
 300 
 301 /*ARGSUSED*/
 302 static int
 303 ufs_read(struct vnode *vp, struct uio *uiop, int ioflag, struct cred *cr,
 304         struct caller_context *ct)
 305 {
 306         struct inode *ip = VTOI(vp);
 307         struct ufsvfs *ufsvfsp;
 308         struct ulockfs *ulp = NULL;
 309         int error = 0;
 310         int intrans = 0;
 311 
 312         ASSERT(RW_READ_HELD(&ip->i_rwlock));
 313 
 314         /*
 315          * Mandatory locking needs to be done before ufs_lockfs_begin()
 316          * and TRANS_BEGIN_SYNC() calls since mandatory locks can sleep.
 317          */
 318         if (MANDLOCK(vp, ip->i_mode)) {
 319                 /*
 320                  * ufs_getattr ends up being called by chklock
 321                  */
 322                 error = chklock(vp, FREAD, uiop->uio_loffset,
 323                     uiop->uio_resid, uiop->uio_fmode, ct);
 324                 if (error)
 325                         goto out;
 326         }
 327 
 328         ufsvfsp = ip->i_ufsvfs;
 329         error = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_READ_MASK);
 330         if (error)
 331                 goto out;
 332 
 333         /*
 334  * In the case that a directory is opened for reading as a file
 335  * (e.g. "cat .") with the O_RSYNC, O_SYNC or O_DSYNC flags set,
 336  * the locking order had to be changed to avoid a deadlock with
 337  * an update taking place on that directory at the same time.
 338          */
 339         if ((ip->i_mode & IFMT) == IFDIR) {
 340 
 341                 rw_enter(&ip->i_contents, RW_READER);
 342                 error = rdip(ip, uiop, ioflag, cr);
 343                 rw_exit(&ip->i_contents);
 344 
 345                 if (error) {
 346                         if (ulp)
 347                                 ufs_lockfs_end(ulp);
 348                         goto out;
 349                 }
 350 
 351                 if (ulp && (ioflag & FRSYNC) && (ioflag & (FSYNC | FDSYNC)) &&
 352                     TRANS_ISTRANS(ufsvfsp)) {
 353                         rw_exit(&ip->i_rwlock);
 354                         TRANS_BEGIN_SYNC(ufsvfsp, TOP_READ_SYNC, TOP_READ_SIZE,
 355                             error);
 356                         ASSERT(!error);
 357                         TRANS_END_SYNC(ufsvfsp, error, TOP_READ_SYNC,
 358                             TOP_READ_SIZE);
 359                         rw_enter(&ip->i_rwlock, RW_READER);
 360                 }
 361         } else {
 362                 /*
 363                  * Only transact reads to files opened for sync-read and
 364                  * sync-write on a file system that is not write locked.
 365                  *
 366                  * The ``not write locked'' check prevents problems with
 367                  * enabling/disabling logging on a busy file system.  E.g.,
 368                  * logging exists at the beginning of the read but does not
 369                  * at the end.
 370                  *
 371                  */
 372                 if (ulp && (ioflag & FRSYNC) && (ioflag & (FSYNC | FDSYNC)) &&
 373                     TRANS_ISTRANS(ufsvfsp)) {
 374                         TRANS_BEGIN_SYNC(ufsvfsp, TOP_READ_SYNC, TOP_READ_SIZE,
 375                             error);
 376                         ASSERT(!error);
 377                         intrans = 1;
 378                 }
 379 
 380                 rw_enter(&ip->i_contents, RW_READER);
 381                 error = rdip(ip, uiop, ioflag, cr);
 382                 rw_exit(&ip->i_contents);
 383 
 384                 if (intrans) {
 385                         TRANS_END_SYNC(ufsvfsp, error, TOP_READ_SYNC,
 386                             TOP_READ_SIZE);
 387                 }
 388         }
 389 
 390         if (ulp) {
 391                 ufs_lockfs_end(ulp);
 392         }
 393 out:
 394 
 395         return (error);
 396 }
 397 
 398 extern  int     ufs_HW;         /* high water mark */
 399 extern  int     ufs_LW;         /* low water mark */
 400 int     ufs_WRITES = 1;         /* XXX - enable/disable */
 401 int     ufs_throttles = 0;      /* throttling count */
 402 int     ufs_allow_shared_writes = 1;    /* directio shared writes */
 403 
 404 static int
 405 ufs_check_rewrite(struct inode *ip, struct uio *uiop, int ioflag)
 406 {
 407         int     shared_write;
 408 
 409         /*
 410  * If the FDSYNC flag is set, then ignore the global
 411  * ufs_allow_shared_writes setting.
 412          */
 413         shared_write = (ioflag & FDSYNC) | ufs_allow_shared_writes;
 414 
 415         /*
 416          * Filter to determine if this request is suitable as a
 417          * concurrent rewrite. This write must not allocate blocks
 418  * by extending the file or filling in holes. There is no point trying
 419  * this through FSYNC descriptors, as the inode will be synchronously
 420  * updated after the write. The uio structure has not yet been
 421          * checked for sanity, so assume nothing.
 422          */
 423         return (((ip->i_mode & IFMT) == IFREG) && !(ioflag & FAPPEND) &&
 424             (uiop->uio_loffset >= (offset_t)0) &&
 425             (uiop->uio_loffset < ip->i_size) && (uiop->uio_resid > 0) &&
 426             ((ip->i_size - uiop->uio_loffset) >= uiop->uio_resid) &&
 427             !(ioflag & FSYNC) && !bmap_has_holes(ip) &&
 428             shared_write);
 429 }
 430 
 431 /*ARGSUSED*/
 432 static int
 433 ufs_write(struct vnode *vp, struct uio *uiop, int ioflag, cred_t *cr,
 434         caller_context_t *ct)
 435 {
 436         struct inode *ip = VTOI(vp);
 437         struct ufsvfs *ufsvfsp;
 438         struct ulockfs *ulp;
 439         int retry = 1;
 440         int error, resv, resid = 0;
 441         int directio_status;
 442         int exclusive;
 443         int rewriteflg;
 444         long start_resid = uiop->uio_resid;
 445 
 446         ASSERT(RW_LOCK_HELD(&ip->i_rwlock));
 447 
 448 retry_mandlock:
 449         /*
 450          * Mandatory locking needs to be done before ufs_lockfs_begin()
 451          * and TRANS_BEGIN_[A]SYNC() calls since mandatory locks can sleep.
 452          * Check for forced unmounts normally done in ufs_lockfs_begin().
 453          */
 454         if ((ufsvfsp = ip->i_ufsvfs) == NULL) {
 455                 error = EIO;
 456                 goto out;
 457         }
 458         if (MANDLOCK(vp, ip->i_mode)) {
 459 
 460                 ASSERT(RW_WRITE_HELD(&ip->i_rwlock));
 461 
 462                 /*
 463                  * ufs_getattr ends up being called by chklock
 464                  */
 465                 error = chklock(vp, FWRITE, uiop->uio_loffset,
 466                     uiop->uio_resid, uiop->uio_fmode, ct);
 467                 if (error)
 468                         goto out;
 469         }
 470 
 471         /* i_rwlock can change in chklock */
 472         exclusive = rw_write_held(&ip->i_rwlock);
 473         rewriteflg = ufs_check_rewrite(ip, uiop, ioflag);
 474 
 475         /*
 476          * Check for fast-path special case of directio re-writes.
 477          */
 478         if ((ip->i_flag & IDIRECTIO || ufsvfsp->vfs_forcedirectio) &&
 479             !exclusive && rewriteflg) {
 480 
 481                 error = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_WRITE_MASK);
 482                 if (error)
 483                         goto out;
 484 
 485                 rw_enter(&ip->i_contents, RW_READER);
 486                 error = ufs_directio_write(ip, uiop, ioflag, 1, cr,
 487                     &directio_status);
 488                 if (directio_status == DIRECTIO_SUCCESS) {
 489                         uint_t i_flag_save;
 490 
 491                         if (start_resid != uiop->uio_resid)
 492                                 error = 0;
 493                         /*
 494                          * Special treatment of access times for re-writes.
 495                          * If IMOD is not already set, then convert it
 496                          * to IMODACC for this operation. This defers
 497                          * entering a delta into the log until the inode
 498                          * is flushed. This mimics what is done for read
 499                          * operations and inode access time.
 500                          */
 501                         mutex_enter(&ip->i_tlock);
 502                         i_flag_save = ip->i_flag;
 503                         ip->i_flag |= IUPD | ICHG;
 504                         ip->i_seq++;
 505                         ITIMES_NOLOCK(ip);
 506                         if ((i_flag_save & IMOD) == 0) {
 507                                 ip->i_flag &= ~IMOD;
 508                                 ip->i_flag |= IMODACC;
 509                         }
 510                         mutex_exit(&ip->i_tlock);
 511                         rw_exit(&ip->i_contents);
 512                         if (ulp)
 513                                 ufs_lockfs_end(ulp);
 514                         goto out;
 515                 }
 516                 rw_exit(&ip->i_contents);
 517                 if (ulp)
 518                         ufs_lockfs_end(ulp);
 519         }
 520 
 521         if (!exclusive && !rw_tryupgrade(&ip->i_rwlock)) {
 522                 rw_exit(&ip->i_rwlock);
 523                 rw_enter(&ip->i_rwlock, RW_WRITER);
 524                 /*
 525                  * Mandatory locking could have been enabled
 526                  * after dropping the i_rwlock.
 527                  */
 528                 if (MANDLOCK(vp, ip->i_mode))
 529                         goto retry_mandlock;
 530         }
 531 
 532         error = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_WRITE_MASK);
 533         if (error)
 534                 goto out;
 535 
 536         /*
 537          * Amount of log space needed for this write
 538          */
 539         if (!rewriteflg || !(ioflag & FDSYNC))
 540                 TRANS_WRITE_RESV(ip, uiop, ulp, &resv, &resid);
 541 
 542         /*
 543          * Throttle writes.
 544          */
 545         if (ufs_WRITES && (ip->i_writes > ufs_HW)) {
 546                 mutex_enter(&ip->i_tlock);
 547                 while (ip->i_writes > ufs_HW) {
 548                         ufs_throttles++;
 549                         cv_wait(&ip->i_wrcv, &ip->i_tlock);
 550                 }
 551                 mutex_exit(&ip->i_tlock);
 552         }
 553 
 554         /*
 555          * Enter Transaction
 556          *
 557          * If the write is a rewrite there is no need to open a transaction
 558          * when the FDSYNC flag is set but the FSYNC flag is not.  In this
 559          * case just set the IMODACC flag so that the update is done at a
 560          * later time, thus avoiding the overhead of a logging transaction
 561          * that is not required.
 562          */
 563         if (ioflag & (FSYNC|FDSYNC)) {
 564                 if (ulp) {
 565                         if (rewriteflg) {
 566                                 uint_t i_flag_save;
 567 
 568                                 rw_enter(&ip->i_contents, RW_READER);
 569                                 mutex_enter(&ip->i_tlock);
 570                                 i_flag_save = ip->i_flag;
 571                                 ip->i_flag |= IUPD | ICHG;
 572                                 ip->i_seq++;
 573                                 ITIMES_NOLOCK(ip);
 574                                 if ((i_flag_save & IMOD) == 0) {
 575                                         ip->i_flag &= ~IMOD;
 576                                         ip->i_flag |= IMODACC;
 577                                 }
 578                                 mutex_exit(&ip->i_tlock);
 579                                 rw_exit(&ip->i_contents);
 580                         } else {
 581                                 int terr = 0;
 582                                 TRANS_BEGIN_SYNC(ufsvfsp, TOP_WRITE_SYNC, resv,
 583                                     terr);
 584                                 ASSERT(!terr);
 585                         }
 586                 }
 587         } else {
 588                 if (ulp)
 589                         TRANS_BEGIN_ASYNC(ufsvfsp, TOP_WRITE, resv);
 590         }
 591 
 592         /*
 593          * Write the file
 594          */
 595         rw_enter(&ufsvfsp->vfs_dqrwlock, RW_READER);
 596         rw_enter(&ip->i_contents, RW_WRITER);
 597         if ((ioflag & FAPPEND) != 0 && (ip->i_mode & IFMT) == IFREG) {
 598                 /*
 599                  * In append mode start at end of file.
 600                  */
 601                 uiop->uio_loffset = ip->i_size;
 602         }
 603 
 604         /*
 605          * Mild optimisation: don't call ufs_trans_write() unless we have to.
 606          * Also, suppress file system full messages if we will retry.
 607          */
 608         if (retry)
 609                 ip->i_flag |= IQUIET;
 610         if (resid) {
 611                 TRANS_WRITE(ip, uiop, ioflag, error, ulp, cr, resv, resid);
 612         } else {
 613                 error = wrip(ip, uiop, ioflag, cr);
 614         }
 615         ip->i_flag &= ~IQUIET;
 616 
 617         rw_exit(&ip->i_contents);
 618         rw_exit(&ufsvfsp->vfs_dqrwlock);
 619 
 620         /*
 621          * Leave Transaction
 622          */
 623         if (ulp) {
 624                 if (ioflag & (FSYNC|FDSYNC)) {
 625                         if (!rewriteflg) {
 626                                 int terr = 0;
 627 
 628                                 TRANS_END_SYNC(ufsvfsp, terr, TOP_WRITE_SYNC,
 629                                     resv);
 630                                 if (error == 0)
 631                                         error = terr;
 632                         }
 633                 } else {
 634                         TRANS_END_ASYNC(ufsvfsp, TOP_WRITE, resv);
 635                 }
 636                 ufs_lockfs_end(ulp);
 637         }
 638 out:
 639         if ((error == ENOSPC) && retry && TRANS_ISTRANS(ufsvfsp)) {
 640                 /*
 641                  * Any blocks tied up in pending deletes?
 642                  */
 643                 ufs_delete_drain_wait(ufsvfsp, 1);
 644                 retry = 0;
 645                 goto retry_mandlock;
 646         }
 647 
 648         if (error == ENOSPC && (start_resid != uiop->uio_resid))
 649                 error = 0;
 650 
 651         return (error);
 652 }
 653 
 654 /*
 655  * Don't cache write blocks to files with the sticky bit set.
 656  * Used to keep swap files from blowing the page cache on a server.
 657  */
 658 int stickyhack = 1;
 659 
 660 /*
 661  * Free behind hacks.  The pager is busted.
 662  * XXX - need to pass the information down to writedone() in a flag like B_SEQ
 663  * or B_FREE_IF_TIGHT_ON_MEMORY.
 664  */
 665 int     freebehind = 1;
 666 int     smallfile = 0;
 667 u_offset_t smallfile64 = 32 * 1024;
 668 
 669 /*
 670  * While we should, in most cases, cache the pages for write, we
 671  * may also want to cache the pages for read as long as they are
 672  * frequently re-usable.
 673  *
 674  * If cache_read_ahead = 1, the pages for read will go to the tail
 675  * of the cache list when they are released, otherwise go to the head.
 676  */
 677 int     cache_read_ahead = 0;
 678 
 679 /*
 680  * Freebehind exists so that as we read large files sequentially we
 681  * don't consume most of memory with pages from a few files.  It takes
 682  * longer to re-read multiple small files from disk than it does to read
 683  * one large one sequentially.  As system memory grows, customers need
 684  * to retain bigger chunks of files in memory.  The advent of the
 685  * cachelist opens up the possibility of freeing pages to the head or
 686  * tail of the list.
 687  *
 688  * Not freeing a page is a bet that the page will be read again before
 689  * its segmap slot is needed for something else.  If we lose the bet,
 690  * it means some other thread is burdened with the page free we did
 691  * not do.  If we win, we save a free and a reclaim.
 692  *
 693  * Freeing it at the tail  vs the head of cachelist  is a bet that the
 694  * page will survive until the next  read.  It's also saying that this
 695  * page is more likely to  be re-used than a  page freed some time ago
 696  * and never reclaimed.
 697  *
 698  * Freebehind maintains a range of file offsets [smallfile1; smallfile2]:
 699  *
 700  *            0 < offset < smallfile1 : pages are not freed.
 701  *   smallfile1 < offset < smallfile2 : pages freed to tail of cachelist.
 702  *   smallfile2 < offset              : pages freed to head of cachelist.
 703  *
 704  * The range  is  computed  at most  once  per second  and  depends on
 705  * freemem  and  ncpus_online.  Both parameters  are   bounded to be
 706  * >= smallfile && >= smallfile64.
 707  *
 708  * smallfile1 = (free memory / ncpu) / 1000
 709  * smallfile2 = (free memory / ncpu) / 10
 710  *
 711  * A few example values:
 712  *
 713  *       Free Mem (in Bytes) [smallfile1; smallfile2]  [smallfile1; smallfile2]
 714  *                                 ncpus_online = 4          ncpus_online = 64
 715  *       ------------------  -----------------------   -----------------------
 716  *             1G                   [256K;  25M]               [32K; 1.5M]
 717  *            10G                   [2.5M; 250M]              [156K; 15M]
 718  *           100G                    [25M; 2.5G]              [1.5M; 150M]
 719  *
 720  */
 721 
 722 #define SMALLFILE1_D 1000
 723 #define SMALLFILE2_D 10
 724 static u_offset_t smallfile1 = 32 * 1024;
 725 static u_offset_t smallfile2 = 32 * 1024;
 726 static clock_t smallfile_update = 0; /* lbolt value of when to recompute */
 727 uint_t smallfile1_d = SMALLFILE1_D;
 728 uint_t smallfile2_d = SMALLFILE2_D;
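
     /*
      * Illustrative sketch (an assumption, not code from this file): the
      * [smallfile1; smallfile2] range described above could be recomputed,
      * at most once per second, from freemem and ncpus_online roughly as
      * follows:
      *
      *	if (smallfile_update < ddi_get_lbolt()) {
      *		u_offset_t percpufreeb =
      *		    ptob((u_offset_t)freemem) / ncpus_online;
      *
      *		smallfile1 = MAX(percpufreeb / smallfile1_d, smallfile);
      *		smallfile2 = MAX(percpufreeb / smallfile2_d, smallfile64);
      *		smallfile1 = MAX(smallfile1, smallfile64);
      *		smallfile2 = MAX(smallfile1, smallfile2);
      *		smallfile_update = ddi_get_lbolt() + hz;
      *	}
      *
      * The real computation is performed elsewhere in the file; this only
      * sketches the formulas given in the comment above.
      */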
 729 
 730 /*
 731  * wrip does the real work of write requests for ufs.
 732  */
 733 int
 734 wrip(struct inode *ip, struct uio *uio, int ioflag, struct cred *cr)
 735 {
 736         rlim64_t limit = uio->uio_llimit;
 737         u_offset_t off;
 738         u_offset_t old_i_size;
 739         struct fs *fs;
 740         struct vnode *vp;
 741         struct ufsvfs *ufsvfsp;
 742         caddr_t base;
 743         long start_resid = uio->uio_resid;   /* save starting resid */
 744         long premove_resid;                     /* resid before uiomove() */
 745         uint_t flags;
 746         int newpage;
 747         int iupdat_flag, directio_status;
 748         int n, on, mapon;
 749         int error, pagecreate;
 750         int do_dqrwlock;                /* drop/reacquire vfs_dqrwlock */
 751         int32_t iblocks;
 752         int     new_iblocks;
 753 
 754         /*
 755          * ip->i_size is incremented before the uiomove
 756          * is done on a write.  If the move fails (bad user
 757          * address) reset ip->i_size.
 758          * The better way would be to increment ip->i_size
 759          * only if the uiomove succeeds.
 760          */
 761         int i_size_changed = 0;
 762         o_mode_t type;
 763         int i_seq_needed = 0;
 764 
 765         vp = ITOV(ip);
 766 
 767         /*
 768          * check for forced unmount - should not happen as
 769          * the request passed the lockfs checks.
 770          */
 771         if ((ufsvfsp = ip->i_ufsvfs) == NULL)
 772                 return (EIO);
 773 
 774         fs = ip->i_fs;
 775 
 776         ASSERT(RW_WRITE_HELD(&ip->i_contents));
 777 
 778         /* check for valid filetype */
 779         type = ip->i_mode & IFMT;
 780         if ((type != IFREG) && (type != IFDIR) && (type != IFATTRDIR) &&
 781             (type != IFLNK) && (type != IFSHAD)) {
 782                 return (EIO);
 783         }
 784 
 785         /*
 786          * the actual limit of UFS file size
 787          * is UFS_MAXOFFSET_T
 788          */
 789         if (limit == RLIM64_INFINITY || limit > MAXOFFSET_T)
 790                 limit = MAXOFFSET_T;
 791 
 792         if (uio->uio_loffset >= limit) {
 793                 proc_t *p = ttoproc(curthread);
 794 
 795                 mutex_enter(&p->p_lock);
 796                 (void) rctl_action(rctlproc_legacy[RLIMIT_FSIZE], p->p_rctls,
 797                     p, RCA_UNSAFE_SIGINFO);
 798                 mutex_exit(&p->p_lock);
 799                 return (EFBIG);
 800         }
 801 
 802         /*
 803          * if largefiles are disallowed, the limit is
 804          * the pre-largefiles value of 2GB
 805          */
 806         if (ufsvfsp->vfs_lfflags & UFS_LARGEFILES)
 807                 limit = MIN(UFS_MAXOFFSET_T, limit);
 808         else
 809                 limit = MIN(MAXOFF32_T, limit);
 810 
 811         if (uio->uio_loffset < (offset_t)0) {
 812                 return (EINVAL);
 813         }
 814         if (uio->uio_resid == 0) {
 815                 return (0);
 816         }
 817 
 818         if (uio->uio_loffset >= limit)
 819                 return (EFBIG);
 820 
 821         ip->i_flag |= INOACC;        /* don't update ref time in getpage */
 822 
 823         if (ioflag & (FSYNC|FDSYNC)) {
 824                 ip->i_flag |= ISYNC;
 825                 iupdat_flag = 1;
 826         }
 827         /*
 828          * Try to go direct
 829          */
 830         if (ip->i_flag & IDIRECTIO || ufsvfsp->vfs_forcedirectio) {
 831                 uio->uio_llimit = limit;
 832                 error = ufs_directio_write(ip, uio, ioflag, 0, cr,
 833                     &directio_status);
 834                 /*
 835                  * If ufs_directio wrote to the file or set the flags,
 836                  * we need to update i_seq, but it may be deferred.
 837                  */
 838                 if (start_resid != uio->uio_resid ||
 839                     (ip->i_flag & (ICHG|IUPD))) {
 840                         i_seq_needed = 1;
 841                         ip->i_flag |= ISEQ;
 842                 }
 843                 if (directio_status == DIRECTIO_SUCCESS)
 844                         goto out;
 845         }
 846 
 847         /*
 848          * Behavior with respect to dropping/reacquiring vfs_dqrwlock:
 849          *
 850          * o shadow inodes: vfs_dqrwlock is not held at all
 851          * o quota updates: vfs_dqrwlock is read or write held
 852          * o other updates: vfs_dqrwlock is read held
 853          *
 854          * The first case is the only one where we do not hold
 855          * vfs_dqrwlock at all while entering wrip().
 856          * We must make sure not to downgrade/drop vfs_dqrwlock if we
 857          * have it as writer, i.e. if we are updating the quota inode.
 858          * There is no potential deadlock scenario in this case as
 859          * ufs_getpage() takes care of this and avoids reacquiring
 860          * vfs_dqrwlock in that case.
 861          *
 862          * This check is done here since the above conditions do not change
 863          * and we possibly loop below, so save a few cycles.
 864          */
 865         if ((type == IFSHAD) ||
 866             (rw_owner(&ufsvfsp->vfs_dqrwlock) == curthread)) {
 867                 do_dqrwlock = 0;
 868         } else {
 869                 do_dqrwlock = 1;
 870         }
 871 
 872         /*
 873          * Large Files: We cast MAXBMASK to offset_t
 874  * in order to mask out the higher bits. Since offset_t
 875  * is a signed value, the high order bit set in the MAXBMASK
 876  * value makes it do the right thing by having all bits 1
 877          * in the higher word. May be removed for _SOLARIS64_.
 878          */
 879 
 880         fs = ip->i_fs;
 881         do {
 882                 u_offset_t uoff = uio->uio_loffset;
 883                 off = uoff & (offset_t)MAXBMASK;
 884                 mapon = (int)(uoff & (offset_t)MAXBOFFSET);
 885                 on = (int)blkoff(fs, uoff);
 886                 n = (int)MIN(fs->fs_bsize - on, uio->uio_resid);
 887                 new_iblocks = 1;
 888 
 889                 if (type == IFREG && uoff + n >= limit) {
 890                         if (uoff >= limit) {
 891                                 error = EFBIG;
 892                                 goto out;
 893                         }
 894                         /*
 895                          * since uoff + n >= limit and uoff < limit,
 896                          * 0 < limit - uoff <= n; n is an int, so
 897                          * limit - uoff fits in an int and the cast is safe
 898                          */
 899                         n = (int)(limit - (rlim64_t)uoff);
 900                 }
 901                 if (uoff + n > ip->i_size) {
 902                         /*
 903                          * We are extending the length of the file.
 904                          * bmap is used so that we are sure that,
 905                          * if we need to allocate new blocks, it
 906                          * is done here before we up the file size.
 907                          */
 908                         error = bmap_write(ip, uoff, (int)(on + n),
 909                             mapon == 0, NULL, cr);
 910                         /*
 911                          * bmap_write never drops i_contents so if
 912                          * the flags are set it changed the file.
 913                          */
 914                         if (ip->i_flag & (ICHG|IUPD)) {
 915                                 i_seq_needed = 1;
 916                                 ip->i_flag |= ISEQ;
 917                         }
 918                         if (error)
 919                                 break;
 920                         /*
 921                          * There is a window of vulnerability here.
 922                          * The sequence of operations: allocate file
 923                          * system blocks, uiomove the data into pages,
 924                          * and then update the size of the file in the
 925                          * inode, must happen atomically.  However, due
 926                          * to current locking constraints, this can not
 927                          * be done.
 928                          */
 929                         ASSERT(ip->i_writer == NULL);
 930                         ip->i_writer = curthread;
 931                         i_size_changed = 1;
 932                         /*
 933                          * If we are writing from the beginning of
 934                          * the mapping, we can just create the
 935                          * pages without having to read them.
 936                          */
 937                         pagecreate = (mapon == 0);
 938                 } else if (n == MAXBSIZE) {
 939                         /*
 940                          * Going to do a whole mapping's worth,
 941                          * so we can just create the pages w/o
 942                          * having to read them in.  But before
 943                          * we do that, we need to make sure any
 944                          * needed blocks are allocated first.
 945                          */
 946                         iblocks = ip->i_blocks;
 947                         error = bmap_write(ip, uoff, (int)(on + n),
 948                             BI_ALLOC_ONLY, NULL, cr);
 949                         /*
 950                          * bmap_write never drops i_contents so if
 951                          * the flags are set it changed the file.
 952                          */
 953                         if (ip->i_flag & (ICHG|IUPD)) {
 954                                 i_seq_needed = 1;
 955                                 ip->i_flag |= ISEQ;
 956                         }
 957                         if (error)
 958                                 break;
 959                         pagecreate = 1;
 960                         /*
 961                          * check if the newly created page needed the
 962                          * allocation of new disk blocks.
 963                          */
 964                         if (iblocks == ip->i_blocks)
 965                                 new_iblocks = 0; /* no new blocks allocated */
 966                 } else {
 967                         pagecreate = 0;
 968                         /*
 969                          * In sync mode flush the indirect blocks which
 970                          * may have been allocated and not written on
 971                          * disk. In the above cases bmap_write will allocate
 972                          * in sync mode.
 973                          */
 974                         if (ioflag & (FSYNC|FDSYNC)) {
 975                                 error = ufs_indirblk_sync(ip, uoff);
 976                                 if (error)
 977                                         break;
 978                         }
 979                 }
 980 
 981                 /*
 982                  * At this point we can enter ufs_getpage() in one
 983                  * of two ways:
 984                  * 1) segmap_getmapflt() calls ufs_getpage() when the
 985                  *    forcefault parameter is true (pagecreate == 0)
 986                  * 2) uiomove() causes a page fault.
 987                  *
 988                  * We have to drop the contents lock to prevent the VM
 989                  * system from trying to reacquire it in ufs_getpage()
 990                  * should the uiomove cause a pagefault.
 991                  *
 992                  * We have to drop the reader vfs_dqrwlock here as well.
 993                  */
 994                 rw_exit(&ip->i_contents);
 995                 if (do_dqrwlock) {
 996                         ASSERT(RW_LOCK_HELD(&ufsvfsp->vfs_dqrwlock));
 997                         ASSERT(!(RW_WRITE_HELD(&ufsvfsp->vfs_dqrwlock)));
 998                         rw_exit(&ufsvfsp->vfs_dqrwlock);
 999                 }
1000 
1001                 newpage = 0;
1002                 premove_resid = uio->uio_resid;
1003 
1004                 /*
1005                  * Touch the page and fault it in if it is not in core
1006                  * before segmap_getmapflt or vpm_data_copy can lock it.
1007                  * This is to avoid the deadlock if the buffer is mapped
1008                  * to the same file through mmap which we want to write.
1009                  */
1010                 uio_prefaultpages((long)n, uio);
1011 
1012                 if (vpm_enable) {
1013                         /*
1014                          * Copy data. If new pages are created, part of
1015                          * the page that is not written will be initialized
1016                          * with zeros.
1017                          */
1018                         error = vpm_data_copy(vp, (off + mapon), (uint_t)n,
1019                             uio, !pagecreate, &newpage, 0, S_WRITE);
1020                 } else {
1021 
1022                         base = segmap_getmapflt(segkmap, vp, (off + mapon),
1023                             (uint_t)n, !pagecreate, S_WRITE);
1024 
1025                         /*
1026                          * segmap_pagecreate() returns 1 if it calls
1027                          * page_create_va() to allocate any pages.
1028                          */
1029 
1030                         if (pagecreate)
1031                                 newpage = segmap_pagecreate(segkmap, base,
1032                                     (size_t)n, 0);
1033 
1034                         error = uiomove(base + mapon, (long)n, UIO_WRITE, uio);
1035                 }
1036 
1037                 /*
1038                  * If "newpage" is set, then a new page was created and it
1039                  * does not contain valid data, so it needs to be initialized
1040                  * at this point.
1041                  * Otherwise the page contains old data, which was overwritten
1042                  * partially or as a whole in uiomove.
1043                  * If there is only one iovec structure within uio, then
1044                  * on error uiomove will not be able to update uio->uio_loffset
1045                  * and we would zero the whole page here!
1046                  *
1047                  * If uiomove fails because of an error, the old valid data
1048                  * is kept instead of filling the rest of the page with zeros.
1049                  */
1050                 if (!vpm_enable && newpage &&
1051                     uio->uio_loffset < roundup(off + mapon + n, PAGESIZE)) {
1052                         /*
1053                          * We created pages w/o initializing them completely,
1054                          * thus we need to zero the part that wasn't set up.
1055                          * This happens on most EOF write cases and if
1056                          * we had some sort of error during the uiomove.
1057                          */
1058                         int nzero, nmoved;
1059 
1060                         nmoved = (int)(uio->uio_loffset - (off + mapon));
1061                         ASSERT(nmoved >= 0 && nmoved <= n);
1062                         nzero = roundup(on + n, PAGESIZE) - nmoved;
1063                         ASSERT(nzero > 0 && mapon + nmoved + nzero <= MAXBSIZE);
1064                         (void) kzero(base + mapon + nmoved, (uint_t)nzero);
1065                 }
1066 
1067                 /*
1068                  * Unlock the pages allocated by page_create_va()
1069                  * in segmap_pagecreate()
1070                  */
1071                 if (!vpm_enable && newpage)
1072                         segmap_pageunlock(segkmap, base, (size_t)n, S_WRITE);
1073 
1074                 /*
1075                  * If the size of the file changed, then update the
1076                  * size field in the inode now.  This can't be done
1077                  * before the call to segmap_pageunlock or there is
1078                  * a potential deadlock with callers to ufs_putpage().
1079                  * They will be holding i_contents and trying to lock
1080                  * a page, while this thread is holding a page locked
1081                  * and trying to acquire i_contents.
1082                  */
1083                 if (i_size_changed) {
1084                         rw_enter(&ip->i_contents, RW_WRITER);
1085                         old_i_size = ip->i_size;
1086                         UFS_SET_ISIZE(uoff + n, ip);
1087                         TRANS_INODE(ufsvfsp, ip);
1088                         /*
1089                          * file has grown larger than 2GB. Set flag
1090                          * in superblock to indicate this, if it
1091                          * is not already set.
1092                          */
1093                         if ((ip->i_size > MAXOFF32_T) &&
1094                             !(fs->fs_flags & FSLARGEFILES)) {
1095                                 ASSERT(ufsvfsp->vfs_lfflags & UFS_LARGEFILES);
1096                                 mutex_enter(&ufsvfsp->vfs_lock);
1097                                 fs->fs_flags |= FSLARGEFILES;
1098                                 ufs_sbwrite(ufsvfsp);
1099                                 mutex_exit(&ufsvfsp->vfs_lock);
1100                         }
1101                         mutex_enter(&ip->i_tlock);
1102                         ip->i_writer = NULL;
1103                         cv_broadcast(&ip->i_wrcv);
1104                         mutex_exit(&ip->i_tlock);
1105                         rw_exit(&ip->i_contents);
1106                 }
1107 
1108                 if (error) {
1109                         /*
1110                          * If we failed on a write, we may have already
1111                          * allocated file blocks as well as pages.  It's
1112                          * hard to undo the block allocation, but we must
1113                          * be sure to invalidate any pages that may have
1114                          * been allocated.
1115                          *
1116                          * If the page was created without initialization
1117                          * then we must check whether it is possible
1118                          * to destroy the new page and keep the old data
1119                          * on the disk.
1120                          *
1121                          * It is possible to destroy the page without
1122                          * having to write back its contents only when
1123                          * - the size of the file is unchanged
1124                          * - bmap_write() did not allocate new disk blocks
1125                          *   (it is possible to create big files using "seek" and
1126                          *   writing to the end of the file; a "write" to a
1127                          *   position before the end of the file would not
1128                          *   change the size of the file but it would allocate
1129                          *   new disk blocks)
1130                          * - uiomove intended to overwrite the whole page.
1131                          * - a new page was created (newpage == 1).
1132                          */
1133 
1134                         if (i_size_changed == 0 && new_iblocks == 0 &&
1135                             newpage) {
1136 
1137                                 /* unwind whatever uiomove may have done */
1138                                 uio->uio_resid = premove_resid;
1139 
1140                                 /*
1141                                  * destroy the page, do not write ambiguous
1142                                  * data to the disk.
1143                                  */
1144                                 flags = SM_DESTROY;
1145                         } else {
1146                                 /*
1147                                  * write the page back to the disk, if dirty,
1148                                  * and remove the page from the cache.
1149                                  */
1150                                 flags = SM_INVAL;
1151                         }
1152 
1153                         if (vpm_enable) {
1154                                 /*
1155                                  *  Flush pages.
1156                                  */
1157                                 (void) vpm_sync_pages(vp, off, n, flags);
1158                         } else {
1159                                 (void) segmap_release(segkmap, base, flags);
1160                         }
1161                 } else {
1162                         flags = 0;
1163                         /*
1164                          * Force write back for synchronous write cases.
1165                          */
1166                         if ((ioflag & (FSYNC|FDSYNC)) || type == IFDIR) {
1167                                 /*
1168                                  * If the sticky bit is set but the
1169                                  * execute bit is not set, we do a
1170                                  * synchronous write back and free
1171                                  * the page when done.  We set up swap
1172                                  * files to be handled this way to
1173                                  * prevent servers from keeping around
1174                                  * the client's swap pages too long.
1175                                  * XXX - there ought to be a better way.
1176                                  */
1177                                 if (IS_SWAPVP(vp)) {
1178                                         flags = SM_WRITE | SM_FREE |
1179                                             SM_DONTNEED;
1180                                         iupdat_flag = 0;
1181                                 } else {
1182                                         flags = SM_WRITE;
1183                                 }
1184                         } else if (n + on == MAXBSIZE || IS_SWAPVP(vp)) {
1185                                 /*
1186                                  * Have written a whole block.
1187                                  * Start an asynchronous write and
1188                                  * mark the buffer to indicate that
1189                                  * it won't be needed again soon.
1190                                  */
1191                                 flags = SM_WRITE | SM_ASYNC | SM_DONTNEED;
1192                         }
1193                         if (vpm_enable) {
1194                                 /*
1195                                  * Flush pages.
1196                                  */
1197                                 error = vpm_sync_pages(vp, off, n, flags);
1198                         } else {
1199                                 error = segmap_release(segkmap, base, flags);
1200                         }
1201                         /*
1202                          * If the operation failed and is synchronous,
1203                          * then we need to unwind what uiomove() last
1204                          * did so we can potentially return an error to
1205                          * the caller.  If this write operation was
1206                          * done in two pieces and the first succeeded,
1207                          * then we won't return an error for the second
1208                          * piece that failed.  However, we only want to
1209                          * return a resid value that reflects what was
1210                          * really done.
1211                          *
1212                          * Failures for non-synchronous operations can
1213                          * be ignored since the page subsystem will
1214                          * retry the operation until it succeeds or the
1215                          * file system is unmounted.
1216                          */
1217                         if (error) {
1218                                 if ((ioflag & (FSYNC | FDSYNC)) ||
1219                                     type == IFDIR) {
1220                                         uio->uio_resid = premove_resid;
1221                                 } else {
1222                                         error = 0;
1223                                 }
1224                         }
1225                 }
1226 
1227                 /*
1228                  * Re-acquire contents lock.
1229                  * If it was dropped, reacquire reader vfs_dqrwlock as well.
1230                  */
1231                 if (do_dqrwlock)
1232                         rw_enter(&ufsvfsp->vfs_dqrwlock, RW_READER);
1233                 rw_enter(&ip->i_contents, RW_WRITER);
1234 
1235                 /*
1236                  * If the uiomove() failed or if a synchronous
1237                  * page push failed, fix up i_size.
1238                  */
1239                 if (error) {
1240                         if (i_size_changed) {
1241                                 /*
1242                                  * The uiomove failed, and we
1243                                  * allocated blocks, so get rid
1244                                  * of them.
1245                                  */
1246                                 (void) ufs_itrunc(ip, old_i_size, 0, cr);
1247                         }
1248                 } else {
1249                         /*
1250                          * XXX - Can this be out of the loop?
1251                          */
1252                         ip->i_flag |= IUPD | ICHG;
1253                         /*
1254                          * Only do one increase of i_seq for multiple
1255                          * pieces.  Because we drop locks, record
1256                          * the fact that we changed the timestamp and
1257                          * are deferring the increase in case another thread
1258                          * pushes our timestamp update.
1259                          */
1260                         i_seq_needed = 1;
1261                         ip->i_flag |= ISEQ;
1262                         if (i_size_changed)
1263                                 ip->i_flag |= IATTCHG;
1264                         if ((ip->i_mode & (IEXEC | (IEXEC >> 3) |
1265                             (IEXEC >> 6))) != 0 &&
1266                             (ip->i_mode & (ISUID | ISGID)) != 0 &&
1267                             secpolicy_vnode_setid_retain(cr,
1268                             (ip->i_mode & ISUID) != 0 && ip->i_uid == 0) != 0) {
1269                                 /*
1270                                  * Clear Set-UID & Set-GID bits on
1271                                  * successful write if not privileged
1272                                  * and at least one of the execute bits
1273                                  * is set.  If we always clear Set-GID,
1274                                  * mandatory file and record locking is
1275                                  * unusable.
1276                                  */
1277                                 ip->i_mode &= ~(ISUID | ISGID);
1278                         }
1279                 }
1280                 /*
1281                  * If the FDSYNC flag is set and this is a "rewrite",
1282                  * we won't log a delta.
1283                  * The FSYNC flag overrides all cases.
1284                  */
1285                 if (!ufs_check_rewrite(ip, uio, ioflag) || !(ioflag & FDSYNC)) {
1286                         TRANS_INODE(ufsvfsp, ip);
1287                 }
1288         } while (error == 0 && uio->uio_resid > 0 && n != 0);
1289 
1290 out:
1291         /*
1292          * Make sure i_seq is increased at least once per write
1293          */
1294         if (i_seq_needed) {
1295                 ip->i_seq++;
1296                 ip->i_flag &= ~ISEQ;     /* no longer deferred */
1297         }
1298 
1299         /*
1300          * Inode is updated according to this table -
1301          *
1302          *   FSYNC        FDSYNC(posix.4)
1303          *   --------------------------
1304          *   always@      IATTCHG|IBDWRITE
1305          *
1306          * @ -  If we are doing synchronous write the only time we should
1307          *      not be sync'ing the ip here is if we have the stickyhack
1308          *      activated, the file is marked with the sticky bit and
1309          *      no exec bit, the file length has not been changed and
1310          *      no new blocks have been allocated during this write.
1311          */
1312 
1313         if ((ip->i_flag & ISYNC) != 0) {
1314                 /*
1315                  * we have eliminated nosync
1316                  */
1317                 if ((ip->i_flag & (IATTCHG|IBDWRITE)) ||
1318                     ((ioflag & FSYNC) && iupdat_flag)) {
1319                         ufs_iupdat(ip, 1);
1320                 }
1321         }
1322 
1323         /*
1324          * If we've already done a partial write, terminate
1325          * the write but return no error, unless the error is
1326          * ENOSPC, because the caller can detect that case, free
1327          * resources, and try again.
1328          */
1329         if ((start_resid != uio->uio_resid) && (error != ENOSPC))
1330                 error = 0;
1331 
1332         ip->i_flag &= ~(INOACC | ISYNC);
1333         ITIMES_NOLOCK(ip);
1334         return (error);
1335 }
1336 
1337 /*
1338  * rdip does the real work of read requests for ufs.
1339  */
1340 int
1341 rdip(struct inode *ip, struct uio *uio, int ioflag, cred_t *cr)
1342 {
1343         u_offset_t off;
1344         caddr_t base;
1345         struct fs *fs;
1346         struct ufsvfs *ufsvfsp;
1347         struct vnode *vp;
1348         long oresid = uio->uio_resid;
1349         u_offset_t n, on, mapon;
1350         int error = 0;
1351         int doupdate = 1;
1352         uint_t flags;
1353         int dofree, directio_status;
1354         krw_t rwtype;
1355         o_mode_t type;
1356         clock_t now;
1357 
1358         vp = ITOV(ip);
1359 
1360         ASSERT(RW_LOCK_HELD(&ip->i_contents));
1361 
1362         ufsvfsp = ip->i_ufsvfs;
1363 
1364         if (ufsvfsp == NULL)
1365                 return (EIO);
1366 
1367         fs = ufsvfsp->vfs_fs;
1368 
1369         /* check for valid filetype */
1370         type = ip->i_mode & IFMT;
1371         if ((type != IFREG) && (type != IFDIR) && (type != IFATTRDIR) &&
1372             (type != IFLNK) && (type != IFSHAD)) {
1373                 return (EIO);
1374         }
1375 
1376         if (uio->uio_loffset > UFS_MAXOFFSET_T) {
1377                 error = 0;
1378                 goto out;
1379         }
1380         if (uio->uio_loffset < (offset_t)0) {
1381                 return (EINVAL);
1382         }
1383         if (uio->uio_resid == 0) {
1384                 return (0);
1385         }
1386 
1387         if (!ULOCKFS_IS_NOIACC(ITOUL(ip)) && (fs->fs_ronly == 0) &&
1388             (!ufsvfsp->vfs_noatime)) {
1389                 mutex_enter(&ip->i_tlock);
1390                 ip->i_flag |= IACC;
1391                 mutex_exit(&ip->i_tlock);
1392         }
1393         /*
1394          * Try to go direct
1395          */
1396         if (ip->i_flag & IDIRECTIO || ufsvfsp->vfs_forcedirectio) {
1397                 error = ufs_directio_read(ip, uio, cr, &directio_status);
1398                 if (directio_status == DIRECTIO_SUCCESS)
1399                         goto out;
1400         }
1401 
1402         rwtype = (rw_write_held(&ip->i_contents)?RW_WRITER:RW_READER);
1403 
1404         do {
1405                 offset_t diff;
1406                 u_offset_t uoff = uio->uio_loffset;
1407                 off = uoff & (offset_t)MAXBMASK;
1408                 mapon = (u_offset_t)(uoff & (offset_t)MAXBOFFSET);
1409                 on = (u_offset_t)blkoff(fs, uoff);
1410                 n = MIN((u_offset_t)fs->fs_bsize - on,
1411                     (u_offset_t)uio->uio_resid);
1412 
1413                 diff = ip->i_size - uoff;
1414 
1415                 if (diff <= (offset_t)0) {
1416                         error = 0;
1417                         goto out;
1418                 }
1419                 if (diff < (offset_t)n)
1420                         n = (int)diff;
1421 
1422                 /*
1423                  * We update smallfile2 and smallfile1 at most every second.
1424                  */
1425                 now = ddi_get_lbolt();
1426                 if (now >= smallfile_update) {
1427                         uint64_t percpufreeb;
1428                         if (smallfile1_d == 0) smallfile1_d = SMALLFILE1_D;
1429                         if (smallfile2_d == 0) smallfile2_d = SMALLFILE2_D;
1430                         percpufreeb = ptob((uint64_t)freemem) / ncpus_online;
1431                         smallfile1 = percpufreeb / smallfile1_d;
1432                         smallfile2 = percpufreeb / smallfile2_d;
1433                         smallfile1 = MAX(smallfile1, smallfile);
1434                         smallfile1 = MAX(smallfile1, smallfile64);
1435                         smallfile2 = MAX(smallfile1, smallfile2);
1436                         smallfile_update = now + hz;
1437                 }
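                     /*
                      * Illustrative arithmetic (example figures only): with
                      * 8 GB of free memory and 8 CPUs online, percpufreeb is
                      * 1 GB, so smallfile1 and smallfile2 become 1 GB divided
                      * by their respective divisors, floored at the static
                      * smallfile and smallfile64 values.  The free-behind
                      * decision below only applies to sequential reads whose
                      * offset is beyond smallfile1.
                      */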
1438 
1439                 dofree = freebehind &&
1440                     ip->i_nextr == (off & PAGEMASK) && off > smallfile1;
1441 
1442                 /*
1443                  * At this point we can enter ufs_getpage() in one of two
1444                  * ways:
1445                  * 1) segmap_getmapflt() calls ufs_getpage() when the
1446                  *    forcefault parameter is true (value of 1 is passed)
1447                  * 2) uiomove() causes a page fault.
1448                  *
1449                  * We cannot hold onto an i_contents reader lock without
1450                  * risking deadlock in ufs_getpage() so drop a reader lock.
1451                  * The ufs_getpage() dolock logic already allows for a
1452                  * thread holding i_contents as writer to work properly
1453                  * so we keep a writer lock.
1454                  */
1455                 if (rwtype == RW_READER)
1456                         rw_exit(&ip->i_contents);
1457 
1458                 if (vpm_enable) {
1459                         /*
1460                          * Copy data.
1461                          */
1462                         error = vpm_data_copy(vp, (off + mapon), (uint_t)n,
1463                             uio, 1, NULL, 0, S_READ);
1464                 } else {
1465                         base = segmap_getmapflt(segkmap, vp, (off + mapon),
1466                             (uint_t)n, 1, S_READ);
1467                         error = uiomove(base + mapon, (long)n, UIO_READ, uio);
1468                 }
1469 
1470                 flags = 0;
1471                 if (!error) {
1472                         /*
1473                          * If reading sequentially we won't need this
1474                          * buffer again soon.  For offsets in the range
1475                          * [smallfile1, smallfile2] release the pages at
1476                          * the tail of the cache list; larger offsets
1477                          * are released at the head.
1478                          */
1479                         if (dofree) {
1480                                 flags = SM_FREE | SM_ASYNC;
1481                                 if ((cache_read_ahead == 0) &&
1482                                     (off > smallfile2))
1483                                         flags |=  SM_DONTNEED;
1484                         }
1485                         /*
1486                          * In POSIX SYNC (FSYNC and FDSYNC) read mode,
1487                          * we want to make sure that the page which has
1488                          * been read is written to disk if it is dirty,
1489                          * and that the corresponding indirect blocks
1490                          * are flushed out as well.
1491                          */
1492                         if ((ioflag & FRSYNC) && (ioflag & (FSYNC|FDSYNC))) {
1493                                 flags &= ~SM_ASYNC;
1494                                 flags |= SM_WRITE;
1495                         }
1496                         if (vpm_enable) {
1497                                 error = vpm_sync_pages(vp, off, n, flags);
1498                         } else {
1499                                 error = segmap_release(segkmap, base, flags);
1500                         }
1501                 } else {
1502                         if (vpm_enable) {
1503                                 (void) vpm_sync_pages(vp, off, n, flags);
1504                         } else {
1505                                 (void) segmap_release(segkmap, base, flags);
1506                         }
1507                 }
1508 
1509                 if (rwtype == RW_READER)
1510                         rw_enter(&ip->i_contents, rwtype);
1511         } while (error == 0 && uio->uio_resid > 0 && n != 0);
1512 out:
1513         /*
1514          * Inode is updated according to this table if FRSYNC is set.
1515          *
1516          *   FSYNC        FDSYNC(posix.4)
1517          *   --------------------------
1518          *   always       IATTCHG|IBDWRITE
1519          */
1520         /*
1521          * The inode is not updated if we're logging and the inode is a
1522          * directory with FRSYNC, FSYNC and FDSYNC flags set.
1523          */
1524         if (ioflag & FRSYNC) {
1525                 if (TRANS_ISTRANS(ufsvfsp) && ((ip->i_mode & IFMT) == IFDIR)) {
1526                         doupdate = 0;
1527                 }
1528                 if (doupdate) {
1529                         if ((ioflag & FSYNC) ||
1530                             ((ioflag & FDSYNC) &&
1531                             (ip->i_flag & (IATTCHG|IBDWRITE)))) {
1532                                 ufs_iupdat(ip, 1);
1533                         }
1534                 }
1535         }
1536         /*
1537          * If we've already done a partial read, terminate
1538          * the read but return no error.
1539          */
1540         if (oresid != uio->uio_resid)
1541                 error = 0;
1542         ITIMES(ip);
1543 
1544         return (error);
1545 }
1546 
1547 /* ARGSUSED */
1548 static int
1549 ufs_ioctl(
1550         struct vnode    *vp,
1551         int             cmd,
1552         intptr_t        arg,
1553         int             flag,
1554         struct cred     *cr,
1555         int             *rvalp,
1556         caller_context_t *ct)
1557 {
1558         struct lockfs   lockfs, lockfs_out;
1559         struct ufsvfs   *ufsvfsp = VTOI(vp)->i_ufsvfs;
1560         char            *comment, *original_comment;
1561         struct fs       *fs;
1562         struct ulockfs  *ulp;
1563         offset_t        off;
1564         extern int      maxphys;
1565         int             error;
1566         int             issync;
1567         int             trans_size;
1568 
1569 
1570         /*
1571          * forcibly unmounted
1572          */
1573         if (ufsvfsp == NULL || vp->v_vfsp == NULL ||
1574             vp->v_vfsp->vfs_flag & VFS_UNMOUNTED)
1575                 return (EIO);
1576         fs = ufsvfsp->vfs_fs;
1577 
1578         if (cmd == Q_QUOTACTL) {
1579                 error = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_QUOTA_MASK);
1580                 if (error)
1581                         return (error);
1582 
1583                 if (ulp) {
1584                         TRANS_BEGIN_ASYNC(ufsvfsp, TOP_QUOTA,
1585                             TOP_SETQUOTA_SIZE(fs));
1586                 }
1587 
1588                 error = quotactl(vp, arg, flag, cr);
1589 
1590                 if (ulp) {
1591                         TRANS_END_ASYNC(ufsvfsp, TOP_QUOTA,
1592                             TOP_SETQUOTA_SIZE(fs));
1593                         ufs_lockfs_end(ulp);
1594                 }
1595                 return (error);
1596         }
1597 
1598         switch (cmd) {
1599                 case _FIOLFS:
1600                         /*
1601                          * file system locking
1602                          */
1603                         if (secpolicy_fs_config(cr, ufsvfsp->vfs_vfs) != 0)
1604                                 return (EPERM);
1605 
1606                         if ((flag & DATAMODEL_MASK) == DATAMODEL_NATIVE) {
1607                                 if (copyin((caddr_t)arg, &lockfs,
1608                                     sizeof (struct lockfs)))
1609                                         return (EFAULT);
1610                         }
1611 #ifdef _SYSCALL32_IMPL
1612                         else {
1613                                 struct lockfs32 lockfs32;
1614                                 /* Translate ILP32 lockfs to LP64 lockfs */
1615                                 if (copyin((caddr_t)arg, &lockfs32,
1616                                     sizeof (struct lockfs32)))
1617                                         return (EFAULT);
1618                                 lockfs.lf_lock = (ulong_t)lockfs32.lf_lock;
1619                                 lockfs.lf_flags = (ulong_t)lockfs32.lf_flags;
1620                                 lockfs.lf_key = (ulong_t)lockfs32.lf_key;
1621                                 lockfs.lf_comlen = (ulong_t)lockfs32.lf_comlen;
1622                                 lockfs.lf_comment =
1623                                     (caddr_t)(uintptr_t)lockfs32.lf_comment;
1624                         }
1625 #endif /* _SYSCALL32_IMPL */
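                             /*
                              * From this point the native struct lockfs is
                              * used for either data model: the ILP32 fields
                              * above were widened to the native layout, and
                              * lf_comment was recovered through uintptr_t so
                              * the copyin() of the comment below works for
                              * 32-bit and 64-bit callers alike.
                              */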
1626 
1627                         if (lockfs.lf_comlen) {
1628                                 if (lockfs.lf_comlen > LOCKFS_MAXCOMMENTLEN)
1629                                         return (ENAMETOOLONG);
1630                                 comment =
1631                                     kmem_alloc(lockfs.lf_comlen, KM_SLEEP);
1632                                 if (copyin(lockfs.lf_comment, comment,
1633                                     lockfs.lf_comlen)) {
1634                                         kmem_free(comment, lockfs.lf_comlen);
1635                                         return (EFAULT);
1636                                 }
1637                                 original_comment = lockfs.lf_comment;
1638                                 lockfs.lf_comment = comment;
1639                         }
1640                         if ((error = ufs_fiolfs(vp, &lockfs, 0)) == 0) {
1641                                 lockfs.lf_comment = original_comment;
1642 
1643                                 if ((flag & DATAMODEL_MASK) ==
1644                                     DATAMODEL_NATIVE) {
1645                                         (void) copyout(&lockfs, (caddr_t)arg,
1646                                             sizeof (struct lockfs));
1647                                 }
1648 #ifdef _SYSCALL32_IMPL
1649                                 else {
1650                                         struct lockfs32 lockfs32;
1651                                         /* Translate LP64 to ILP32 lockfs */
1652                                         lockfs32.lf_lock =
1653                                             (uint32_t)lockfs.lf_lock;
1654                                         lockfs32.lf_flags =
1655                                             (uint32_t)lockfs.lf_flags;
1656                                         lockfs32.lf_key =
1657                                             (uint32_t)lockfs.lf_key;
1658                                         lockfs32.lf_comlen =
1659                                             (uint32_t)lockfs.lf_comlen;
1660                                         lockfs32.lf_comment =
1661                                             (uint32_t)(uintptr_t)
1662                                             lockfs.lf_comment;
1663                                         (void) copyout(&lockfs32, (caddr_t)arg,
1664                                             sizeof (struct lockfs32));
1665                                 }
1666 #endif /* _SYSCALL32_IMPL */
1667 
1668                         } else {
1669                                 if (lockfs.lf_comlen)
1670                                         kmem_free(comment, lockfs.lf_comlen);
1671                         }
1672                         return (error);
1673 
1674                 case _FIOLFSS:
1675                         /*
1676                          * get file system locking status
1677                          */
1678 
1679                         if ((flag & DATAMODEL_MASK) == DATAMODEL_NATIVE) {
1680                                 if (copyin((caddr_t)arg, &lockfs,
1681                                     sizeof (struct lockfs)))
1682                                         return (EFAULT);
1683                         }
1684 #ifdef _SYSCALL32_IMPL
1685                         else {
1686                                 struct lockfs32 lockfs32;
1687                                 /* Translate ILP32 lockfs to LP64 lockfs */
1688                                 if (copyin((caddr_t)arg, &lockfs32,
1689                                     sizeof (struct lockfs32)))
1690                                         return (EFAULT);
1691                                 lockfs.lf_lock = (ulong_t)lockfs32.lf_lock;
1692                                 lockfs.lf_flags = (ulong_t)lockfs32.lf_flags;
1693                                 lockfs.lf_key = (ulong_t)lockfs32.lf_key;
1694                                 lockfs.lf_comlen = (ulong_t)lockfs32.lf_comlen;
1695                                 lockfs.lf_comment =
1696                                     (caddr_t)(uintptr_t)lockfs32.lf_comment;
1697                         }
1698 #endif /* _SYSCALL32_IMPL */
1699 
1700                         if (error = ufs_fiolfss(vp, &lockfs_out))
1701                                 return (error);
1702                         lockfs.lf_lock = lockfs_out.lf_lock;
1703                         lockfs.lf_key = lockfs_out.lf_key;
1704                         lockfs.lf_flags = lockfs_out.lf_flags;
1705                         lockfs.lf_comlen = MIN(lockfs.lf_comlen,
1706                             lockfs_out.lf_comlen);
1707 
1708                         if ((flag & DATAMODEL_MASK) == DATAMODEL_NATIVE) {
1709                                 if (copyout(&lockfs, (caddr_t)arg,
1710                                     sizeof (struct lockfs)))
1711                                         return (EFAULT);
1712                         }
1713 #ifdef _SYSCALL32_IMPL
1714                         else {
1715                                 /* Translate LP64 to ILP32 lockfs */
1716                                 struct lockfs32 lockfs32;
1717                                 lockfs32.lf_lock = (uint32_t)lockfs.lf_lock;
1718                                 lockfs32.lf_flags = (uint32_t)lockfs.lf_flags;
1719                                 lockfs32.lf_key = (uint32_t)lockfs.lf_key;
1720                                 lockfs32.lf_comlen = (uint32_t)lockfs.lf_comlen;
1721                                 lockfs32.lf_comment =
1722                                     (uint32_t)(uintptr_t)lockfs.lf_comment;
1723                                 if (copyout(&lockfs32, (caddr_t)arg,
1724                                     sizeof (struct lockfs32)))
1725                                         return (EFAULT);
1726                         }
1727 #endif /* _SYSCALL32_IMPL */
1728 
1729                         if (lockfs.lf_comlen &&
1730                             lockfs.lf_comment && lockfs_out.lf_comment)
1731                                 if (copyout(lockfs_out.lf_comment,
1732                                     lockfs.lf_comment, lockfs.lf_comlen))
1733                                         return (EFAULT);
1734                         return (0);
1735 
1736                 case _FIOSATIME:
1737                         /*
1738                          * set access time
1739                          */
1740 
1741                         /*
1742                          * if mounted w/o atime, return quietly.
1743                          * I briefly thought about returning ENOSYS, but
1744                          * figured that most apps would consider this fatal;
1745                          * the idea is to make this as seamless as possible.
1746                          */
1747                         if (ufsvfsp->vfs_noatime)
1748                                 return (0);
1749 
1750                         error = ufs_lockfs_begin(ufsvfsp, &ulp,
1751                             ULOCKFS_SETATTR_MASK);
1752                         if (error)
1753                                 return (error);
1754 
1755                         if (ulp) {
1756                                 trans_size = (int)TOP_SETATTR_SIZE(VTOI(vp));
1757                                 TRANS_BEGIN_CSYNC(ufsvfsp, issync,
1758                                     TOP_SETATTR, trans_size);
1759                         }
1760 
1761                         error = ufs_fiosatime(vp, (struct timeval *)arg,
1762                             flag, cr);
1763 
1764                         if (ulp) {
1765                                 TRANS_END_CSYNC(ufsvfsp, error, issync,
1766                                     TOP_SETATTR, trans_size);
1767                                 ufs_lockfs_end(ulp);
1768                         }
1769                         return (error);
1770 
1771                 case _FIOSDIO:
1772                         /*
1773                          * set delayed-io
1774                          */
1775                         return (ufs_fiosdio(vp, (uint_t *)arg, flag, cr));
1776 
1777                 case _FIOGDIO:
1778                         /*
1779                          * get delayed-io
1780                          */
1781                         return (ufs_fiogdio(vp, (uint_t *)arg, flag, cr));
1782 
1783                 case _FIOIO:
1784                         /*
1785                          * inode open
1786                          */
1787                         error = ufs_lockfs_begin(ufsvfsp, &ulp,
1788                             ULOCKFS_VGET_MASK);
1789                         if (error)
1790                                 return (error);
1791 
1792                         error = ufs_fioio(vp, (struct fioio *)arg, flag, cr);
1793 
1794                         if (ulp) {
1795                                 ufs_lockfs_end(ulp);
1796                         }
1797                         return (error);
1798 
1799                 case _FIOFFS:
1800                         /*
1801                          * file system flush (push w/invalidate)
1802                          */
1803                         if ((caddr_t)arg != NULL)
1804                                 return (EINVAL);
1805                         return (ufs_fioffs(vp, NULL, cr));
1806 
1807                 case _FIOISBUSY:
1808                         /*
1809                          * Contract-private interface for Legato.
1810                          * Purge this vnode from the DNLC and decide
1811                          * if this vnode is busy (*arg == 1) or not
1812                          * (*arg == 0)
1813                          */
1814                         if (secpolicy_fs_config(cr, ufsvfsp->vfs_vfs) != 0)
1815                                 return (EPERM);
1816                         error = ufs_fioisbusy(vp, (int *)arg, cr);
1817                         return (error);
1818 
1819                 case _FIODIRECTIO:
1820                         return (ufs_fiodirectio(vp, (int)arg, cr));
1821 
1822                 case _FIOTUNE:
1823                         /*
1824                          * Tune the file system (aka setting fs attributes)
1825                          */
1826                         error = ufs_lockfs_begin(ufsvfsp, &ulp,
1827                             ULOCKFS_SETATTR_MASK);
1828                         if (error)
1829                                 return (error);
1830 
1831                         error = ufs_fiotune(vp, (struct fiotune *)arg, cr);
1832 
1833                         if (ulp)
1834                                 ufs_lockfs_end(ulp);
1835                         return (error);
1836 
1837                 case _FIOLOGENABLE:
1838                         if (secpolicy_fs_config(cr, ufsvfsp->vfs_vfs) != 0)
1839                                 return (EPERM);
1840                         return (ufs_fiologenable(vp, (void *)arg, cr, flag));
1841 
1842                 case _FIOLOGDISABLE:
1843                         if (secpolicy_fs_config(cr, ufsvfsp->vfs_vfs) != 0)
1844                                 return (EPERM);
1845                         return (ufs_fiologdisable(vp, (void *)arg, cr, flag));
1846 
1847                 case _FIOISLOG:
1848                         return (ufs_fioislog(vp, (void *)arg, cr, flag));
1849 
1850                 case _FIOSNAPSHOTCREATE_MULTI:
1851                 {
1852                         struct fiosnapcreate_multi      fc, *fcp;
1853                         size_t  fcm_size;
1854 
1855                         if (copyin((void *)arg, &fc, sizeof (fc)))
1856                                 return (EFAULT);
1857                         if (fc.backfilecount > MAX_BACKFILE_COUNT)
1858                                 return (EINVAL);
1859                         fcm_size = sizeof (struct fiosnapcreate_multi) +
1860                             (fc.backfilecount - 1) * sizeof (int);
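                             /*
                              * The size covers backfilecount descriptors; the
                              * (backfilecount - 1) adjustment reflects the one
                              * element already declared in the structure (an
                              * assumption about its layout inferred from this
                              * sizing).
                              */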
1861                         fcp = (struct fiosnapcreate_multi *)
1862                             kmem_alloc(fcm_size, KM_SLEEP);
1863                         if (copyin((void *)arg, fcp, fcm_size)) {
1864                                 kmem_free(fcp, fcm_size);
1865                                 return (EFAULT);
1866                         }
1867                         error = ufs_snap_create(vp, fcp, cr);
1868                         /*
1869                          * Do copyout even if there is an error because
1870                          * the details of the error are stored in fcp.
1871                          */
1872                         if (copyout(fcp, (void *)arg, fcm_size))
1873                                 error = EFAULT;
1874                         kmem_free(fcp, fcm_size);
1875                         return (error);
1876                 }
1877 
1878                 case _FIOSNAPSHOTDELETE:
1879                 {
1880                         struct fiosnapdelete    fc;
1881 
1882                         if (copyin((void *)arg, &fc, sizeof (fc)))
1883                                 return (EFAULT);
1884                         error = ufs_snap_delete(vp, &fc, cr);
1885                         if (!error && copyout(&fc, (void *)arg, sizeof (fc)))
1886                                 error = EFAULT;
1887                         return (error);
1888                 }
1889 
1890                 case _FIOGETSUPERBLOCK:
1891                         if (copyout(fs, (void *)arg, SBSIZE))
1892                                 return (EFAULT);
1893                         return (0);
1894 
1895                 case _FIOGETMAXPHYS:
1896                         if (copyout(&maxphys, (void *)arg, sizeof (maxphys)))
1897                                 return (EFAULT);
1898                         return (0);
1899 
1900                 /*
1901                  * The following 3 ioctls are for TSufs support
1902                  * although they could potentially be used elsewhere
1903                  */
1904                 case _FIO_SET_LUFS_DEBUG:
1905                         if (secpolicy_fs_config(cr, ufsvfsp->vfs_vfs) != 0)
1906                                 return (EPERM);
1907                         lufs_debug = (uint32_t)arg;
1908                         return (0);
1909 
1910                 case _FIO_SET_LUFS_ERROR:
1911                         if (secpolicy_fs_config(cr, ufsvfsp->vfs_vfs) != 0)
1912                                 return (EPERM);
1913                         TRANS_SETERROR(ufsvfsp);
1914                         return (0);
1915 
1916                 case _FIO_GET_TOP_STATS:
1917                 {
1918                         fio_lufs_stats_t *ls;
1919                         ml_unit_t *ul = ufsvfsp->vfs_log;
1920 
1921                         ls = kmem_zalloc(sizeof (*ls), KM_SLEEP);
1922                         ls->ls_debug = ul->un_debug; /* return debug value */
1923                         /* Copy structure if statistics are being kept */
1924                         if (ul->un_logmap->mtm_tops) {
1925                                 ls->ls_topstats = *(ul->un_logmap->mtm_tops);
1926                         }
1927                         error = 0;
1928                         if (copyout(ls, (void *)arg, sizeof (*ls)))
1929                                 error = EFAULT;
1930                         kmem_free(ls, sizeof (*ls));
1931                         return (error);
1932                 }
1933 
1934                 case _FIO_SEEK_DATA:
1935                 case _FIO_SEEK_HOLE:
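                             /*
                              * The offset argument is copied in and back out,
                              * so it is both the starting point of the search
                              * and the result.  A hypothetical caller might
                              * drive this roughly as follows (illustrative
                              * sketch only):
                              *
                              *         offset_t off = 0;
                              *         error = ioctl(fd, _FIO_SEEK_DATA, &off);
                              *         (on success, off holds the next data offset)
                              */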
1936                         if (ddi_copyin((void *)arg, &off, sizeof (off), flag))
1937                                 return (EFAULT);
1938                         /* offset parameter is in/out */
1939                         error = ufs_fio_holey(vp, cmd, &off);
1940                         if (error)
1941                                 return (error);
1942                         if (ddi_copyout(&off, (void *)arg, sizeof (off), flag))
1943                                 return (EFAULT);
1944                         return (0);
1945 
1946                 case _FIO_COMPRESSED:
1947                 {
1948                         /*
1949                          * This is a project-private ufs ioctl() to mark
1950                          * the inode as belonging to a compressed file.
1951                          * It is used to mark individual
1952                          * compressed files in a miniroot archive.
1953                          * The files compressed in this manner are
1954                          * automatically decompressed by the dcfs filesystem
1955                          * (via an interception in ufs_lookup - see decompvp())
1956                          * which is layered on top of ufs on a system running
1957                          * from the archive. See uts/common/fs/dcfs for details.
1958                          * This ioctl only marks the file as compressed - the
1959                          * actual compression is done by fiocompress (a
1960                          * userland utility) which invokes this ioctl().
1961                          */
1962                         struct inode *ip = VTOI(vp);
1963 
1964                         error = ufs_lockfs_begin(ufsvfsp, &ulp,
1965                             ULOCKFS_SETATTR_MASK);
1966                         if (error)
1967                                 return (error);
1968 
1969                         if (ulp) {
1970                                 TRANS_BEGIN_ASYNC(ufsvfsp, TOP_IUPDAT,
1971                                     TOP_IUPDAT_SIZE(ip));
1972                         }
1973 
1974                         error = ufs_mark_compressed(vp);
1975 
1976                         if (ulp) {
1977                                 TRANS_END_ASYNC(ufsvfsp, TOP_IUPDAT,
1978                                     TOP_IUPDAT_SIZE(ip));
1979                                 ufs_lockfs_end(ulp);
1980                         }
1981 
1982                         return (error);
1983 
1984                 }
1985 
1986                 default:
1987                         return (ENOTTY);
1988         }
1989 }
1990 
1991 
1992 /* ARGSUSED */
1993 static int
1994 ufs_getattr(struct vnode *vp, struct vattr *vap, int flags,
1995         struct cred *cr, caller_context_t *ct)
1996 {
1997         struct inode *ip = VTOI(vp);
1998         struct ufsvfs *ufsvfsp;
1999         int err;
2000 
2001         if (vap->va_mask == AT_SIZE) {
2002                 /*
2003                  * for performance, if only the size is requested don't bother
2004                  * with anything else.
2005                  */
2006                 UFS_GET_ISIZE(&vap->va_size, ip);
2007                 return (0);
2008         }
2009 
2010         /*
2011          * inlined lockfs checks
2012          */
2013         ufsvfsp = ip->i_ufsvfs;
2014         if ((ufsvfsp == NULL) || ULOCKFS_IS_HLOCK(&ufsvfsp->vfs_ulockfs)) {
2015                 err = EIO;
2016                 goto out;
2017         }
2018 
2019         rw_enter(&ip->i_contents, RW_READER);
2020         /*
2021          * Return all the attributes.  This should be refined so
2022          * that it only returns what's asked for.
2023          */
2024 
2025         /*
2026          * Copy from inode table.
2027          */
2028         vap->va_type = vp->v_type;
2029         vap->va_mode = ip->i_mode & MODEMASK;
2030         /*
2031          * If there is an ACL and there is a mask entry, then do the
2032          * extra work that completes the equivalent of an acltomode(3)
2033          * call.  According to POSIX P1003.1e, the acl mask should be
2034          * returned in the group permissions field.
2035          *
2036          * - start with the original permission and mode bits (from above)
2037          * - clear the group owner bits
2038          * - add in the mask bits.
2039          */
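             /*
              * For example (illustrative values only): a file with mode
              * rwxr-x--- and an ACL mask of rw- is reported below with group
              * bits rw-, i.e. va_mode shows rwxrw---- while i_mode keeps the
              * on-disk permission bits.
              */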
2040         if (ip->i_ufs_acl && ip->i_ufs_acl->aclass.acl_ismask) {
2041                 vap->va_mode &= ~((VREAD | VWRITE | VEXEC) >> 3);
2042                 vap->va_mode |=
2043                     (ip->i_ufs_acl->aclass.acl_maskbits & PERMMASK) << 3;
2044         }
2045         vap->va_uid = ip->i_uid;
2046         vap->va_gid = ip->i_gid;
2047         vap->va_fsid = ip->i_dev;
2048         vap->va_nodeid = (ino64_t)ip->i_number;
2049         vap->va_nlink = ip->i_nlink;
2050         vap->va_size = ip->i_size;
2051         if (vp->v_type == VCHR || vp->v_type == VBLK)
2052                 vap->va_rdev = ip->i_rdev;
2053         else
2054                 vap->va_rdev = 0;    /* not a block or char special */
2055         mutex_enter(&ip->i_tlock);
2056         ITIMES_NOLOCK(ip);      /* mark correct time in inode */
2057         vap->va_seq = ip->i_seq;
2058         vap->va_atime.tv_sec = (time_t)ip->i_atime.tv_sec;
2059         vap->va_atime.tv_nsec = ip->i_atime.tv_usec*1000;
2060         vap->va_mtime.tv_sec = (time_t)ip->i_mtime.tv_sec;
2061         vap->va_mtime.tv_nsec = ip->i_mtime.tv_usec*1000;
2062         vap->va_ctime.tv_sec = (time_t)ip->i_ctime.tv_sec;
2063         vap->va_ctime.tv_nsec = ip->i_ctime.tv_usec*1000;
2064         mutex_exit(&ip->i_tlock);
2065 
2066         switch (ip->i_mode & IFMT) {
2067 
2068         case IFBLK:
2069                 vap->va_blksize = MAXBSIZE;          /* was BLKDEV_IOSIZE */
2070                 break;
2071 
2072         case IFCHR:
2073                 vap->va_blksize = MAXBSIZE;
2074                 break;
2075 
2076         default:
2077                 vap->va_blksize = ip->i_fs->fs_bsize;
2078                 break;
2079         }
2080         vap->va_nblocks = (fsblkcnt64_t)ip->i_blocks;
2081         rw_exit(&ip->i_contents);
2082         err = 0;
2083 
2084 out:
2085         return (err);
2086 }
2087 
2088 /*
2089  * Special wrapper to provide a callback for secpolicy_vnode_setattr().
2090  * The i_contents lock is already held by the caller and we need to
2091  * declare the inode as a 'void *' argument.
2092  */
2093 static int
2094 ufs_priv_access(void *vip, int mode, struct cred *cr)
2095 {
2096         struct inode *ip = vip;
2097 
2098         return (ufs_iaccess(ip, mode, cr, 0));
2099 }
2100 
2101 /*ARGSUSED4*/
2102 static int
2103 ufs_setattr(
2104         struct vnode *vp,
2105         struct vattr *vap,
2106         int flags,
2107         struct cred *cr,
2108         caller_context_t *ct)
2109 {
2110         struct inode *ip = VTOI(vp);
2111         struct ufsvfs *ufsvfsp = ip->i_ufsvfs;
2112         struct fs *fs;
2113         struct ulockfs *ulp;
2114         char *errmsg1;
2115         char *errmsg2;
2116         long blocks;
2117         long int mask = vap->va_mask;
2118         size_t len1, len2;
2119         int issync;
2120         int trans_size;
2121         int dotrans;
2122         int dorwlock;
2123         int error;
2124         int owner_change;
2125         int dodqlock;
2126         timestruc_t now;
2127         vattr_t oldva;
2128         int retry = 1;
2129         int indeadlock;
2130 
2131         /*
2132          * Cannot set these attributes.
2133          */
2134         if ((mask & AT_NOSET) || (mask & AT_XVATTR))
2135                 return (EINVAL);
2136 
2137         /*
2138          * check for forced unmount
2139          */
2140         if (ufsvfsp == NULL)
2141                 return (EIO);
2142 
2143         fs = ufsvfsp->vfs_fs;
2144         if (fs->fs_ronly != 0)
2145                 return (EROFS);
2146 
2147 again:
2148         errmsg1 = NULL;
2149         errmsg2 = NULL;
2150         dotrans = 0;
2151         dorwlock = 0;
2152         dodqlock = 0;
2153 
2154         error = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_SETATTR_MASK);
2155         if (error)
2156                 goto out;
2157 
2158         /*
2159          * Acquire i_rwlock before TRANS_BEGIN_CSYNC() if this is a file.
2160          * This follows the protocol for read()/write().
2161          */
2162         if (vp->v_type != VDIR) {
2163                 /*
2164                  * ufs_tryirwlock uses rw_tryenter and checks for SLOCK to
2165                  * avoid an i_rwlock/ufs_lockfs_begin deadlock. If deadlock
2166                  * is possible, the operation is retried.
2167                  */
2168                 ufs_tryirwlock(&ip->i_rwlock, RW_WRITER, retry_file);
2169                 if (indeadlock) {
2170                         if (ulp)
2171                                 ufs_lockfs_end(ulp);
2172                         goto again;
2173                 }
2174                 dorwlock = 1;
2175         }
2176 
2177         /*
2178          * Truncate file.  Must have write permission and not be a directory.
2179          */
2180         if (mask & AT_SIZE) {
2181                 rw_enter(&ip->i_contents, RW_WRITER);
2182                 if (vp->v_type == VDIR) {
2183                         error = EISDIR;
2184                         goto update_inode;
2185                 }
2186                 if (error = ufs_iaccess(ip, IWRITE, cr, 0))
2187                         goto update_inode;
2188 
2189                 rw_exit(&ip->i_contents);
2190                 error = TRANS_ITRUNC(ip, vap->va_size, 0, cr);
2191                 if (error) {
2192                         rw_enter(&ip->i_contents, RW_WRITER);
2193                         goto update_inode;
2194                 }
2195 
2196                 if (error == 0) {
2197                         if (vap->va_size) {
2198                                 vnevent_truncate(vp, ct);
2199                         } else {
2200                                 vnevent_resize(vp, ct);
2201                         }
2202                 }
2203         }
2204 
2205         if (ulp) {
2206                 trans_size = (int)TOP_SETATTR_SIZE(ip);
2207                 TRANS_BEGIN_CSYNC(ufsvfsp, issync, TOP_SETATTR, trans_size);
2208                 ++dotrans;
2209         }
2210 
2211         /*
2212          * Acquire i_rwlock after TRANS_BEGIN_CSYNC() if this is a directory.
2213          * This follows the protocol established by
2214          * ufs_link/create/remove/rename/mkdir/rmdir/symlink.
2215          */
2216         if (vp->v_type == VDIR) {
2217                 ufs_tryirwlock_trans(&ip->i_rwlock, RW_WRITER, TOP_SETATTR,
2218                     retry_dir);
2219                 if (indeadlock)
2220                         goto again;
2221                 dorwlock = 1;
2222         }
2223 
2224         /*
2225          * Grab quota lock if we are changing the file's owner.
2226          */
2227         if (mask & AT_UID) {
2228                 rw_enter(&ufsvfsp->vfs_dqrwlock, RW_READER);
2229                 dodqlock = 1;
2230         }
2231         rw_enter(&ip->i_contents, RW_WRITER);
2232 
2233         oldva.va_mode = ip->i_mode;
2234         oldva.va_uid = ip->i_uid;
2235         oldva.va_gid = ip->i_gid;
2236 
2237         vap->va_mask &= ~AT_SIZE;
2238 
2239         error = secpolicy_vnode_setattr(cr, vp, vap, &oldva, flags,
2240             ufs_priv_access, ip);
2241         if (error)
2242                 goto update_inode;
2243 
2244         mask = vap->va_mask;
2245 
2246         /*
2247          * Change file access modes.
2248          */
2249         if (mask & AT_MODE) {
2250                 ip->i_mode = (ip->i_mode & IFMT) | (vap->va_mode & ~IFMT);
2251                 TRANS_INODE(ufsvfsp, ip);
2252                 ip->i_flag |= ICHG;
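                     /*
                      * With stickyhack enabled, a mode whose sticky bit is set
                      * but whose owner execute bit is clear (and which is not
                      * a directory) marks the vnode VSWAPLIKE, so its pages
                      * get the swap-file style write-back treatment; otherwise
                      * the flag is cleared.
                      */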
2253                 if (stickyhack) {
2254                         mutex_enter(&vp->v_lock);
2255                         if ((ip->i_mode & (ISVTX | IEXEC | IFDIR)) == ISVTX)
2256                                 vp->v_flag |= VSWAPLIKE;
2257                         else
2258                                 vp->v_flag &= ~VSWAPLIKE;
2259                         mutex_exit(&vp->v_lock);
2260                 }
2261         }
2262         if (mask & (AT_UID|AT_GID)) {
2263                 if (mask & AT_UID) {
2264                         /*
2265                          * Don't change ownership of the quota inode.
2266                          */
2267                         if (ufsvfsp->vfs_qinod == ip) {
2268                                 ASSERT(ufsvfsp->vfs_qflags & MQ_ENABLED);
2269                                 error = EINVAL;
2270                                 goto update_inode;
2271                         }
2272 
2273                         /*
2274                          * No real ownership change.
2275                          */
2276                         if (ip->i_uid == vap->va_uid) {
2277                                 blocks = 0;
2278                                 owner_change = 0;
2279                         }
2280                         /*
2281                          * Remove the blocks and the file from the old user's
2282                          * quota.
2283                          */
2284                         else {
2285                                 blocks = ip->i_blocks;
2286                                 owner_change = 1;
2287 
2288                                 (void) chkdq(ip, -blocks, /* force */ 1, cr,
2289                                     (char **)NULL, (size_t *)NULL);
2290                                 (void) chkiq(ufsvfsp, /* change */ -1, ip,
2291                                     (uid_t)ip->i_uid, /* force */ 1, cr,
2292                                     (char **)NULL, (size_t *)NULL);
2293                                 dqrele(ip->i_dquot);
2294                         }
2295 
2296                         ip->i_uid = vap->va_uid;
2297 
2298                         /*
2299                          * There is a real ownership change.
2300                          */
2301                         if (owner_change) {
2302                                 /*
2303                                  * Add the blocks and the file to the new
2304                                  * user's quota.
2305                                  */
2306                                 ip->i_dquot = getinoquota(ip);
2307                                 (void) chkdq(ip, blocks, /* force */ 1, cr,
2308                                     &errmsg1, &len1);
2309                                 (void) chkiq(ufsvfsp, /* change */ 1,
2310                                     (struct inode *)NULL, (uid_t)ip->i_uid,
2311                                     /* force */ 1, cr, &errmsg2, &len2);
2312                         }
2313                 }
2314                 if (mask & AT_GID) {
2315                         ip->i_gid = vap->va_gid;
2316                 }
2317                 TRANS_INODE(ufsvfsp, ip);
2318                 ip->i_flag |= ICHG;
2319         }
2320         /*
2321          * Change file access or modified times.
2322          */
2323         if (mask & (AT_ATIME|AT_MTIME)) {
2324                 /* Check that the time value is within ufs range */
2325                 if (((mask & AT_ATIME) && TIMESPEC_OVERFLOW(&vap->va_atime)) ||
2326                     ((mask & AT_MTIME) && TIMESPEC_OVERFLOW(&vap->va_mtime))) {
2327                         error = EOVERFLOW;
2328                         goto update_inode;
2329                 }
2330 
2331                 /*
2332                  * if the "noatime" mount option is set and only an atime
2333                  * update is requested, do nothing. No error is returned.
2334                  */
2335                 if ((ufsvfsp->vfs_noatime) &&
2336                     ((mask & (AT_ATIME|AT_MTIME)) == AT_ATIME))
2337                         goto skip_atime;
2338 
2339                 if (mask & AT_ATIME) {
2340                         ip->i_atime.tv_sec = vap->va_atime.tv_sec;
2341                         ip->i_atime.tv_usec = vap->va_atime.tv_nsec / 1000;
2342                         ip->i_flag &= ~IACC;
2343                 }
2344                 if (mask & AT_MTIME) {
2345                         ip->i_mtime.tv_sec = vap->va_mtime.tv_sec;
2346                         ip->i_mtime.tv_usec = vap->va_mtime.tv_nsec / 1000;
2347                         gethrestime(&now);
2348                         if (now.tv_sec > TIME32_MAX) {
2349                                 /*
2350                                  * In 2038, ctime sticks forever..
2351                                  */
2352                                 ip->i_ctime.tv_sec = TIME32_MAX;
2353                                 ip->i_ctime.tv_usec = 0;
2354                         } else {
2355                                 ip->i_ctime.tv_sec = now.tv_sec;
2356                                 ip->i_ctime.tv_usec = now.tv_nsec / 1000;
2357                         }
2358                         ip->i_flag &= ~(IUPD|ICHG);
2359                         ip->i_flag |= IMODTIME;
2360                 }
2361                 TRANS_INODE(ufsvfsp, ip);
2362                 ip->i_flag |= IMOD;
2363         }
2364 
2365 skip_atime:
2366         /*
2367          * The presence of a shadow inode may indicate an ACL, but does
2368          * not imply an ACL.  Future FSD types should be handled here too
2369          * and check for the presence of the attribute-specific data
2370          * before referencing it.
2371          */
2372         if (ip->i_shadow) {
2373                 /*
2374                  * XXX if ufs_iupdat is changed to sandbagged write fix
2375                  * ufs_acl_setattr to push ip to keep acls consistent
2376                  *
2377                  * Suppress out of inodes messages if we will retry.
2378                  */
2379                 if (retry)
2380                         ip->i_flag |= IQUIET;
2381                 error = ufs_acl_setattr(ip, vap, cr);
2382                 ip->i_flag &= ~IQUIET;
2383         }
2384 
2385 update_inode:
2386         /*
2387          * Setattr always increases the sequence number
2388          */
2389         ip->i_seq++;
2390 
2391         /*
2392          * if nfsd and not logging, push synchronously
2393          */
2394         if ((curthread->t_flag & T_DONTPEND) && !TRANS_ISTRANS(ufsvfsp)) {
2395                 ufs_iupdat(ip, 1);
2396         } else {
2397                 ITIMES_NOLOCK(ip);
2398         }
2399 
2400         rw_exit(&ip->i_contents);
2401         if (dodqlock) {
2402                 rw_exit(&ufsvfsp->vfs_dqrwlock);
2403         }
2404         if (dorwlock)
2405                 rw_exit(&ip->i_rwlock);
2406 
2407         if (ulp) {
2408                 if (dotrans) {
2409                         int terr = 0;
2410                         TRANS_END_CSYNC(ufsvfsp, terr, issync, TOP_SETATTR,
2411                             trans_size);
2412                         if (error == 0)
2413                                 error = terr;
2414                 }
2415                 ufs_lockfs_end(ulp);
2416         }
2417 out:
2418         /*
2419          * If out of inodes or blocks, see if we can free something
2420          * up from the delete queue.
2421          */
2422         if ((error == ENOSPC) && retry && TRANS_ISTRANS(ufsvfsp)) {
2423                 ufs_delete_drain_wait(ufsvfsp, 1);
2424                 retry = 0;
2425                 if (errmsg1 != NULL)
2426                         kmem_free(errmsg1, len1);
2427                 if (errmsg2 != NULL)
2428                         kmem_free(errmsg2, len2);
2429                 goto again;
2430         }
2431         if (errmsg1 != NULL) {
2432                 uprintf(errmsg1);
2433                 kmem_free(errmsg1, len1);
2434         }
2435         if (errmsg2 != NULL) {
2436                 uprintf(errmsg2);
2437                 kmem_free(errmsg2, len2);
2438         }
2439         return (error);
2440 }
2441 
2442 /*ARGSUSED*/
2443 static int
2444 ufs_access(struct vnode *vp, int mode, int flags, struct cred *cr,
2445         caller_context_t *ct)
2446 {
2447         struct inode *ip = VTOI(vp);
2448 
2449         if (ip->i_ufsvfs == NULL)
2450                 return (EIO);
2451 
2452         /*
2453          * The ufs_iaccess function wants to be called with
2454          * mode bits expressed as "ufs specific" bits.
2455          * I.e., VWRITE|VREAD|VEXEC do not make sense to
2456          * ufs_iaccess() but IWRITE|IREAD|IEXEC do.
2457          * But since they're the same we just pass the vnode mode
2458          * bits and verify that assumption at compile time.
2459          */
2460 #if IWRITE != VWRITE || IREAD != VREAD || IEXEC != VEXEC
2461 #error "ufs_access needs to map Vmodes to Imodes"
2462 #endif
2463         return (ufs_iaccess(ip, mode, cr, 1));
2464 }
2465 
2466 /* ARGSUSED */
2467 static int
2468 ufs_readlink(struct vnode *vp, struct uio *uiop, struct cred *cr,
2469         caller_context_t *ct)
2470 {
2471         struct inode *ip = VTOI(vp);
2472         struct ufsvfs *ufsvfsp;
2473         struct ulockfs *ulp;
2474         int error;
2475         int fastsymlink;
2476 
2477         if (vp->v_type != VLNK) {
2478                 error = EINVAL;
2479                 goto nolockout;
2480         }
2481 
2482         /*
2483          * If the symbolic link is empty there is nothing to read.
2484          * Fast-track these empty symbolic links.
2485          */
2486         if (ip->i_size == 0) {
2487                 error = 0;
2488                 goto nolockout;
2489         }
2490 
2491         ufsvfsp = ip->i_ufsvfs;
2492         error = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_READLINK_MASK);
2493         if (error)
2494                 goto nolockout;
2495         /*
2496          * The ip->i_rwlock protects the data blocks used for FASTSYMLINK
2497          */
2498 again:
2499         fastsymlink = 0;
2500         if (ip->i_flag & IFASTSYMLNK) {
2501                 rw_enter(&ip->i_rwlock, RW_READER);
2502                 rw_enter(&ip->i_contents, RW_READER);
2503                 if (ip->i_flag & IFASTSYMLNK) {
2504                         if (!ULOCKFS_IS_NOIACC(ITOUL(ip)) &&
2505                             (ip->i_fs->fs_ronly == 0) &&
2506                             (!ufsvfsp->vfs_noatime)) {
2507                                 mutex_enter(&ip->i_tlock);
2508                                 ip->i_flag |= IACC;
2509                                 mutex_exit(&ip->i_tlock);
2510                         }
2511                         error = uiomove((caddr_t)&ip->i_db[1],
2512                             MIN(ip->i_size, uiop->uio_resid),
2513                             UIO_READ, uiop);
2514                         ITIMES(ip);
2515                         ++fastsymlink;
2516                 }
2517                 rw_exit(&ip->i_contents);
2518                 rw_exit(&ip->i_rwlock);
2519         }
2520         if (!fastsymlink) {
2521                 ssize_t size;   /* number of bytes read  */
2522                 caddr_t basep;  /* pointer to input data */
2523                 ino_t ino;
2524                 long  igen;
2525                 struct uio tuio;        /* temp uio struct */
2526                 struct uio *tuiop;
2527                 iovec_t tiov;           /* temp iovec struct */
2528                 char kbuf[FSL_SIZE];    /* buffer to hold fast symlink */
2529                 int tflag = 0;          /* flag to indicate temp vars used */
2530 
2531                 ino = ip->i_number;
2532                 igen = ip->i_gen;
2533                 size = uiop->uio_resid;
2534                 basep = uiop->uio_iov->iov_base;
2535                 tuiop = uiop;
2536 
2537                 rw_enter(&ip->i_rwlock, RW_WRITER);
2538                 rw_enter(&ip->i_contents, RW_WRITER);
2539                 if (ip->i_flag & IFASTSYMLNK) {
2540                         rw_exit(&ip->i_contents);
2541                         rw_exit(&ip->i_rwlock);
2542                         goto again;
2543                 }
2544 
2545                 /* can this be a fast symlink and is it a user buffer? */
2546                 if (ip->i_size <= FSL_SIZE &&
2547                     (uiop->uio_segflg == UIO_USERSPACE ||
2548                     uiop->uio_segflg == UIO_USERISPACE)) {
2549 
2550                         bzero(&tuio, sizeof (struct uio));
2551                         /*
2552                          * Set up a kernel buffer to read the link into.
2553                          * This fixes a race where the user buffer could be
2554                          * corrupted before it is copied into the inode.
2555                          */
2556                         size = ip->i_size;
2557                         tiov.iov_len = size;
2558                         tiov.iov_base = kbuf;
2559                         tuio.uio_iov = &tiov;
2560                         tuio.uio_iovcnt = 1;
2561                         tuio.uio_offset = uiop->uio_offset;
2562                         tuio.uio_segflg = UIO_SYSSPACE;
2563                         tuio.uio_fmode = uiop->uio_fmode;
2564                         tuio.uio_extflg = uiop->uio_extflg;
2565                         tuio.uio_limit = uiop->uio_limit;
2566                         tuio.uio_resid = size;
2567 
2568                         basep = tuio.uio_iov->iov_base;
2569                         tuiop = &tuio;
2570                         tflag = 1;
2571                 }
2572 
2573                 error = rdip(ip, tuiop, 0, cr);
2574                 if (!(error == 0 && ip->i_number == ino && ip->i_gen == igen)) {
2575                         rw_exit(&ip->i_contents);
2576                         rw_exit(&ip->i_rwlock);
2577                         goto out;
2578                 }
2579 
2580                 if (tflag == 0)
2581                         size -= uiop->uio_resid;
2582 
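                     /*
                      * Convert to a fast symlink only if the whole target was
                      * read: either directly into the caller's buffer
                      * (tflag == 0) with exactly i_size bytes transferred, or
                      * completely into the kernel staging buffer (tflag == 1).
                      */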
2583                 if ((tflag == 0 && ip->i_size <= FSL_SIZE &&
2584                     ip->i_size == size) || (tflag == 1 &&
2585                     tuio.uio_resid == 0)) {
2586                         error = kcopy(basep, &ip->i_db[1], ip->i_size);
2587                         if (error == 0) {
2588                                 ip->i_flag |= IFASTSYMLNK;
2589                                 /*
2590                                  * free page
2591                                  */
2592                                 (void) VOP_PUTPAGE(ITOV(ip),
2593                                     (offset_t)0, PAGESIZE,
2594                                     (B_DONTNEED | B_FREE | B_FORCE | B_ASYNC),
2595                                     cr, ct);
2596                         } else {
2597                                 int i;
2598                                 /* error, clear garbage left behind */
2599                                 for (i = 1; i < NDADDR; i++)
2600                                         ip->i_db[i] = 0;
2601                                 for (i = 0; i < NIADDR; i++)
2602                                         ip->i_ib[i] = 0;
2603                         }
2604                 }
2605                 if (tflag == 1) {
2606                         /* now, copy it into the user buffer */
2607                         error = uiomove((caddr_t)kbuf,
2608                             MIN(size, uiop->uio_resid),
2609                             UIO_READ, uiop);
2610                 }
2611                 rw_exit(&ip->i_contents);
2612                 rw_exit(&ip->i_rwlock);
2613         }
2614 out:
2615         if (ulp) {
2616                 ufs_lockfs_end(ulp);
2617         }
2618 nolockout:
2619         return (error);
2620 }
2621 
2622 /* ARGSUSED */
2623 static int
2624 ufs_fsync(struct vnode *vp, int syncflag, struct cred *cr,
2625         caller_context_t *ct)
2626 {
2627         struct inode *ip = VTOI(vp);
2628         struct ufsvfs *ufsvfsp = ip->i_ufsvfs;
2629         struct ulockfs *ulp;
2630         int error;
2631 
2632         error = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_FSYNC_MASK);
2633         if (error)
2634                 return (error);
2635 
2636         if (TRANS_ISTRANS(ufsvfsp)) {
2637                 /*
2638                  * First push out any data pages
2639                  */
2640                 if (vn_has_cached_data(vp) && !(syncflag & FNODSYNC) &&
2641                     (vp->v_type != VCHR) && !(IS_SWAPVP(vp))) {
2642                         error = VOP_PUTPAGE(vp, (offset_t)0, (size_t)0,
2643                             0, CRED(), ct);
2644                         if (error)
2645                                 goto out;
2646                 }
2647 
2648                 /*
2649                  * Delta any delayed inode time updates
2650                  * and push the inode to the log.
2651                  * All other inode deltas will have already been delta'd
2652                  * and will be pushed during the commit.
2653                  */
2654                 if (!(syncflag & FDSYNC) &&
2655                     ((ip->i_flag & (IMOD|IMODACC)) == IMODACC)) {
2656                         if (ulp) {
2657                                 TRANS_BEGIN_ASYNC(ufsvfsp, TOP_FSYNC,
2658                                     TOP_SYNCIP_SIZE);
2659                         }
2660                         rw_enter(&ip->i_contents, RW_READER);
2661                         mutex_enter(&ip->i_tlock);
2662                         ip->i_flag &= ~IMODTIME;
2663                         mutex_exit(&ip->i_tlock);
2664                         ufs_iupdat(ip, I_SYNC);
2665                         rw_exit(&ip->i_contents);
2666                         if (ulp) {
2667                                 TRANS_END_ASYNC(ufsvfsp, TOP_FSYNC,
2668                                     TOP_SYNCIP_SIZE);
2669                         }
2670                 }
2671 
2672                 /*
2673                  * Commit the Moby transaction
2674                  *
2675                  * Deltas have already been made so we just need to
2676                  * commit them with a synchronous transaction.
2677                  * TRANS_BEGIN_SYNC() will return an error
2678                  * if there are no deltas to commit, for an
2679                  * empty transaction.
2680                  */
2681                 if (ulp) {
2682                         TRANS_BEGIN_SYNC(ufsvfsp, TOP_FSYNC, TOP_COMMIT_SIZE,
2683                             error);
2684                         if (error) {
2685                                 error = 0; /* commit wasn't needed */
2686                                 goto out;
2687                         }
2688                         TRANS_END_SYNC(ufsvfsp, error, TOP_FSYNC,
2689                             TOP_COMMIT_SIZE);
2690                 }
2691         } else {        /* not logging */
2692                 if (!(IS_SWAPVP(vp)))
2693                         if (syncflag & FNODSYNC) {
2694                                 /* Just update the inode only */
2695                                 TRANS_IUPDAT(ip, 1);
2696                                 error = 0;
2697                         } else if (syncflag & FDSYNC)
2698                                 /* Do data-synchronous writes */
2699                                 error = TRANS_SYNCIP(ip, 0, I_DSYNC, TOP_FSYNC);
2700                         else
2701                                 /* Do synchronous writes */
2702                                 error = TRANS_SYNCIP(ip, 0, I_SYNC, TOP_FSYNC);
2703 
2704                 rw_enter(&ip->i_contents, RW_WRITER);
2705                 if (!error)
2706                         error = ufs_sync_indir(ip);
2707                 rw_exit(&ip->i_contents);
2708         }
2709 out:
2710         if (ulp) {
2711                 ufs_lockfs_end(ulp);
2712         }
2713         return (error);
2714 }
2715 
2716 /*ARGSUSED*/
2717 static void
2718 ufs_inactive(struct vnode *vp, struct cred *cr, caller_context_t *ct)
2719 {
2720         ufs_iinactive(VTOI(vp));
2721 }
2722 
2723 /*
2724  * Unix file system operations having to do with directory manipulation.
2725  */
2726 int ufs_lookup_idle_count = 2;  /* Number of inodes to idle each time */
2727 /* ARGSUSED */
2728 static int
2729 ufs_lookup(struct vnode *dvp, char *nm, struct vnode **vpp,
2730         struct pathname *pnp, int flags, struct vnode *rdir, struct cred *cr,
2731         caller_context_t *ct, int *direntflags, pathname_t *realpnp)
2732 {
2733         struct inode *ip;
2734         struct inode *sip;
2735         struct inode *xip;
2736         struct ufsvfs *ufsvfsp;
2737         struct ulockfs *ulp;
2738         struct vnode *vp;
2739         int error;
2740 
2741         /*
2742          * Check flags for type of lookup (regular file or attribute file)
2743          */
2744 
2745         ip = VTOI(dvp);
2746 
2747         if (flags & LOOKUP_XATTR) {
2748 
2749                 /*
2750                  * If not mounted with XATTR support then return EINVAL
2751                  */
2752 
2753                 if (!(ip->i_ufsvfs->vfs_vfs->vfs_flag & VFS_XATTR))
2754                         return (EINVAL);
2755                 /*
2756                  * We don't allow recursive attributes...
2757                  * Maybe someday we will.
2758                  */
2759                 if ((ip->i_cflags & IXATTR)) {
2760                         return (EINVAL);
2761                 }
2762 
2763                 if ((vp = dnlc_lookup(dvp, XATTR_DIR_NAME)) == NULL) {
2764                         error = ufs_xattr_getattrdir(dvp, &sip, flags, cr);
2765                         if (error) {
2766                                 *vpp = NULL;
2767                                 goto out;
2768                         }
2769 
2770                         vp = ITOV(sip);
2771                         dnlc_update(dvp, XATTR_DIR_NAME, vp);
2772                 }
2773 
2774                 /*
2775                  * Check accessibility of directory.
2776                  */
2777                 if (vp == DNLC_NO_VNODE) {
2778                         VN_RELE(vp);
2779                         error = ENOENT;
2780                         goto out;
2781                 }
2782                 if ((error = ufs_iaccess(VTOI(vp), IEXEC, cr, 1)) != 0) {
2783                         VN_RELE(vp);
2784                         goto out;
2785                 }
2786 
2787                 *vpp = vp;
2788                 return (0);
2789         }
2790 
2791         /*
2792          * Check for a null component, which we should treat as
2793          * looking at dvp from within its parent, so we don't
2794          * need a call to ufs_iaccess(), as it has already been
2795          * done.
2796          */
2797         if (nm[0] == 0) {
2798                 VN_HOLD(dvp);
2799                 error = 0;
2800                 *vpp = dvp;
2801                 goto out;
2802         }
2803 
2804         /*
2805          * Check for ".", i.e. the directory itself. This is a quick
2806          * check and avoids adding "." into the dnlc (which has been
2807          * seen to occupy >10% of the cache).
2808          */
2809         if ((nm[0] == '.') && (nm[1] == 0)) {
2810                 /*
2811                  * Don't return without checking accessibility
2812                  * of the directory. We only need the lock if
2813                  * we are going to return it.
2814                  */
2815                 if ((error = ufs_iaccess(ip, IEXEC, cr, 1)) == 0) {
2816                         VN_HOLD(dvp);
2817                         *vpp = dvp;
2818                 }
2819                 goto out;
2820         }
2821 
2822         /*
2823          * Fast path: Check the directory name lookup cache.
2824          */
2825         if (vp = dnlc_lookup(dvp, nm)) {
2826                 /*
2827                  * Check accessibility of directory.
2828                  */
2829                 if ((error = ufs_iaccess(ip, IEXEC, cr, 1)) != 0) {
2830                         VN_RELE(vp);
2831                         goto out;
2832                 }
2833                 if (vp == DNLC_NO_VNODE) {
2834                         VN_RELE(vp);
2835                         error = ENOENT;
2836                         goto out;
2837                 }
2838                 xip = VTOI(vp);
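                     /*
                      * A dnlc hit needs no directory I/O, so no lockfs
                      * transaction is started here; ulp stays NULL and the
                      * ufs_lockfs_end() after "fastpath" is skipped.
                      */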
2839                 ulp = NULL;
2840                 goto fastpath;
2841         }
2842 
2843         /*
2844          * Keep the idle queue from getting too long by
2845          * idling two inodes before attempting to allocate another.
2846          * This operation must be performed before entering
2847          * lockfs or a transaction.
2848          */
2849         if (ufs_idle_q.uq_ne > ufs_idle_q.uq_hiwat)
2850                 if ((curthread->t_flag & T_DONTBLOCK) == 0) {
2851                         ins.in_lidles.value.ul += ufs_lookup_idle_count;
2852                         ufs_idle_some(ufs_lookup_idle_count);
2853                 }
2854 
2855 retry_lookup:
2856         /*
2857          * Check accessibility of directory.
2858          */
2859         if (error = ufs_diraccess(ip, IEXEC, cr))
2860                 goto out;
2861 
2862         ufsvfsp = ip->i_ufsvfs;
2863         error = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_LOOKUP_MASK);
2864         if (error)
2865                 goto out;
2866 
2867         error = ufs_dirlook(ip, nm, &xip, cr, 1, 0);
2868 
2869 fastpath:
2870         if (error == 0) {
2871                 ip = xip;
2872                 *vpp = ITOV(ip);
2873 
2874                 /*
2875                  * If vnode is a device return special vnode instead.
2876                  */
2877                 if (IS_DEVVP(*vpp)) {
2878                         struct vnode *newvp;
2879 
2880                         newvp = specvp(*vpp, (*vpp)->v_rdev, (*vpp)->v_type,
2881                             cr);
2882                         VN_RELE(*vpp);
2883                         if (newvp == NULL)
2884                                 error = ENOSYS;
2885                         else
2886                                 *vpp = newvp;
2887                 } else if (ip->i_cflags & ICOMPRESS) {
2888                         struct vnode *newvp;
2889 
2890                         /*
2891                          * Compressed file, substitute dcfs vnode
2892                          */
2893                         newvp = decompvp(*vpp, cr, ct);
2894                         VN_RELE(*vpp);
2895                         if (newvp == NULL)
2896                                 error = ENOSYS;
2897                         else
2898                                 *vpp = newvp;
2899                 }
2900         }
2901         if (ulp) {
2902                 ufs_lockfs_end(ulp);
2903         }
2904 
2905         if (error == EAGAIN)
2906                 goto retry_lookup;
2907 
2908 out:
2909         return (error);
2910 }
2911 
2912 /*ARGSUSED*/
2913 static int
2914 ufs_create(struct vnode *dvp, char *name, struct vattr *vap, enum vcexcl excl,
2915         int mode, struct vnode **vpp, struct cred *cr, int flag,
2916         caller_context_t *ct, vsecattr_t *vsecp)
2917 {
2918         struct inode *ip;
2919         struct inode *xip;
2920         struct inode *dip;
2921         struct vnode *xvp;
2922         struct ufsvfs *ufsvfsp;
2923         struct ulockfs *ulp;
2924         int error;
2925         int issync;
2926         int truncflag;
2927         int trans_size;
2928         int noentry;
2929         int defer_dip_seq_update = 0;   /* need to defer update of dip->i_seq */
2930         int retry = 1;
2931         int indeadlock;
2932 
2933 again:
2934         ip = VTOI(dvp);
2935         ufsvfsp = ip->i_ufsvfs;
2936         truncflag = 0;
2937 
2938         error = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_CREATE_MASK);
2939         if (error)
2940                 goto out;
2941 
2942         if (ulp) {
2943                 trans_size = (int)TOP_CREATE_SIZE(ip);
2944                 TRANS_BEGIN_CSYNC(ufsvfsp, issync, TOP_CREATE, trans_size);
2945         }
2946 
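             /*
              * Strip the sticky bit unless the caller is privileged to
              * set it.
              */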
2947         if ((vap->va_mode & VSVTX) && secpolicy_vnode_stky_modify(cr) != 0)
2948                 vap->va_mode &= ~VSVTX;
2949 
2950         if (*name == '\0') {
2951                 /*
2952                  * Null component name refers to the directory itself.
2953                  */
2954                 VN_HOLD(dvp);
2955                 /*
2956                  * Even though this is an error case, we need to grab the
2957                  * quota lock since the error handling code below is common.
2958                  */
2959                 rw_enter(&ufsvfsp->vfs_dqrwlock, RW_READER);
2960                 rw_enter(&ip->i_contents, RW_WRITER);
2961                 error = EEXIST;
2962         } else {
2963                 xip = NULL;
2964                 noentry = 0;
2965                 /*
2966                  * ufs_tryirwlock_trans uses rw_tryenter and checks for SLOCK
2967                  * to avoid a deadlock between i_rwlock and ufs_lockfs_begin.
2968                  * If a deadlock is possible, the operation is retried.
2969                  */
2970                 ufs_tryirwlock_trans(&ip->i_rwlock, RW_WRITER, TOP_CREATE,
2971                     retry_dir);
2972                 if (indeadlock)
2973                         goto again;
2974 
2975                 xvp = dnlc_lookup(dvp, name);
2976                 if (xvp == DNLC_NO_VNODE) {
2977                         noentry = 1;
2978                         VN_RELE(xvp);
2979                         xvp = NULL;
2980                 }
2981                 if (xvp) {
2982                         rw_exit(&ip->i_rwlock);
2983                         if (error = ufs_iaccess(ip, IEXEC, cr, 1)) {
2984                                 VN_RELE(xvp);
2985                         } else {
2986                                 error = EEXIST;
2987                                 xip = VTOI(xvp);
2988                         }
2989                 } else {
2990                         /*
2991                          * Suppress file system full message if we will retry
2992                          */
2993                         error = ufs_direnter_cm(ip, name, DE_CREATE,
2994                             vap, &xip, cr, (noentry | (retry ? IQUIET : 0)));
2995                         if (error == EAGAIN) {
2996                                 if (ulp) {
2997                                         TRANS_END_CSYNC(ufsvfsp, error, issync,
2998                                             TOP_CREATE, trans_size);
2999                                         ufs_lockfs_end(ulp);
3000                                 }
3001                                 goto again;
3002                         }
3003                         rw_exit(&ip->i_rwlock);
3004                 }
3005                 ip = xip;
3006                 if (ip != NULL) {
3007                         rw_enter(&ufsvfsp->vfs_dqrwlock, RW_READER);
3008                         rw_enter(&ip->i_contents, RW_WRITER);
3009                 }
3010         }
3011 
3012         /*
3013          * If the file already exists and this is a non-exclusive create,
3014          * check permissions and allow access for non-directories.
3015          * Read-only create of an existing directory is also allowed.
3016          * We fail an exclusive create of anything which already exists.
3017          */
3018         if (error == EEXIST) {
3019                 dip = VTOI(dvp);
3020                 if (excl == NONEXCL) {
3021                         if ((((ip->i_mode & IFMT) == IFDIR) ||
3022                             ((ip->i_mode & IFMT) == IFATTRDIR)) &&
3023                             (mode & IWRITE))
3024                                 error = EISDIR;
3025                         else if (mode)
3026                                 error = ufs_iaccess(ip, mode, cr, 0);
3027                         else
3028                                 error = 0;
3029                 }
3030                 if (error) {
3031                         rw_exit(&ip->i_contents);
3032                         rw_exit(&ufsvfsp->vfs_dqrwlock);
3033                         VN_RELE(ITOV(ip));
3034                         goto unlock;
3035                 }
3036                 /*
3037                  * If the error EEXIST was set, then i_seq cannot
3038                  * have been updated. The sequence number interface
3039                  * is defined such that a non-error VOP_CREATE must
3040                  * increase the dir va_seq by at least one. If we
3041                  * have cleared the error, increase i_seq. Note that
3042                  * we are increasing the dir i_seq and in rare cases
3043                  * ip may actually be from the dvp, so we already have
3044                  * the locks and it will not be subject to truncation.
3045                  * In case we have to update i_seq of the parent
3046                  * directory dip, we have to defer it till we have
3047                  * released our locks on ip due to lock ordering requirements.
3048                  */
3049                 if (ip != dip)
3050                         defer_dip_seq_update = 1;
3051                 else
3052                         ip->i_seq++;
3053 
3054                 if (((ip->i_mode & IFMT) == IFREG) &&
3055                     (vap->va_mask & AT_SIZE) && vap->va_size == 0) {
3056                         /*
3057                          * Truncate regular files, if requested by caller.
3058                          * Grab i_rwlock to make sure no one else is
3059                          * currently writing to the file (we promised
3060                          * bmap we would do this).
3061                          * Must get the locks in the correct order.
3062                          */
3063                         if (ip->i_size == 0) {
3064                                 ip->i_flag |= ICHG | IUPD;
3065                                 ip->i_seq++;
3066                                 TRANS_INODE(ufsvfsp, ip);
3067                         } else {
3068                                 /*
3069                                  * Large Files: Why this check here?
3070                                  * Though we do it in vn_create() we really
3071                                  * want to guarantee that we do not destroy
3072                                  * Large file data by atomically checking
3073                                  * the size while holding the contents
3074                                  * lock.
3075                                  */
3076                                 if (flag && !(flag & FOFFMAX) &&
3077                                     ((ip->i_mode & IFMT) == IFREG) &&
3078                                     (ip->i_size > (offset_t)MAXOFF32_T)) {
3079                                         rw_exit(&ip->i_contents);
3080                                         rw_exit(&ufsvfsp->vfs_dqrwlock);
3081                                         error = EOVERFLOW;
3082                                         goto unlock;
3083                                 }
3084                                 if (TRANS_ISTRANS(ufsvfsp))
3085                                         truncflag++;
3086                                 else {
3087                                         rw_exit(&ip->i_contents);
3088                                         rw_exit(&ufsvfsp->vfs_dqrwlock);
3089                                         ufs_tryirwlock_trans(&ip->i_rwlock,
3090                                             RW_WRITER, TOP_CREATE,
3091                                             retry_file);
3092                                         if (indeadlock) {
3093                                                 VN_RELE(ITOV(ip));
3094                                                 goto again;
3095                                         }
3096                                         rw_enter(&ufsvfsp->vfs_dqrwlock,
3097                                             RW_READER);
3098                                         rw_enter(&ip->i_contents, RW_WRITER);
3099                                         (void) ufs_itrunc(ip, (u_offset_t)0, 0,
3100                                             cr);
3101                                         rw_exit(&ip->i_rwlock);
3102                                 }
3103 
3104                         }
3105                         if (error == 0) {
3106                                 vnevent_create(ITOV(ip), ct);
3107                         }
3108                 }
3109         }
3110 
3111         if (error) {
3112                 if (ip != NULL) {
3113                         rw_exit(&ufsvfsp->vfs_dqrwlock);
3114                         rw_exit(&ip->i_contents);
3115                 }
3116                 goto unlock;
3117         }
3118 
3119         *vpp = ITOV(ip);
3120         ITIMES(ip);
3121         rw_exit(&ip->i_contents);
3122         rw_exit(&ufsvfsp->vfs_dqrwlock);
3123 
3124         /*
3125          * If vnode is a device return special vnode instead.
3126          */
3127         if (!error && IS_DEVVP(*vpp)) {
3128                 struct vnode *newvp;
3129 
3130                 newvp = specvp(*vpp, (*vpp)->v_rdev, (*vpp)->v_type, cr);
3131                 VN_RELE(*vpp);
3132                 if (newvp == NULL) {
3133                         error = ENOSYS;
3134                         goto unlock;
3135                 }
3136                 truncflag = 0;
3137                 *vpp = newvp;
3138         }
3139 unlock:
3140 
3141         /*
3142          * Do the deferred update of the parent directory's sequence
3143          * number now.
3144          */
3145         if (defer_dip_seq_update == 1) {
3146                 rw_enter(&dip->i_contents, RW_READER);
3147                 mutex_enter(&dip->i_tlock);
3148                 dip->i_seq++;
3149                 mutex_exit(&dip->i_tlock);
3150                 rw_exit(&dip->i_contents);
3151         }
3152 
3153         if (ulp) {
3154                 int terr = 0;
3155 
3156                 TRANS_END_CSYNC(ufsvfsp, terr, issync, TOP_CREATE,
3157                     trans_size);
3158 
3159                 /*
3160                  * If we haven't had a more interesting failure
3161                  * already, then anything that might've happened
3162                  * here should be reported.
3163                  */
3164                 if (error == 0)
3165                         error = terr;
3166         }
3167 
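             /*
              * For logging file systems the truncation was deferred above
              * (truncflag was set); do it now that the create transaction
              * has ended.
              */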
3168         if (!error && truncflag) {
3169                 ufs_tryirwlock(&ip->i_rwlock, RW_WRITER, retry_trunc);
3170                 if (indeadlock) {
3171                         if (ulp)
3172                                 ufs_lockfs_end(ulp);
3173                         VN_RELE(ITOV(ip));
3174                         goto again;
3175                 }
3176                 (void) TRANS_ITRUNC(ip, (u_offset_t)0, 0, cr);
3177                 rw_exit(&ip->i_rwlock);
3178         }
3179 
3180         if (ulp)
3181                 ufs_lockfs_end(ulp);
3182 
3183         /*
3184          * If no inodes available, try to free one up out of the
3185          * pending delete queue.
3186          */
3187         if ((error == ENOSPC) && retry && TRANS_ISTRANS(ufsvfsp)) {
3188                 ufs_delete_drain_wait(ufsvfsp, 1);
3189                 retry = 0;
3190                 goto again;
3191         }
3192 
3193 out:
3194         return (error);
3195 }
3196 
3197 extern int ufs_idle_max;
3198 /*ARGSUSED*/
3199 static int
3200 ufs_remove(struct vnode *vp, char *nm, struct cred *cr,
3201         caller_context_t *ct, int flags)
3202 {
3203         struct inode *ip = VTOI(vp);
3204         struct ufsvfs *ufsvfsp  = ip->i_ufsvfs;
3205         struct ulockfs *ulp;
3206         vnode_t *rmvp = NULL;   /* Vnode corresponding to name being removed */
3207         int indeadlock;
3208         int error;
3209         int issync;
3210         int trans_size;
3211 
3212         /*
3213          * don't let the delete queue get too long
3214          */
3215         if (ufsvfsp == NULL) {
3216                 error = EIO;
3217                 goto out;
3218         }
3219         if (ufsvfsp->vfs_delete.uq_ne > ufs_idle_max)
3220                 ufs_delete_drain(vp->v_vfsp, 1, 1);
3221 
3222         error = ufs_eventlookup(vp, nm, cr, &rmvp);
3223         if (rmvp != NULL) {
3224                 /* Only send the event if there were no errors */
3225                 if (error == 0)
3226                         vnevent_remove(rmvp, vp, nm, ct);
3227                 VN_RELE(rmvp);
3228         }
3229 
3230 retry_remove:
3231         error = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_REMOVE_MASK);
3232         if (error)
3233                 goto out;
3234 
3235         if (ulp)
3236                 TRANS_BEGIN_CSYNC(ufsvfsp, issync, TOP_REMOVE,
3237                     trans_size = (int)TOP_REMOVE_SIZE(VTOI(vp)));
3238 
3239         /*
3240          * ufs_tryirwlock_trans uses rw_tryenter and checks for SLOCK
3241          * to avoid a deadlock between i_rwlock and ufs_lockfs_begin.
3242          * If a deadlock is possible, the operation is retried.
3243          */
3244         ufs_tryirwlock_trans(&ip->i_rwlock, RW_WRITER, TOP_REMOVE, retry);
3245         if (indeadlock)
3246                 goto retry_remove;
3247         error = ufs_dirremove(ip, nm, (struct inode *)0, (struct vnode *)0,
3248             DR_REMOVE, cr);
3249         rw_exit(&ip->i_rwlock);
3250 
3251         if (ulp) {
3252                 TRANS_END_CSYNC(ufsvfsp, error, issync, TOP_REMOVE, trans_size);
3253                 ufs_lockfs_end(ulp);
3254         }
3255 
3256 out:
3257         return (error);
3258 }
3259 
3260 /*
3261  * Link a file or a directory.  Only privileged processes are allowed to
3262  * make links to directories.
3263  */
3264 /*ARGSUSED*/
3265 static int
3266 ufs_link(struct vnode *tdvp, struct vnode *svp, char *tnm, struct cred *cr,
3267         caller_context_t *ct, int flags)
3268 {
3269         struct inode *sip;
3270         struct inode *tdp = VTOI(tdvp);
3271         struct ufsvfs *ufsvfsp = tdp->i_ufsvfs;
3272         struct ulockfs *ulp;
3273         struct vnode *realvp;
3274         int error;
3275         int issync;
3276         int trans_size;
3277         int isdev;
3278         int indeadlock;
3279 
3280 retry_link:
3281         error = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_LINK_MASK);
3282         if (error)
3283                 goto out;
3284 
3285         if (ulp)
3286                 TRANS_BEGIN_CSYNC(ufsvfsp, issync, TOP_LINK,
3287                     trans_size = (int)TOP_LINK_SIZE(VTOI(tdvp)));
3288 
3289         if (VOP_REALVP(svp, &realvp, ct) == 0)
3290                 svp = realvp;
3291 
3292         /*
3293          * Make sure link for extended attributes is valid
3294          * We only support hard linking of attr in ATTRDIR to ATTRDIR
3295          *
3296          * Make certain we don't attempt to look at a device node as
3297          * a ufs inode.
3298          */
3299 
3300         isdev = IS_DEVVP(svp);
3301         if (((isdev == 0) && ((VTOI(svp)->i_cflags & IXATTR) == 0) &&
3302             ((tdp->i_mode & IFMT) == IFATTRDIR)) ||
3303             ((isdev == 0) && (VTOI(svp)->i_cflags & IXATTR) &&
3304             ((tdp->i_mode & IFMT) == IFDIR))) {
3305                 error = EINVAL;
3306                 goto unlock;
3307         }
3308 
3309         sip = VTOI(svp);
3310         if ((svp->v_type == VDIR &&
3311             secpolicy_fs_linkdir(cr, ufsvfsp->vfs_vfs) != 0) ||
3312             (sip->i_uid != crgetuid(cr) && secpolicy_basic_link(cr) != 0)) {
3313                 error = EPERM;
3314                 goto unlock;
3315         }
3316 
3317         /*
3318          * ufs_tryirwlock_trans uses rw_tryenter and checks for SLOCK
3319          * to avoid a deadlock between i_rwlock and ufs_lockfs_begin.
3320          * If a deadlock is possible, the operation is retried.
3321          */
3322         ufs_tryirwlock_trans(&tdp->i_rwlock, RW_WRITER, TOP_LINK, retry);
3323         if (indeadlock)
3324                 goto retry_link;
3325         error = ufs_direnter_lr(tdp, tnm, DE_LINK, (struct inode *)0,
3326             sip, cr);
3327         rw_exit(&tdp->i_rwlock);
3328 
3329 unlock:
3330         if (ulp) {
3331                 TRANS_END_CSYNC(ufsvfsp, error, issync, TOP_LINK, trans_size);
3332                 ufs_lockfs_end(ulp);
3333         }
3334 
3335         if (!error) {
3336                 vnevent_link(svp, ct);
3337         }
3338 out:
3339         return (error);
3340 }
3341 
3342 uint64_t ufs_rename_retry_cnt;
3343 uint64_t ufs_rename_upgrade_retry_cnt;
3344 uint64_t ufs_rename_dircheck_retry_cnt;
3345 clock_t  ufs_rename_backoff_delay = 1;
3346 
3347 /*
3348  * Rename a file or directory.
3349  * We are given the vnode and entry string of the source and the
3350  * vnode and entry string of the place we want to move the source
3351  * to (the target). The essential operation is:
3352  *      unlink(target);
3353  *      link(source, target);
3354  *      unlink(source);
3355  * but "atomically".  Can't do full commit without saving state in
3356  * the inode on disk, which isn't feasible at this time.  Best we
3357  * can do is always guarantee that the TARGET exists.
3358  */
3359 
3360 /*ARGSUSED*/
3361 static int
3362 ufs_rename(
3363         struct vnode *sdvp,             /* old (source) parent vnode */
3364         char *snm,                      /* old (source) entry name */
3365         struct vnode *tdvp,             /* new (target) parent vnode */
3366         char *tnm,                      /* new (target) entry name */
3367         struct cred *cr,
3368         caller_context_t *ct,
3369         int flags)
3370 {
3371         struct inode *sip = NULL;       /* source inode */
3372         struct inode *ip = NULL;        /* check inode */
3373         struct inode *sdp;              /* old (source) parent inode */
3374         struct inode *tdp;              /* new (target) parent inode */
3375         struct vnode *svp = NULL;       /* source vnode */
3376         struct vnode *tvp = NULL;       /* target vnode, if it exists */
3377         struct vnode *realvp;
3378         struct ufsvfs *ufsvfsp;
3379         struct ulockfs *ulp = NULL;
3380         struct ufs_slot slot;
3381         timestruc_t now;
3382         int error;
3383         int issync;
3384         int trans_size;
3385         krwlock_t *first_lock;
3386         krwlock_t *second_lock;
3387         krwlock_t *reverse_lock;
3388         int serr, terr;
3389 
3390         sdp = VTOI(sdvp);
3391         slot.fbp = NULL;
3392         ufsvfsp = sdp->i_ufsvfs;
3393 
3394         if (VOP_REALVP(tdvp, &realvp, ct) == 0)
3395                 tdvp = realvp;
3396 
3397         /* Must do this before taking locks in case of DNLC miss */
3398         terr = ufs_eventlookup(tdvp, tnm, cr, &tvp);
3399         serr = ufs_eventlookup(sdvp, snm, cr, &svp);
3400 
3401         if ((serr == 0) && ((terr == 0) || (terr == ENOENT))) {
3402                 if (tvp != NULL)
3403                         vnevent_pre_rename_dest(tvp, tdvp, tnm, ct);
3404 
3405                 /*
3406                  * Notify the target directory of the rename event
3407                  * if source and target directories are not the same.
3408                  */
3409                 if (sdvp != tdvp)
3410                         vnevent_pre_rename_dest_dir(tdvp, svp, tnm, ct);
3411 
3412                 if (svp != NULL)
3413                         vnevent_pre_rename_src(svp, sdvp, snm, ct);
3414         }
3415 
3416         if (svp != NULL)
3417                 VN_RELE(svp);
3418 
3419 retry_rename:
3420         error = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_RENAME_MASK);
3421         if (error)
3422                 goto unlock;
3423 
3424         if (ulp)
3425                 TRANS_BEGIN_CSYNC(ufsvfsp, issync, TOP_RENAME,
3426                     trans_size = (int)TOP_RENAME_SIZE(sdp));
3427 
3428         if (VOP_REALVP(tdvp, &realvp, ct) == 0)
3429                 tdvp = realvp;
3430 
3431         tdp = VTOI(tdvp);
3432 
3433         /*
3434          * We only allow renaming of attributes from ATTRDIR to ATTRDIR.
3435          */
3436         if ((tdp->i_mode & IFMT) != (sdp->i_mode & IFMT)) {
3437                 error = EINVAL;
3438                 goto unlock;
3439         }
3440 
3441         /*
3442          * Check accessibility of directory.
3443          */
3444         if (error = ufs_diraccess(sdp, IEXEC, cr))
3445                 goto unlock;
3446 
3447         /*
3448          * Look up inode of file we're supposed to rename.
3449          */
3450         gethrestime(&now);
3451         if (error = ufs_dirlook(sdp, snm, &sip, cr, 0, 0)) {
3452                 if (error == EAGAIN) {
3453                         if (ulp) {
3454                                 TRANS_END_CSYNC(ufsvfsp, error, issync,
3455                                     TOP_RENAME, trans_size);
3456                                 ufs_lockfs_end(ulp);
3457                         }
3458                         goto retry_rename;
3459                 }
3460 
3461                 goto unlock;
3462         }
3463 
3464         /*
3465          * Lock both the source and target directories (they may be
3466          * the same) to provide the atomicity semantics that were
3467          * previously provided by the per-filesystem vfs_rename_lock.
3468          *
3469          * With vfs_rename_lock removed to allow simultaneous renames
3470          * within a file system, ufs_dircheckpath can deadlock while
3471          * traversing back to ensure that source is not a parent directory
3472          * of target parent directory. This is because we get into
3473          * ufs_dircheckpath with the sdp and tdp locks held as RW_WRITER.
3474          * If the tdp and sdp of the simultaneous renames happen to be
3475          * in the path of each other, it can lead to a deadlock. This
3476          * can be avoided by getting the locks as RW_READER here and then
3477          * upgrading to RW_WRITER after completing the ufs_dircheckpath.
3478          *
3479          * We hold the target directory's i_rwlock after calling
3480          * ufs_lockfs_begin but in many other operations (like ufs_readdir)
3481          * VOP_RWLOCK is explicitly called by the filesystem independent code
3482          * before calling the file system operation. In these cases the order
3483          * is reversed (i.e i_rwlock is taken first and then ufs_lockfs_begin
3484          * is called). This is fine as long as ufs_lockfs_begin acts as a VOP
3485          * counter but with ufs_quiesce setting the SLOCK bit this becomes a
3486          * synchronizing object which might lead to a deadlock. So we use
3487          * rw_tryenter instead of rw_enter. If we fail to get this lock and
3488          * find that SLOCK bit is set, we call ufs_lockfs_end and restart the
3489          * operation.
3490          */
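             /*
              * In short: take both parent i_rwlocks as readers with
              * rw_tryenter (backing off, reversing the lock order, or
              * restarting the whole operation if SLOCK is set), run
              * ufs_dircheckpath, and only then try to upgrade both locks
              * to writers, dropping everything and retrying if an upgrade
              * fails.
              */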
3491 retry:
3492         first_lock = &tdp->i_rwlock;
3493         second_lock = &sdp->i_rwlock;
3494 retry_firstlock:
3495         if (!rw_tryenter(first_lock, RW_READER)) {
3496                 /*
3497                  * We didn't get the lock. Check if the SLOCK is set in the
3498                  * ufsvfs. If yes, we might be in a deadlock. Safer to give up
3499                  * and wait for SLOCK to be cleared.
3500                  */
3501 
3502                 if (ulp && ULOCKFS_IS_SLOCK(ulp)) {
3503                         TRANS_END_CSYNC(ufsvfsp, error, issync, TOP_RENAME,
3504                             trans_size);
3505                         ufs_lockfs_end(ulp);
3506                         goto retry_rename;
3507 
3508                 } else {
3509                         /*
3510                          * SLOCK isn't set so this is a genuine synchronization
3511                          * case. Let's try again after giving them a breather.
3512                          */
3513                         delay(RETRY_LOCK_DELAY);
3514                         goto  retry_firstlock;
3515                 }
3516         }
3517         /*
3518          * Need to check whether tdp and sdp are the same.
3519          */
3520         if ((tdp != sdp) && (!rw_tryenter(second_lock, RW_READER))) {
3521                 /*
3522                  * We didn't get the lock. Check if the SLOCK is set in the
3523                  * ufsvfs. If yes, we might be in a deadlock. Safer to give up
3524                  * and wait for SLOCK to be cleared.
3525                  */
3526 
3527                 rw_exit(first_lock);
3528                 if (ulp && ULOCKFS_IS_SLOCK(ulp)) {
3529                         TRANS_END_CSYNC(ufsvfsp, error, issync, TOP_RENAME,
3530                             trans_size);
3531                         ufs_lockfs_end(ulp);
3532                         goto retry_rename;
3533 
3534                 } else {
3535                         /*
3536                          * So we couldn't get the second-level peer lock *and*
3537                          * the SLOCK bit isn't set. We may be contending with
3538                          * someone who wants these locks the other way round.
3539                          * Reverse the locks in case there is heavy contention
3540                          * for the second-level lock.
3541                          */
3542                         reverse_lock = first_lock;
3543                         first_lock = second_lock;
3544                         second_lock = reverse_lock;
3545                         ufs_rename_retry_cnt++;
3546                         goto  retry_firstlock;
3547                 }
3548         }
3549 
3550         if (sip == tdp) {
3551                 error = EINVAL;
3552                 goto errout;
3553         }
3554         /*
3555          * Make sure we can delete the source entry.  This requires
3556          * write permission on the containing directory.
3557          * Check for sticky directories.
3558          */
3559         rw_enter(&sdp->i_contents, RW_READER);
3560         rw_enter(&sip->i_contents, RW_READER);
3561         if ((error = ufs_iaccess(sdp, IWRITE, cr, 0)) != 0 ||
3562             (error = ufs_sticky_remove_access(sdp, sip, cr)) != 0) {
3563                 rw_exit(&sip->i_contents);
3564                 rw_exit(&sdp->i_contents);
3565                 goto errout;
3566         }
3567 
3568         /*
3569          * If this is a rename of a directory and the parent is
3570          * different (".." must be changed), then the source
3571          * directory must not be in the directory hierarchy
3572          * above the target, as this would orphan everything
3573          * below the source directory.  Also the user must have
3574          * write permission in the source so as to be able to
3575          * change "..".
3576          */
3577         if ((((sip->i_mode & IFMT) == IFDIR) ||
3578             ((sip->i_mode & IFMT) == IFATTRDIR)) && sdp != tdp) {
3579                 ino_t   inum;
3580 
3581                 if (error = ufs_iaccess(sip, IWRITE, cr, 0)) {
3582                         rw_exit(&sip->i_contents);
3583                         rw_exit(&sdp->i_contents);
3584                         goto errout;
3585                 }
3586                 inum = sip->i_number;
3587                 rw_exit(&sip->i_contents);
3588                 rw_exit(&sdp->i_contents);
3589                 if ((error = ufs_dircheckpath(inum, tdp, sdp, cr))) {
3590                         /*
3591                          * If we got EAGAIN ufs_dircheckpath detected a
3592                          * potential deadlock and backed out. We need
3593                          * to retry the operation since sdp and tdp have
3594                          * to be released to avoid the deadlock.
3595                          */
3596                         if (error == EAGAIN) {
3597                                 rw_exit(&tdp->i_rwlock);
3598                                 if (tdp != sdp)
3599                                         rw_exit(&sdp->i_rwlock);
3600                                 delay(ufs_rename_backoff_delay);
3601                                 ufs_rename_dircheck_retry_cnt++;
3602                                 goto retry;
3603                         }
3604                         goto errout;
3605                 }
3606         } else {
3607                 rw_exit(&sip->i_contents);
3608                 rw_exit(&sdp->i_contents);
3609         }
3610 
3611 
3612         /*
3613          * Check for renaming '.' or '..' or alias of '.'
3614          */
3615         if (strcmp(snm, ".") == 0 || strcmp(snm, "..") == 0 || sdp == sip) {
3616                 error = EINVAL;
3617                 goto errout;
3618         }
3619 
3620         /*
3621          * Simultaneous renames can deadlock in ufs_dircheckpath since it
3622          * tries to traverse back the file tree with both tdp and sdp held
3623          * as RW_WRITER. To avoid that we have to hold the tdp and sdp locks
3624          * as RW_READERS till ufs_dircheckpath is done.
3625          * Now that ufs_dircheckpath is done, we can upgrade the locks
3626          * to RW_WRITER.
3627          */
3628         if (!rw_tryupgrade(&tdp->i_rwlock)) {
3629                 /*
3630                  * The upgrade failed. We have to give away the lock
3631                  * so as to avoid deadlocking with someone else who is
3632                  * waiting for the writer lock. With the lock gone, we
3633                  * cannot be sure the checks done above will hold
3634                  * good when we eventually get them back as writer.
3635                  * So if we can't upgrade we drop the locks and retry
3636                  * everything again.
3637                  */
3638                 rw_exit(&tdp->i_rwlock);
3639                 if (tdp != sdp)
3640                         rw_exit(&sdp->i_rwlock);
3641                 delay(ufs_rename_backoff_delay);
3642                 ufs_rename_upgrade_retry_cnt++;
3643                 goto retry;
3644         }
3645         if (tdp != sdp) {
3646                 if (!rw_tryupgrade(&sdp->i_rwlock)) {
3647                         /*
3648                          * The upgrade failed. We have to give away the lock
3649                          * so as to avoid deadlocking with someone else who is
3650                          * waiting for the writer lock. With the lock gone, we
3651                          * cannot be sure the checks done above will hold
3652                          * good when we eventually get them back as writer.
3653                          * So if we can't upgrade we drop the locks and retry
3654                          * everything again.
3655                          */
3656                         rw_exit(&tdp->i_rwlock);
3657                         rw_exit(&sdp->i_rwlock);
3658                         delay(ufs_rename_backoff_delay);
3659                         ufs_rename_upgrade_retry_cnt++;
3660                         goto retry;
3661                 }
3662         }
3663 
3664         /*
3665          * Now that all the locks are held, check to make sure another thread
3666          * didn't slip in and take out the sip.
3667          */
3668         slot.status = NONE;
3669         if ((sip->i_ctime.tv_usec * 1000) > now.tv_nsec ||
3670             sip->i_ctime.tv_sec > now.tv_sec) {
3671                 rw_enter(&sdp->i_ufsvfs->vfs_dqrwlock, RW_READER);
3672                 rw_enter(&sdp->i_contents, RW_WRITER);
3673                 error = ufs_dircheckforname(sdp, snm, strlen(snm), &slot,
3674                     &ip, cr, 0);
3675                 rw_exit(&sdp->i_contents);
3676                 rw_exit(&sdp->i_ufsvfs->vfs_dqrwlock);
3677                 if (error) {
3678                         goto errout;
3679                 }
3680                 if (ip == NULL) {
3681                         error = ENOENT;
3682                         goto errout;
3683                 } else {
3684                         /*
3685                          * If the inode was found, we need to drop the v_count
3686                          * so as not to keep the filesystem from being
3687                          * unmounted at a later time.
3688                          */
3689                         VN_RELE(ITOV(ip));
3690                 }
3691 
3692                 /*
3693                  * Release the slot.fbp that has the page mapped and
3694                  * locked SE_SHARED, and could be used in
3695                  * ufs_direnter_lr(), which needs to get the SE_EXCL lock
3696                  * on said page.
3697                  */
3698                 if (slot.fbp) {
3699                         fbrelse(slot.fbp, S_OTHER);
3700                         slot.fbp = NULL;
3701                 }
3702         }
3703 
3704         /*
3705          * Link source to the target.
3706          */
3707         if (error = ufs_direnter_lr(tdp, tnm, DE_RENAME, sdp, sip, cr)) {
3708                 /*
3709                  * ESAME isn't really an error; it indicates that the
3710                  * operation should not be done because the source and target
3711                  * are the same file, but that no error should be reported.
3712                  */
3713                 if (error == ESAME)
3714                         error = 0;
3715                 goto errout;
3716         }
3717 
3718         if (error == 0 && tvp != NULL)
3719                 vnevent_rename_dest(tvp, tdvp, tnm, ct);
3720 
3721         /*
3722          * Unlink the source.
3723          * Remove the source entry.  ufs_dirremove() checks that the entry
3724          * still reflects sip, and returns an error if it doesn't.
3725          * If the entry has changed just forget about it.  Release
3726          * the source inode.
3727          */
3728         if ((error = ufs_dirremove(sdp, snm, sip, (struct vnode *)0,
3729             DR_RENAME, cr)) == ENOENT)
3730                 error = 0;
3731 
3732         if (error == 0) {
3733                 vnevent_rename_src(ITOV(sip), sdvp, snm, ct);
3734                 vnevent_rename_dest_dir(tdvp, ITOV(sip), tnm, ct);
3735         }
3736 
3737 errout:
3738         if (slot.fbp)
3739                 fbrelse(slot.fbp, S_OTHER);
3740 
3741         rw_exit(&tdp->i_rwlock);
3742         if (sdp != tdp) {
3743                 rw_exit(&sdp->i_rwlock);
3744         }
3745 
3746 unlock:
3747         if (tvp != NULL)
3748                 VN_RELE(tvp);
3749         if (sip != NULL)
3750                 VN_RELE(ITOV(sip));
3751 
3752         if (ulp) {
3753                 TRANS_END_CSYNC(ufsvfsp, error, issync, TOP_RENAME, trans_size);
3754                 ufs_lockfs_end(ulp);
3755         }
3756 
3757         return (error);
3758 }
3759 
3760 /*ARGSUSED*/
3761 static int
3762 ufs_mkdir(struct vnode *dvp, char *dirname, struct vattr *vap,
3763         struct vnode **vpp, struct cred *cr, caller_context_t *ct, int flags,
3764         vsecattr_t *vsecp)
3765 {
3766         struct inode *ip;
3767         struct inode *xip;
3768         struct ufsvfs *ufsvfsp;
3769         struct ulockfs *ulp;
3770         int error;
3771         int issync;
3772         int trans_size;
3773         int indeadlock;
3774         int retry = 1;
3775 
3776         ASSERT((vap->va_mask & (AT_TYPE|AT_MODE)) == (AT_TYPE|AT_MODE));
3777 
3778         /*
3779          * Can't make directory in attr hidden dir
3780          */
3781         if ((VTOI(dvp)->i_mode & IFMT) == IFATTRDIR)
3782                 return (EINVAL);
3783 
3784 again:
3785         ip = VTOI(dvp);
3786         ufsvfsp = ip->i_ufsvfs;
3787         error = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_MKDIR_MASK);
3788         if (error)
3789                 goto out;
3790         if (ulp)
3791                 TRANS_BEGIN_CSYNC(ufsvfsp, issync, TOP_MKDIR,
3792                     trans_size = (int)TOP_MKDIR_SIZE(ip));
3793 
3794         /*
3795          * ufs_tryirwlock_trans uses rw_tryenter and checks for SLOCK
3796          * to avoid a deadlock between i_rwlock and ufs_lockfs_begin.
3797          * If a deadlock is possible, the operation is retried.
3798          */
3799         ufs_tryirwlock_trans(&ip->i_rwlock, RW_WRITER, TOP_MKDIR, retry);
3800         if (indeadlock)
3801                 goto again;
3802 
3803         error = ufs_direnter_cm(ip, dirname, DE_MKDIR, vap, &xip, cr,
3804             (retry ? IQUIET : 0));
3805         if (error == EAGAIN) {
3806                 if (ulp) {
3807                         TRANS_END_CSYNC(ufsvfsp, error, issync, TOP_MKDIR,
3808                             trans_size);
3809                         ufs_lockfs_end(ulp);
3810                 }
3811                 goto again;
3812         }
3813 
3814         rw_exit(&ip->i_rwlock);
3815         if (error == 0) {
3816                 ip = xip;
3817                 *vpp = ITOV(ip);
3818         } else if (error == EEXIST)
3819                 VN_RELE(ITOV(xip));
3820 
3821         if (ulp) {
3822                 int terr = 0;
3823                 TRANS_END_CSYNC(ufsvfsp, terr, issync, TOP_MKDIR, trans_size);
3824                 ufs_lockfs_end(ulp);
3825                 if (error == 0)
3826                         error = terr;
3827         }
3828 out:
3829         if ((error == ENOSPC) && retry && TRANS_ISTRANS(ufsvfsp)) {
3830                 ufs_delete_drain_wait(ufsvfsp, 1);
3831                 retry = 0;
3832                 goto again;
3833         }
3834 
3835         return (error);
3836 }
3837 
3838 /*ARGSUSED*/
3839 static int
3840 ufs_rmdir(struct vnode *vp, char *nm, struct vnode *cdir, struct cred *cr,
3841         caller_context_t *ct, int flags)
3842 {
3843         struct inode *ip = VTOI(vp);
3844         struct ufsvfs *ufsvfsp = ip->i_ufsvfs;
3845         struct ulockfs *ulp;
3846         vnode_t *rmvp = NULL;   /* Vnode of removed directory */
3847         int error;
3848         int issync;
3849         int trans_size;
3850         int indeadlock;
3851 
3852         /*
3853          * don't let the delete queue get too long
3854          */
3855         if (ufsvfsp == NULL) {
3856                 error = EIO;
3857                 goto out;
3858         }
3859         if (ufsvfsp->vfs_delete.uq_ne > ufs_idle_max)
3860                 ufs_delete_drain(vp->v_vfsp, 1, 1);
3861 
3862         error = ufs_eventlookup(vp, nm, cr, &rmvp);
3863         if (rmvp != NULL) {
3864                 /* Only send the event if there were no errors */
3865                 if (error == 0)
3866                         vnevent_rmdir(rmvp, vp, nm, ct);
3867                 VN_RELE(rmvp);
3868         }
3869 
3870 retry_rmdir:
3871         error = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_RMDIR_MASK);
3872         if (error)
3873                 goto out;
3874 
3875         if (ulp)
3876                 TRANS_BEGIN_CSYNC(ufsvfsp, issync, TOP_RMDIR,
3877                     trans_size = TOP_RMDIR_SIZE);
3878 
3879         /*
3880          * ufs_tryirwlock_trans uses rw_tryenter and checks for SLOCK
3881          * to avoid a deadlock between i_rwlock and ufs_lockfs_begin.
3882          * If a deadlock is possible, the operation is retried.
3883          */
3884         ufs_tryirwlock_trans(&ip->i_rwlock, RW_WRITER, TOP_RMDIR, retry);
3885         if (indeadlock)
3886                 goto retry_rmdir;
3887         error = ufs_dirremove(ip, nm, (struct inode *)0, cdir, DR_RMDIR, cr);
3888 
3889         rw_exit(&ip->i_rwlock);
3890 
3891         if (ulp) {
3892                 TRANS_END_CSYNC(ufsvfsp, error, issync, TOP_RMDIR,
3893                     trans_size);
3894                 ufs_lockfs_end(ulp);
3895         }
3896 
3897 out:
3898         return (error);
3899 }
3900 
3901 /* ARGSUSED */
3902 static int
3903 ufs_readdir(
3904         struct vnode *vp,
3905         struct uio *uiop,
3906         struct cred *cr,
3907         int *eofp,
3908         caller_context_t *ct,
3909         int flags)
3910 {
3911         struct iovec *iovp;
3912         struct inode *ip;
3913         struct direct *idp;
3914         struct dirent64 *odp;
3915         struct fbuf *fbp;
3916         struct ufsvfs *ufsvfsp;
3917         struct ulockfs *ulp;
3918         caddr_t outbuf;
3919         size_t bufsize;
3920         uint_t offset;
3921         uint_t bytes_wanted, total_bytes_wanted;
3922         int incount = 0;
3923         int outcount = 0;
3924         int error;
3925 
3926         ip = VTOI(vp);
3927         ASSERT(RW_READ_HELD(&ip->i_rwlock));
3928 
3929         if (uiop->uio_loffset >= MAXOFF32_T) {
3930                 if (eofp)
3931                         *eofp = 1;
3932                 return (0);
3933         }
3934 
3935         /*
3936          * Check if we have been called with a valid iov_len
3937          * and bail out if not; otherwise we may loop
3938          * forever further down.
3939          */
3940         if (uiop->uio_iov->iov_len <= 0) {
3941                 error = EINVAL;
3942                 goto out;
3943         }
3944 
3945         /*
3946          * Large Files: When we come here we are guaranteed that
3947          * uio_offset can be used safely. The high word is zero.
3948          */
3949 
3950         ufsvfsp = ip->i_ufsvfs;
3951         error = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_READDIR_MASK);
3952         if (error)
3953                 goto out;
3954 
3955         iovp = uiop->uio_iov;
3956         total_bytes_wanted = iovp->iov_len;
3957 
3958         /* Large Files: directory files should not be "large" */
3959 
3960         ASSERT(ip->i_size <= MAXOFF32_T);
3961 
3962         /* Force offset to be valid (to guard against bogus lseek() values) */
3963         offset = (uint_t)uiop->uio_offset & ~(DIRBLKSIZ - 1);
3964 
3965         /* Quit if at end of file or link count of zero (posix) */
3966         if (offset >= (uint_t)ip->i_size || ip->i_nlink <= 0) {
3967                 if (eofp)
3968                         *eofp = 1;
3969                 error = 0;
3970                 goto unlock;
3971         }
3972 
3973         /*
3974          * Get space to change directory entries into fs independent format.
3975          * Do fast alloc for the most commonly used request size (filesystem
3976          * block size).
3977          */
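        /*
         * If the caller handed us a single kernel-space iovec we can build
         * the dirent64 records directly in the caller's buffer; otherwise
         * we stage them in a kmem buffer and uiomove() them out below.
         */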
3978         if (uiop->uio_segflg != UIO_SYSSPACE || uiop->uio_iovcnt != 1) {
3979                 bufsize = total_bytes_wanted;
3980                 outbuf = kmem_alloc(bufsize, KM_SLEEP);
3981                 odp = (struct dirent64 *)outbuf;
3982         } else {
3983                 bufsize = total_bytes_wanted;
3984                 odp = (struct dirent64 *)iovp->iov_base;
3985         }
3986 
3987 nextblk:
3988         bytes_wanted = total_bytes_wanted;
3989 
3990         /* Truncate request to file size */
3991         if (offset + bytes_wanted > (int)ip->i_size)
3992                 bytes_wanted = (int)(ip->i_size - offset);
3993 
3994         /* Comply with MAXBSIZE boundary restrictions of fbread() */
3995         if ((offset & MAXBOFFSET) + bytes_wanted > MAXBSIZE)
3996                 bytes_wanted = MAXBSIZE - (offset & MAXBOFFSET);
3997 
3998         /*
3999          * Read in the next chunk.
4000          * We are still holding the i_rwlock.
4001          */
4002         error = fbread(vp, (offset_t)offset, bytes_wanted, S_OTHER, &fbp);
4003 
4004         if (error)
4005                 goto update_inode;
4006         if (!ULOCKFS_IS_NOIACC(ITOUL(ip)) && (ip->i_fs->fs_ronly == 0) &&
4007             (!ufsvfsp->vfs_noatime)) {
4008                 ip->i_flag |= IACC;
4009         }
4010         incount = 0;
4011         idp = (struct direct *)fbp->fb_addr;
4012         if (idp->d_ino == 0 && idp->d_reclen == 0 && idp->d_namlen == 0) {
4013                 cmn_err(CE_WARN, "ufs_readdir: bad dir, inumber = %llu, "
4014                     "fs = %s\n",
4015                     (u_longlong_t)ip->i_number, ufsvfsp->vfs_fs->fs_fsmnt);
4016                 fbrelse(fbp, S_OTHER);
4017                 error = ENXIO;
4018                 goto update_inode;
4019         }
4020         /* Transform to file-system independent format */
4021         while (incount < bytes_wanted) {
4022                 /*
4023                  * If the current directory entry is mangled, then skip
4024                  * to the next block.  It would be nice to set the FSBAD
4025                  * flag in the super-block so that a fsck is forced on
4026                  * next reboot, but locking is a problem.
4027                  */
4028                 if (idp->d_reclen & 0x3) {
4029                         offset = (offset + DIRBLKSIZ) & ~(DIRBLKSIZ-1);
4030                         break;
4031                 }
4032 
4033                 /* Skip to requested offset and skip empty entries */
4034                 if (idp->d_ino != 0 && offset >= (uint_t)uiop->uio_offset) {
4035                         ushort_t this_reclen =
4036                             DIRENT64_RECLEN(idp->d_namlen);
4037                         /* Buffer too small for any entries */
4038                         if (!outcount && this_reclen > bufsize) {
4039                                 fbrelse(fbp, S_OTHER);
4040                                 error = EINVAL;
4041                                 goto update_inode;
4042                         }
4043                         /* If would overrun the buffer, quit */
4044                         if (outcount + this_reclen > bufsize) {
4045                                 break;
4046                         }
4047                         /* Take this entry */
4048                         odp->d_ino = (ino64_t)idp->d_ino;
4049                         odp->d_reclen = (ushort_t)this_reclen;
4050                         odp->d_off = (offset_t)(offset + idp->d_reclen);
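                        /*
                         * d_off records the offset of the entry that follows
                         * this one, i.e. where a later readdir/seekdir should
                         * resume.
                         */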
4051 
4052                         /* use strncpy(9f) to zero out uninitialized bytes */
4053 
4054                         ASSERT(strlen(idp->d_name) + 1 <=
4055                             DIRENT64_NAMELEN(this_reclen));
4056                         (void) strncpy(odp->d_name, idp->d_name,
4057                             DIRENT64_NAMELEN(this_reclen));
4058                         outcount += odp->d_reclen;
4059                         odp = (struct dirent64 *)
4060                             ((intptr_t)odp + odp->d_reclen);
4061                         ASSERT(outcount <= bufsize);
4062                 }
4063                 if (idp->d_reclen) {
4064                         incount += idp->d_reclen;
4065                         offset += idp->d_reclen;
4066                         idp = (struct direct *)((intptr_t)idp + idp->d_reclen);
4067                 } else {
4068                         offset = (offset + DIRBLKSIZ) & ~(DIRBLKSIZ-1);
4069                         break;
4070                 }
4071         }
4072         /* Release the chunk */
4073         fbrelse(fbp, S_OTHER);
4074 
4075         /* Read a whole block but got no entries; read another if not at EOF */
4076 
4077         /*
4078          * Large Files: casting i_size to int here is not a problem
4079          * because directory sizes are always less than MAXOFF32_T.
4080          * See assertion above.
4081          */
4082 
4083         if (offset < (int)ip->i_size && !outcount)
4084                 goto nextblk;
4085 
4086         /* Copy out the entry data */
4087         if (uiop->uio_segflg == UIO_SYSSPACE && uiop->uio_iovcnt == 1) {
4088                 iovp->iov_base += outcount;
4089                 iovp->iov_len -= outcount;
4090                 uiop->uio_resid -= outcount;
4091                 uiop->uio_offset = offset;
4092         } else if ((error = uiomove(outbuf, (long)outcount, UIO_READ,
4093             uiop)) == 0)
4094                 uiop->uio_offset = offset;
4095 update_inode:
4096         ITIMES(ip);
4097         if (uiop->uio_segflg != UIO_SYSSPACE || uiop->uio_iovcnt != 1)
4098                 kmem_free(outbuf, bufsize);
4099 
4100         if (eofp && error == 0)
4101                 *eofp = (uiop->uio_offset >= (int)ip->i_size);
4102 unlock:
4103         if (ulp) {
4104                 ufs_lockfs_end(ulp);
4105         }
4106 out:
4107         return (error);
4108 }
4109 
4110 /*ARGSUSED*/
4111 static int
4112 ufs_symlink(
4113         struct vnode *dvp,              /* ptr to parent dir vnode */
4114         char *linkname,                 /* name of symbolic link */
4115         struct vattr *vap,              /* attributes */
4116         char *target,                   /* target path */
4117         struct cred *cr,                /* user credentials */
4118         caller_context_t *ct,
4119         int flags)
4120 {
4121         struct inode *ip, *dip = VTOI(dvp);
4122         struct ufsvfs *ufsvfsp = dip->i_ufsvfs;
4123         struct ulockfs *ulp;
4124         int error;
4125         int issync;
4126         int trans_size;
4127         int residual;
4128         int ioflag;
4129         int retry = 1;
4130 
4131         /*
4132          * No symlinks in attrdirs at this time
4133          */
4134         if ((VTOI(dvp)->i_mode & IFMT) == IFATTRDIR)
4135                 return (EINVAL);
4136 
4137 again:
4138         ip = (struct inode *)NULL;
4139         vap->va_type = VLNK;
4140         vap->va_rdev = 0;
4141 
4142         error = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_SYMLINK_MASK);
4143         if (error)
4144                 goto out;
4145 
4146         if (ulp)
4147                 TRANS_BEGIN_CSYNC(ufsvfsp, issync, TOP_SYMLINK,
4148                     trans_size = (int)TOP_SYMLINK_SIZE(dip));
4149 
4150         /*
4151          * We must create the inode before the directory entry, to avoid
4152          * racing with readlink().  ufs_dirmakeinode requires that we
4153          * hold the quota lock as reader, and directory locks as writer.
4154          */
4155 
4156         rw_enter(&dip->i_rwlock, RW_WRITER);
4157         rw_enter(&ufsvfsp->vfs_dqrwlock, RW_READER);
4158         rw_enter(&dip->i_contents, RW_WRITER);
4159 
4160         /*
4161          * Suppress any out-of-inodes messages if we will retry on
4162          * ENOSPC.
4163          */
4164         if (retry)
4165                 dip->i_flag |= IQUIET;
4166 
4167         error = ufs_dirmakeinode(dip, &ip, vap, DE_SYMLINK, cr);
4168 
4169         dip->i_flag &= ~IQUIET;
4170 
4171         rw_exit(&dip->i_contents);
4172         rw_exit(&ufsvfsp->vfs_dqrwlock);
4173         rw_exit(&dip->i_rwlock);
4174 
4175         if (error)
4176                 goto unlock;
4177 
4178         /*
4179          * OK.  The inode has been created.  Write out the data of the
4180          * symbolic link.  Since symbolic links are metadata, and should
4181          * remain consistent across a system crash, we need to force the
4182          * data out synchronously.
4183          *
4184          * (This is a change from the semantics in earlier releases, which
4185          * only created symbolic links synchronously if the semi-documented
4186          * 'syncdir' option was set, or if we were being invoked by the NFS
4187          * server, which requires symbolic links to be created synchronously.)
4188          *
4189          * We need to pass in a pointer for the residual length; otherwise
4190          * ufs_rdwri() will always return EIO if it can't write the data,
4191          * even if the error was really ENOSPC or EDQUOT.
4192          */
4193 
4194         ioflag = FWRITE | FDSYNC;
4195         residual = 0;
4196 
4197         rw_enter(&ufsvfsp->vfs_dqrwlock, RW_READER);
4198         rw_enter(&ip->i_contents, RW_WRITER);
4199 
4200         /*
4201          * Suppress file system full messages if we will retry
4202          */
4203         if (retry)
4204                 ip->i_flag |= IQUIET;
4205 
4206         error = ufs_rdwri(UIO_WRITE, ioflag, ip, target, strlen(target),
4207             (offset_t)0, UIO_SYSSPACE, &residual, cr);
4208 
4209         ip->i_flag &= ~IQUIET;
4210 
4211         if (error) {
4212                 rw_exit(&ip->i_contents);
4213                 rw_exit(&ufsvfsp->vfs_dqrwlock);
4214                 goto remove;
4215         }
4216 
4217         /*
4218          * If the link's data is small enough, we can cache it in the inode.
4219          * This is a "fast symbolic link".  We don't use the first direct
4220          * block because that's actually used to point at the symbolic link's
4221          * contents on disk; but we know that none of the other direct or
4222          * indirect blocks can be used because symbolic links are restricted
4223          * to be smaller than a file system block.
4224          */
4225 
4226         ASSERT(MAXPATHLEN <= VBSIZE(ITOV(ip)));
4227 
4228         if (ip->i_size > 0 && ip->i_size <= FSL_SIZE) {
4229                 if (kcopy(target, &ip->i_db[1], ip->i_size) == 0) {
4230                         ip->i_flag |= IFASTSYMLNK;
4231                 } else {
4232                         int i;
4233                         /* error, clear garbage left behind */
4234                         for (i = 1; i < NDADDR; i++)
4235                                 ip->i_db[i] = 0;
4236                         for (i = 0; i < NIADDR; i++)
4237                                 ip->i_ib[i] = 0;
4238                 }
4239         }
4240 
4241         rw_exit(&ip->i_contents);
4242         rw_exit(&ufsvfsp->vfs_dqrwlock);
4243 
4244         /*
4245          * OK.  We've successfully created the symbolic link.  All that
4246          * remains is to insert it into the appropriate directory.
4247          */
4248 
4249         rw_enter(&dip->i_rwlock, RW_WRITER);
4250         error = ufs_direnter_lr(dip, linkname, DE_SYMLINK, NULL, ip, cr);
4251         rw_exit(&dip->i_rwlock);
4252 
4253         /*
4254          * Fall through into remove-on-error code.  We're either done, or we
4255          * need to remove the inode (if we couldn't insert it).
4256          */
4257 
4258 remove:
4259         if (error && (ip != NULL)) {
4260                 rw_enter(&ip->i_contents, RW_WRITER);
4261                 ip->i_nlink--;
4262                 ip->i_flag |= ICHG;
4263                 ip->i_seq++;
4264                 ufs_setreclaim(ip);
4265                 rw_exit(&ip->i_contents);
4266         }
4267 
4268 unlock:
4269         if (ip != NULL)
4270                 VN_RELE(ITOV(ip));
4271 
4272         if (ulp) {
4273                 int terr = 0;
4274 
4275                 TRANS_END_CSYNC(ufsvfsp, terr, issync, TOP_SYMLINK,
4276                     trans_size);
4277                 ufs_lockfs_end(ulp);
4278                 if (error == 0)
4279                         error = terr;
4280         }
4281 
4282         /*
4283          * We may have failed due to lack of an inode or of a block to
4284          * store the target in.  Try flushing the delete queue to free
4285          * up logically-available resources and try again.
4286          */
4287         if ((error == ENOSPC) && retry && TRANS_ISTRANS(ufsvfsp)) {
4288                 ufs_delete_drain_wait(ufsvfsp, 1);
4289                 retry = 0;
4290                 goto again;
4291         }
4292 
4293 out:
4294         return (error);
4295 }
4296 
4297 /*
4298  * UFS-specific routine used to do UFS I/O.
4299  */
4300 int
4301 ufs_rdwri(enum uio_rw rw, int ioflag, struct inode *ip, caddr_t base,
4302         ssize_t len, offset_t offset, enum uio_seg seg, int *aresid,
4303         struct cred *cr)
4304 {
4305         struct uio auio;
4306         struct iovec aiov;
4307         int error;
4308 
4309         ASSERT(RW_LOCK_HELD(&ip->i_contents));
4310 
4311         bzero((caddr_t)&auio, sizeof (uio_t));
4312         bzero((caddr_t)&aiov, sizeof (iovec_t));
4313 
4314         aiov.iov_base = base;
4315         aiov.iov_len = len;
4316         auio.uio_iov = &aiov;
4317         auio.uio_iovcnt = 1;
4318         auio.uio_loffset = offset;
4319         auio.uio_segflg = (short)seg;
4320         auio.uio_resid = len;
4321 
4322         if (rw == UIO_WRITE) {
4323                 auio.uio_fmode = FWRITE;
4324                 auio.uio_extflg = UIO_COPY_DEFAULT;
4325                 auio.uio_llimit = curproc->p_fsz_ctl;
4326                 error = wrip(ip, &auio, ioflag, cr);
4327         } else {
4328                 auio.uio_fmode = FREAD;
4329                 auio.uio_extflg = UIO_COPY_CACHED;
4330                 auio.uio_llimit = MAXOFFSET_T;
4331                 error = rdip(ip, &auio, ioflag, cr);
4332         }
4333 
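        /*
         * Report any residual byte count to the caller if requested;
         * otherwise treat a short transfer as an I/O error.
         */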
4334         if (aresid) {
4335                 *aresid = auio.uio_resid;
4336         } else if (auio.uio_resid) {
4337                 error = EIO;
4338         }
4339         return (error);
4340 }
4341 
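/*
 * Build a file identifier for this vnode.  The ufid records the inode
 * number and generation count so that a later lookup by file id can
 * detect a stale handle (e.g. one held across a delete/reallocate).
 */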
4342 /*ARGSUSED*/
4343 static int
4344 ufs_fid(struct vnode *vp, struct fid *fidp, caller_context_t *ct)
4345 {
4346         struct ufid *ufid;
4347         struct inode *ip = VTOI(vp);
4348 
4349         if (ip->i_ufsvfs == NULL)
4350                 return (EIO);
4351 
4352         if (fidp->fid_len < (sizeof (struct ufid) - sizeof (ushort_t))) {
4353                 fidp->fid_len = sizeof (struct ufid) - sizeof (ushort_t);
4354                 return (ENOSPC);
4355         }
4356 
4357         ufid = (struct ufid *)fidp;
4358         bzero((char *)ufid, sizeof (struct ufid));
4359         ufid->ufid_len = sizeof (struct ufid) - sizeof (ushort_t);
4360         ufid->ufid_ino = ip->i_number;
4361         ufid->ufid_gen = ip->i_gen;
4362 
4363         return (0);
4364 }
4365 
4366 /* ARGSUSED2 */
4367 static int
4368 ufs_rwlock(struct vnode *vp, int write_lock, caller_context_t *ctp)
4369 {
4370         struct inode    *ip = VTOI(vp);
4371         struct ufsvfs   *ufsvfsp;
4372         int             forcedirectio;
4373 
4374         /*
4375          * Read case is easy.
4376          */
4377         if (!write_lock) {
4378                 rw_enter(&ip->i_rwlock, RW_READER);
4379                 return (V_WRITELOCK_FALSE);
4380         }
4381 
4382         /*
4383          * Caller has requested a writer lock, but that inhibits any
4384          * concurrency in the VOPs that follow. Acquire the lock shared
4385          * and defer exclusive access until it is known to be needed in
4386          * other VOP handlers. Some cases can be determined here.
4387          */
4388 
4389         /*
4390          * If directio is not set, there is no chance of concurrency,
4391          * so just acquire the lock exclusive. Beware of a forced
4392          * unmount before looking at the mount option.
4393          */
4394         ufsvfsp = ip->i_ufsvfs;
4395         forcedirectio = ufsvfsp ? ufsvfsp->vfs_forcedirectio : 0;
4396         if (!(ip->i_flag & IDIRECTIO || forcedirectio) ||
4397             !ufs_allow_shared_writes) {
4398                 rw_enter(&ip->i_rwlock, RW_WRITER);
4399                 return (V_WRITELOCK_TRUE);
4400         }
4401 
4402         /*
4403          * Mandatory locking forces acquiring i_rwlock exclusive.
4404          */
4405         if (MANDLOCK(vp, ip->i_mode)) {
4406                 rw_enter(&ip->i_rwlock, RW_WRITER);
4407                 return (V_WRITELOCK_TRUE);
4408         }
4409 
4410         /*
4411          * Acquire the lock shared in case a concurrent write follows.
4412          * Mandatory locking could have become enabled before the lock
4413          * was acquired. Re-check and upgrade if needed.
4414          */
4415         rw_enter(&ip->i_rwlock, RW_READER);
4416         if (MANDLOCK(vp, ip->i_mode)) {
4417                 rw_exit(&ip->i_rwlock);
4418                 rw_enter(&ip->i_rwlock, RW_WRITER);
4419                 return (V_WRITELOCK_TRUE);
4420         }
4421         return (V_WRITELOCK_FALSE);
4422 }
4423 
4424 /*ARGSUSED*/
4425 static void
4426 ufs_rwunlock(struct vnode *vp, int write_lock, caller_context_t *ctp)
4427 {
4428         struct inode    *ip = VTOI(vp);
4429 
4430         rw_exit(&ip->i_rwlock);
4431 }
4432 
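/*
 * Seek is trivial for UFS: just reject offsets that are negative or
 * beyond MAXOFFSET_T; no per-file state needs updating here.
 */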
4433 /* ARGSUSED */
4434 static int
4435 ufs_seek(struct vnode *vp, offset_t ooff, offset_t *noffp,
4436         caller_context_t *ct)
4437 {
4438         return ((*noffp < 0 || *noffp > MAXOFFSET_T) ? EINVAL : 0);
4439 }
4440 
4441 /* ARGSUSED */
4442 static int
4443 ufs_frlock(struct vnode *vp, int cmd, struct flock64 *bfp, int flag,
4444         offset_t offset, struct flk_callback *flk_cbp, struct cred *cr,
4445         caller_context_t *ct)
4446 {
4447         struct inode *ip = VTOI(vp);
4448 
4449         if (ip->i_ufsvfs == NULL)
4450                 return (EIO);
4451 
4452         /*
4453          * If file is being mapped, disallow frlock.
4454          * XXX I am not holding tlock while checking i_mapcnt because the
4455          * current locking strategy drops all locks before calling fs_frlock.
4456          * So, mapcnt could change before we enter fs_frlock, making it
4457          * meaningless to have held tlock in the first place.
4458          */
4459         if (ip->i_mapcnt > 0 && MANDLOCK(vp, ip->i_mode))
4460                 return (EAGAIN);
4461         return (fs_frlock(vp, cmd, bfp, flag, offset, flk_cbp, cr, ct));
4462 }
4463 
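/*
 * VOP_SPACE handler: F_FREESP frees (or truncates) the byte range described
 * by the flock64 argument and F_ALLOCSP preallocates it, both under the
 * lockfs protocol.
 */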
4464 /* ARGSUSED */
4465 static int
4466 ufs_space(struct vnode *vp, int cmd, struct flock64 *bfp, int flag,
4467         offset_t offset, cred_t *cr, caller_context_t *ct)
4468 {
4469         struct ufsvfs *ufsvfsp = VTOI(vp)->i_ufsvfs;
4470         struct ulockfs *ulp;
4471         int error;
4472 
4473         if ((error = convoff(vp, bfp, 0, offset)) == 0) {
4474                 if (cmd == F_FREESP) {
4475                         error = ufs_lockfs_begin(ufsvfsp, &ulp,
4476                             ULOCKFS_SPACE_MASK);
4477                         if (error)
4478                                 return (error);
4479                         error = ufs_freesp(vp, bfp, flag, cr);
4480 
4481                         if (error == 0) {
4482                                 if (bfp->l_start == 0) {
4483                                         vnevent_truncate(vp, ct);
4484                                 } else {
4485                                         vnevent_resize(vp, ct);
4486                                 }
4487                         }
4488                 } else if (cmd == F_ALLOCSP) {
4489                         error = ufs_lockfs_begin(ufsvfsp, &ulp,
4490                             ULOCKFS_FALLOCATE_MASK);
4491                         if (error)
4492                                 return (error);
4493                         error = ufs_allocsp(vp, bfp, cr);
4494                 } else
4495                         return (EINVAL); /* Command not handled here */
4496 
4497                 if (ulp)
4498                         ufs_lockfs_end(ulp);
4499 
4500         }
4501         return (error);
4502 }
4503 
4504 /*
4505  * Used to determine if read ahead should be done. Also used
4506  * to determine when write back occurs.
4507  */
4508 #define CLUSTSZ(ip)             ((ip)->i_ufsvfs->vfs_ioclustsz)
4509 
4510 /*
4511  * A faster version of ufs_getpage.
4512  *
4513  * We optimize by inlining the pvn_getpages iterator, eliminating
4514  * calls to bmap_read if the file doesn't have UFS holes, and avoiding
4515  * the overhead of page_exists().
4516  *
4517  * When a file has UFS holes and ufs_getpage is called with S_READ,
4518  * we set *protp to PROT_READ to avoid calling bmap_read. This approach
4519  * hurts performance when a file with UFS holes is faulted
4520  * first in the S_READ mode, and then in the S_WRITE mode. We will get
4521  * two MMU faults in this case.
4522  *
4523  * XXX - the inode fields which control the sequential mode are not
4524  *       protected by any mutex. The read ahead will act wild if
4525  *       multiple processes access the file concurrently and
4526  *       some of them do so in sequential mode. One particularly bad case
4527  *       is if another thread changes the value of i_nextrio between
4528  *       the time this thread tests the i_nextrio value and the time it
4529  *       reads it again to use it as the offset for the read ahead.
4530  */
4531 /*ARGSUSED*/
4532 static int
4533 ufs_getpage(struct vnode *vp, offset_t off, size_t len, uint_t *protp,
4534         page_t *plarr[], size_t plsz, struct seg *seg, caddr_t addr,
4535         enum seg_rw rw, struct cred *cr, caller_context_t *ct)
4536 {
4537         u_offset_t      uoff = (u_offset_t)off; /* type conversion */
4538         u_offset_t      pgoff;
4539         u_offset_t      eoff;
4540         struct inode    *ip = VTOI(vp);
4541         struct ufsvfs   *ufsvfsp = ip->i_ufsvfs;
4542         struct fs       *fs;
4543         struct ulockfs  *ulp;
4544         page_t          **pl;
4545         caddr_t         pgaddr;
4546         krw_t           rwtype;
4547         int             err;
4548         int             has_holes;
4549         int             beyond_eof;
4550         int             seqmode;
4551         int             pgsize = PAGESIZE;
4552         int             dolock;
4553         int             do_qlock;
4554         int             trans_size;
4555 
4556         ASSERT((uoff & PAGEOFFSET) == 0);
4557 
4558         if (protp)
4559                 *protp = PROT_ALL;
4560 
4561         /*
4562          * Obey the lockfs protocol
4563          */
4564         err = ufs_lockfs_begin_getpage(ufsvfsp, &ulp, seg,
4565             rw == S_READ || rw == S_EXEC, protp);
4566         if (err)
4567                 goto out;
4568 
4569         fs = ufsvfsp->vfs_fs;
4570 
4571         if (ulp && (rw == S_CREATE || rw == S_WRITE) &&
4572             !(vp->v_flag & VISSWAP)) {
4573                 /*
4574                  * Try to start a transaction; this fails with EWOULDBLOCK
4575                  * if blocking is expected to occur and the address space
4576                  * is not the kernel address space.
4577                  */
4578                 trans_size = TOP_GETPAGE_SIZE(ip);
4579                 if (seg->s_as != &kas) {
4580                         TRANS_TRY_BEGIN_ASYNC(ufsvfsp, TOP_GETPAGE,
4581                             trans_size, err)
4582                         if (err == EWOULDBLOCK) {
4583                                 /*
4584                                  * Use EDEADLK here because the VM code
4585                                  * can normally never see this error.
4586                                  */
4587                                 err = EDEADLK;
4588                                 ufs_lockfs_end(ulp);
4589                                 goto out;
4590                         }
4591                 } else {
4592                         TRANS_BEGIN_ASYNC(ufsvfsp, TOP_GETPAGE, trans_size);
4593                 }
4594         }
4595 
4596         if (vp->v_flag & VNOMAP) {
4597                 err = ENOSYS;
4598                 goto unlock;
4599         }
4600 
4601         seqmode = ip->i_nextr == uoff && rw != S_CREATE;
4602 
4603         rwtype = RW_READER;             /* start as a reader */
4604         dolock = (rw_owner(&ip->i_contents) != curthread);
4605         /*
4606          * If this thread owns the lock, i.e., this thread grabbed it
4607          * as writer somewhere above, then we don't need to grab the
4608          * lock as reader in this routine.
4609          */
4610         do_qlock = (rw_owner(&ufsvfsp->vfs_dqrwlock) != curthread);
4611 
4612 retrylock:
4613         if (dolock) {
4614                 /*
4615                  * Grab the quota lock if we need to call
4616                  * bmap_write() below (with i_contents as writer).
4617                  */
4618                 if (do_qlock && rwtype == RW_WRITER)
4619                         rw_enter(&ufsvfsp->vfs_dqrwlock, RW_READER);
4620                 rw_enter(&ip->i_contents, rwtype);
4621         }
4622 
4623         /*
4624          * We may be getting called as a side effect of a bmap using
4625          * fbread() when the blocks might be being allocated and the
4626          * size has not yet been up'ed.  In this case we want to be
4627          * able to return zero pages if we get back UFS_HOLE from
4628          * calling bmap for a non write case here.  We also might have
4629          * to read some frags from the disk into a page if we are
4630          * extending the number of frags for a given lbn in bmap().
4631          * Large Files: The read of i_size here is atomic because
4632          * i_contents is held here. If dolock is zero, the lock
4633          * is held in bmap routines.
4634          */
4635         beyond_eof = uoff + len >
4636             P2ROUNDUP_TYPED(ip->i_size, PAGESIZE, u_offset_t);
4637         if (beyond_eof && seg != segkmap) {
4638                 if (dolock) {
4639                         rw_exit(&ip->i_contents);
4640                         if (do_qlock && rwtype == RW_WRITER)
4641                                 rw_exit(&ufsvfsp->vfs_dqrwlock);
4642                 }
4643                 err = EFAULT;
4644                 goto unlock;
4645         }
4646 
4647         /*
4648          * Must hold i_contents lock throughout the call to pvn_getpages
4649          * since locked pages are returned from each call to ufs_getapage.
4650          * Must *not* return locked pages and then try for contents lock
4651          * due to lock ordering requirements (inode > page)
4652          */
4653 
4654         has_holes = bmap_has_holes(ip);
4655 
4656         if ((rw == S_WRITE || rw == S_CREATE) && has_holes && !beyond_eof) {
4657                 int     blk_size;
4658                 u_offset_t offset;
4659 
4660                 /*
4661                  * We must acquire the RW_WRITER lock in order to
4662                  * call bmap_write().
4663                  */
4664                 if (dolock && rwtype == RW_READER) {
4665                         rwtype = RW_WRITER;
4666 
4667                         /*
4668                          * Grab the quota lock before
4669                          * upgrading i_contents, but if we can't grab it
4670                          * don't wait here due to lock order:
4671                          * vfs_dqrwlock > i_contents.
4672                          */
4673                         if (do_qlock &&
4674                             rw_tryenter(&ufsvfsp->vfs_dqrwlock, RW_READER)
4675                             == 0) {
4676                                 rw_exit(&ip->i_contents);
4677                                 goto retrylock;
4678                         }
4679                         if (!rw_tryupgrade(&ip->i_contents)) {
4680                                 rw_exit(&ip->i_contents);
4681                                 if (do_qlock)
4682                                         rw_exit(&ufsvfsp->vfs_dqrwlock);
4683                                 goto retrylock;
4684                         }
4685                 }
4686 
4687                 /*
4688                  * May be allocating disk blocks for holes here as
4689                  * a result of mmap faults. write(2) does the bmap_write
4690                  * in rdip/wrip, not here. We are not dealing with frags
4691                  * in this case.
4692                  */
4693                 /*
4694                  * Large Files: We cast fs_bmask field to offset_t
4695                  * just as we do for MAXBMASK because uoff is a 64-bit
4696                  * data type. fs_bmask will still be a 32-bit type
4697                  * as we cannot change any ondisk data structures.
4698                  */
4699 
4700                 offset = uoff & (offset_t)fs->fs_bmask;
4701                 while (offset < uoff + len) {
4702                         blk_size = (int)blksize(fs, ip, lblkno(fs, offset));
4703                         err = bmap_write(ip, offset, blk_size,
4704                             BI_NORMAL, NULL, cr);
4705                         if (ip->i_flag & (ICHG|IUPD))
4706                                 ip->i_seq++;
4707                         if (err)
4708                                 goto update_inode;
4709                         offset += blk_size; /* XXX - make this contig */
4710                 }
4711         }
4712 
4713         /*
4714          * Can be a reader from now on.
4715          */
4716         if (dolock && rwtype == RW_WRITER) {
4717                 rw_downgrade(&ip->i_contents);
4718                 /*
4719                  * We can release vfs_dqrwlock early so do it, but make
4720                  * sure we don't try to release it again at the bottom.
4721                  */
4722                 if (do_qlock) {
4723                         rw_exit(&ufsvfsp->vfs_dqrwlock);
4724                         do_qlock = 0;
4725                 }
4726         }
4727 
4728         /*
4729          * We remove PROT_WRITE in cases when the file has UFS holes
4730          * because we don't want to call bmap_read() to check whether
4731          * each page is backed by a disk block.
4732          */
4733         if (protp && has_holes && rw != S_WRITE && rw != S_CREATE)
4734                 *protp &= ~PROT_WRITE;
4735 
4736         err = 0;
4737 
4738         /*
4739          * The loop looks up pages in the range [off, off + len).
4740          * For each page, we first check if we should initiate an asynchronous
4741          * read ahead before we call page_lookup (we may sleep in page_lookup
4742          * for a previously initiated disk read).
4743          */
4744         eoff = (uoff + len);
4745         for (pgoff = uoff, pgaddr = addr, pl = plarr;
4746             pgoff < eoff; /* empty */) {
4747                 page_t  *pp;
4748                 u_offset_t      nextrio;
4749                 se_t    se;
4750                 int retval;
4751 
4752                 se = ((rw == S_CREATE || rw == S_OTHER) ? SE_EXCL : SE_SHARED);
4753 
4754                 /* Handle async getpage (faultahead) */
4755                 if (plarr == NULL) {
4756                         ip->i_nextrio = pgoff;
4757                         (void) ufs_getpage_ra(vp, pgoff, seg, pgaddr);
4758                         pgoff += pgsize;
4759                         pgaddr += pgsize;
4760                         continue;
4761                 }
4762                 /*
4763                  * Check if we should initiate read ahead of next cluster.
4764                  * We call page_exists only when we need to confirm that
4765                  * we have the current page before we initiate the read ahead.
4766                  */
4767                 nextrio = ip->i_nextrio;
4768                 if (seqmode &&
4769                     pgoff + CLUSTSZ(ip) >= nextrio && pgoff <= nextrio &&
4770                     nextrio < ip->i_size && page_exists(vp, pgoff)) {
4771                         retval = ufs_getpage_ra(vp, pgoff, seg, pgaddr);
4772                         /*
4773                          * We always read ahead the next cluster of data
4774                          * starting from i_nextrio. If the page (vp,nextrio)
4775                          * is actually in core at this point, the routine
4776                          * ufs_getpage_ra() will stop pre-fetching data
4777                          * until we read that page in a synchronized manner
4778                          * through ufs_getpage_miss(). So, we should increase
4779                          * i_nextrio if the page (vp, nextrio) exists.
4780                          */
4781                         if ((retval == 0) && page_exists(vp, nextrio)) {
4782                                 ip->i_nextrio = nextrio + pgsize;
4783                         }
4784                 }
4785 
4786                 if ((pp = page_lookup(vp, pgoff, se)) != NULL) {
4787                         /*
4788                          * We found the page in the page cache.
4789                          */
4790                         *pl++ = pp;
4791                         pgoff += pgsize;
4792                         pgaddr += pgsize;
4793                         len -= pgsize;
4794                         plsz -= pgsize;
4795                 } else  {
4796                         /*
4797                          * We have to create the page, or read it from disk.
4798                          */
4799                         if (err = ufs_getpage_miss(vp, pgoff, len, seg, pgaddr,
4800                             pl, plsz, rw, seqmode))
4801                                 goto error;
4802 
4803                         while (*pl != NULL) {
4804                                 pl++;
4805                                 pgoff += pgsize;
4806                                 pgaddr += pgsize;
4807                                 len -= pgsize;
4808                                 plsz -= pgsize;
4809                         }
4810                 }
4811         }
4812 
4813         /*
4814          * Return pages up to plsz if they are in the page cache.
4815          * We cannot return pages if there is a chance that they are
4816          * backed with a UFS hole and rw is S_WRITE or S_CREATE.
4817          */
4818         if (plarr && !(has_holes && (rw == S_WRITE || rw == S_CREATE))) {
4819 
4820                 ASSERT((protp == NULL) ||
4821                     !(has_holes && (*protp & PROT_WRITE)));
4822 
4823                 eoff = pgoff + plsz;
4824                 while (pgoff < eoff) {
4825                         page_t          *pp;
4826 
4827                         if ((pp = page_lookup_nowait(vp, pgoff,
4828                             SE_SHARED)) == NULL)
4829                                 break;
4830 
4831                         *pl++ = pp;
4832                         pgoff += pgsize;
4833                         plsz -= pgsize;
4834                 }
4835         }
4836 
4837         if (plarr)
4838                 *pl = NULL;                     /* Terminate page list */
4839         ip->i_nextr = pgoff;
4840 
4841 error:
4842         if (err && plarr) {
4843                 /*
4844                  * Release any pages we have locked.
4845                  */
4846                 while (pl > &plarr[0])
4847                         page_unlock(*--pl);
4848 
4849                 plarr[0] = NULL;
4850         }
4851 
4852 update_inode:
4853         /*
4854          * If the inode is not already marked for IACC (in rdip() for read)
4855          * and the inode is not marked for no access time update (in wrip()
4856          * for write) then update the inode access time and mod time now.
4857          */
4858         if ((ip->i_flag & (IACC | INOACC)) == 0) {
4859                 if ((rw != S_OTHER) && (ip->i_mode & IFMT) != IFDIR) {
4860                         if (!ULOCKFS_IS_NOIACC(ITOUL(ip)) &&
4861                             (fs->fs_ronly == 0) &&
4862                             (!ufsvfsp->vfs_noatime)) {
4863                                 mutex_enter(&ip->i_tlock);
4864                                 ip->i_flag |= IACC;
4865                                 ITIMES_NOLOCK(ip);
4866                                 mutex_exit(&ip->i_tlock);
4867                         }
4868                 }
4869         }
4870 
4871         if (dolock) {
4872                 rw_exit(&ip->i_contents);
4873                 if (do_qlock && rwtype == RW_WRITER)
4874                         rw_exit(&ufsvfsp->vfs_dqrwlock);
4875         }
4876 
4877 unlock:
4878         if (ulp) {
4879                 if ((rw == S_CREATE || rw == S_WRITE) &&
4880                     !(vp->v_flag & VISSWAP)) {
4881                         TRANS_END_ASYNC(ufsvfsp, TOP_GETPAGE, trans_size);
4882                 }
4883                 ufs_lockfs_end(ulp);
4884         }
4885 out:
4886         return (err);
4887 }
4888 
4889 /*
4890  * ufs_getpage_miss is called when ufs_getpage missed the page in the page
4891  * cache. The page is either read from the disk, or it's created.
4892  * A page is created (without disk read) if rw == S_CREATE, or if
4893  * the page is not backed with a real disk block (UFS hole).
4894  */
4895 /* ARGSUSED */
4896 static int
4897 ufs_getpage_miss(struct vnode *vp, u_offset_t off, size_t len, struct seg *seg,
4898         caddr_t addr, page_t *pl[], size_t plsz, enum seg_rw rw, int seq)
4899 {
4900         struct inode    *ip = VTOI(vp);
4901         page_t          *pp;
4902         daddr_t         bn;
4903         size_t          io_len;
4904         int             crpage = 0;
4905         int             err;
4906         int             contig;
4907         int             bsize = ip->i_fs->fs_bsize;
4908 
4909         /*
4910          * Figure out whether the page can be created, or must
4911          * be read from the disk.
4912          */
4913         if (rw == S_CREATE)
4914                 crpage = 1;
4915         else {
4916                 contig = 0;
4917                 if (err = bmap_read(ip, off, &bn, &contig))
4918                         return (err);
4919 
4920                 crpage = (bn == UFS_HOLE);
4921 
4922                 /*
4923                  * If it's also a fallocated block that hasn't been written to
4924                  * yet, we will treat it just like a UFS_HOLE and create
4925                  * a zero page for it.
4926                  */
4927                 if (ISFALLOCBLK(ip, bn))
4928                         crpage = 1;
4929         }
4930 
4931         if (crpage) {
4932                 if ((pp = page_create_va(vp, off, PAGESIZE, PG_WAIT, seg,
4933                     addr)) == NULL) {
4934                         return (ufs_fault(vp,
4935                             "ufs_getpage_miss: page_create == NULL"));
4936                 }
4937 
4938                 if (rw != S_CREATE)
4939                         pagezero(pp, 0, PAGESIZE);
4940 
4941                 io_len = PAGESIZE;
4942         } else {
4943                 u_offset_t      io_off;
4944                 uint_t  xlen;
4945                 struct buf      *bp;
4946                 ufsvfs_t        *ufsvfsp = ip->i_ufsvfs;
4947 
4948                 /*
4949                  * If access is not in sequential order, we read from disk
4950                  * in bsize units.
4951                  *
4952                  * We limit the size of the transfer to bsize if we are reading
4953                  * from the beginning of the file. Note in this situation we
4954                  * will hedge our bets and initiate an async read ahead of
4955                  * the second block.
4956                  */
4957                 if (!seq || off == 0)
4958                         contig = MIN(contig, bsize);
4959 
4960                 pp = pvn_read_kluster(vp, off, seg, addr, &io_off,
4961                     &io_len, off, contig, 0);
4962 
4963                 /*
4964                  * Some other thread has entered the page.
4965                  * ufs_getpage will retry page_lookup.
4966                  */
4967                 if (pp == NULL) {
4968                         pl[0] = NULL;
4969                         return (0);
4970                 }
4971 
4972                 /*
4973                  * Zero part of the page which we are not
4974                  * going to read from the disk.
4975                  */
4976                 xlen = io_len & PAGEOFFSET;
4977                 if (xlen != 0)
4978                         pagezero(pp->p_prev, xlen, PAGESIZE - xlen);
4979 
4980                 bp = pageio_setup(pp, io_len, ip->i_devvp, B_READ);
4981                 bp->b_edev = ip->i_dev;
4982                 bp->b_dev = cmpdev(ip->i_dev);
4983                 bp->b_blkno = bn;
4984                 bp->b_un.b_addr = (caddr_t)0;
4985                 bp->b_file = ip->i_vnode;
4986                 bp->b_offset = off;
4987 
4988                 if (ufsvfsp->vfs_log) {
4989                         lufs_read_strategy(ufsvfsp->vfs_log, bp);
4990                 } else if (ufsvfsp->vfs_snapshot) {
4991                         fssnap_strategy(&ufsvfsp->vfs_snapshot, bp);
4992                 } else {
4993                         ufsvfsp->vfs_iotstamp = ddi_get_lbolt();
4994                         ub.ub_getpages.value.ul++;
4995                         (void) bdev_strategy(bp);
4996                         lwp_stat_update(LWP_STAT_INBLK, 1);
4997                 }
4998 
4999                 ip->i_nextrio = off + ((io_len + PAGESIZE - 1) & PAGEMASK);
5000 
5001                 /*
5002                  * If the file access is sequential, initiate read ahead
5003                  * of the next cluster.
5004                  */
5005                 if (seq && ip->i_nextrio < ip->i_size)
5006                         (void) ufs_getpage_ra(vp, off, seg, addr);
5007                 err = biowait(bp);
5008                 pageio_done(bp);
5009 
5010                 if (err) {
5011                         pvn_read_done(pp, B_ERROR);
5012                         return (err);
5013                 }
5014         }
5015 
5016         pvn_plist_init(pp, pl, plsz, off, io_len, rw);
5017         return (0);
5018 }
5019 
5020 /*
5021  * Read ahead a cluster from the disk. Returns the length in bytes.
5022  */
5023 static int
5024 ufs_getpage_ra(struct vnode *vp, u_offset_t off, struct seg *seg, caddr_t addr)
5025 {
5026         struct inode    *ip = VTOI(vp);
5027         page_t          *pp;
5028         u_offset_t      io_off = ip->i_nextrio;
5029         ufsvfs_t        *ufsvfsp;
5030         caddr_t         addr2 = addr + (io_off - off);
5031         struct buf      *bp;
5032         daddr_t         bn;
5033         size_t          io_len;
5034         int             err;
5035         int             contig;
5036         int             xlen;
5037         int             bsize = ip->i_fs->fs_bsize;
5038 
5039         /*
5040          * If the directio advisory is in effect on this file,
5041          * then do not do buffered read ahead. Read ahead makes
5042          * it more difficult for threads using directio, as they
5043          * will be forced to flush the pages from this vnode.
5044          */
5045         if ((ufsvfsp = ip->i_ufsvfs) == NULL)
5046                 return (0);
5047         if (ip->i_flag & IDIRECTIO || ufsvfsp->vfs_forcedirectio)
5048                 return (0);
5049 
5050         /*
5051          * Is this test needed?
5052          */
5053         if (addr2 >= seg->s_base + seg->s_size)
5054                 return (0);
5055 
5056         contig = 0;
5057         err = bmap_read(ip, io_off, &bn, &contig);
5058         /*
5059          * If it's a UFS_HOLE or a fallocated block, do not perform
5060          * any read-aheads since there probably is nothing to read ahead.
5061          */
5062         if (err || bn == UFS_HOLE || ISFALLOCBLK(ip, bn))
5063                 return (0);
5064 
5065         /*
5066          * Limit the transfer size to bsize if this is the 2nd block.
5067          */
5068         if (io_off == (u_offset_t)bsize)
5069                 contig = MIN(contig, bsize);
5070 
5071         if ((pp = pvn_read_kluster(vp, io_off, seg, addr2, &io_off,
5072             &io_len, io_off, contig, 1)) == NULL)
5073                 return (0);
5074 
5075         /*
5076          * Zero part of page which we are not going to read from disk
5077          */
5078         if ((xlen = (io_len & PAGEOFFSET)) > 0)
5079                 pagezero(pp->p_prev, xlen, PAGESIZE - xlen);
5080 
5081         ip->i_nextrio = (io_off + io_len + PAGESIZE - 1) & PAGEMASK;
5082 
5083         bp = pageio_setup(pp, io_len, ip->i_devvp, B_READ | B_ASYNC);
5084         bp->b_edev = ip->i_dev;
5085         bp->b_dev = cmpdev(ip->i_dev);
5086         bp->b_blkno = bn;
5087         bp->b_un.b_addr = (caddr_t)0;
5088         bp->b_file = ip->i_vnode;
5089         bp->b_offset = off;
5090 
5091         if (ufsvfsp->vfs_log) {
5092                 lufs_read_strategy(ufsvfsp->vfs_log, bp);
5093         } else if (ufsvfsp->vfs_snapshot) {
5094                 fssnap_strategy(&ufsvfsp->vfs_snapshot, bp);
5095         } else {
5096                 ufsvfsp->vfs_iotstamp = ddi_get_lbolt();
5097                 ub.ub_getras.value.ul++;
5098                 (void) bdev_strategy(bp);
5099                 lwp_stat_update(LWP_STAT_INBLK, 1);
5100         }
5101 
5102         return (io_len);
5103 }
5104 
5105 int     ufs_delay = 1;
5106 /*
5107  * Flags are composed of {B_INVAL, B_FREE, B_DONTNEED, B_FORCE, B_ASYNC}
5108  *
5109  * LMXXX - the inode really ought to contain a pointer to one of these
5110  * async args.  Stuff gunk in there and just hand the whole mess off.
5111  * This would replace i_delaylen, i_delayoff.
5112  */
5113 /*ARGSUSED*/
5114 static int
5115 ufs_putpage(struct vnode *vp, offset_t off, size_t len, int flags,
5116         struct cred *cr, caller_context_t *ct)
5117 {
5118         struct inode *ip = VTOI(vp);
5119         int err = 0;
5120 
5121         if (vp->v_count == 0) {
5122                 return (ufs_fault(vp, "ufs_putpage: bad v_count == 0"));
5123         }
5124 
5125         /*
5126          * XXX - Why should this check be made here?
5127          */
5128         if (vp->v_flag & VNOMAP) {
5129                 err = ENOSYS;
5130                 goto errout;
5131         }
5132 
5133         if (ip->i_ufsvfs == NULL) {
5134                 err = EIO;
5135                 goto errout;
5136         }
5137 
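        /*
         * Asynchronous requests are batched: contiguous delayed-write
         * ranges accumulate in i_delayoff/i_delaylen and are only pushed
         * once a full cluster (CLUSTSZ) builds up or a non-contiguous
         * request arrives.  Everything else falls through to ufs_putpages().
         */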
5138         if (flags & B_ASYNC) {
5139                 if (ufs_delay && len &&
5140                     (flags & ~(B_ASYNC|B_DONTNEED|B_FREE)) == 0) {
5141                         mutex_enter(&ip->i_tlock);
5142                         /*
5143                          * If nobody stalled, start a new cluster.
5144                          */
5145                         if (ip->i_delaylen == 0) {
5146                                 ip->i_delayoff = off;
5147                                 ip->i_delaylen = len;
5148                                 mutex_exit(&ip->i_tlock);
5149                                 goto errout;
5150                         }
5151                         /*
5152                          * If we have a full cluster or they are not contig,
5153                          * then push last cluster and start over.
5154                          */
5155                         if (ip->i_delaylen >= CLUSTSZ(ip) ||
5156                             ip->i_delayoff + ip->i_delaylen != off) {
5157                                 u_offset_t doff;
5158                                 size_t dlen;
5159 
5160                                 doff = ip->i_delayoff;
5161                                 dlen = ip->i_delaylen;
5162                                 ip->i_delayoff = off;
5163                                 ip->i_delaylen = len;
5164                                 mutex_exit(&ip->i_tlock);
5165                                 err = ufs_putpages(vp, doff, dlen,
5166                                     flags, cr);
5167                                 /* LMXXX - flags are new val, not old */
5168                                 goto errout;
5169                         }
5170                         /*
5171                          * There is something there, it's not full, and
5172                          * it is contig.
5173                          */
5174                         ip->i_delaylen += len;
5175                         mutex_exit(&ip->i_tlock);
5176                         goto errout;
5177                 }
5178                 /*
5179                  * Must have weird flags or we are not clustering.
5180                  */
5181         }
5182 
5183         err = ufs_putpages(vp, off, len, flags, cr);
5184 
5185 errout:
5186         return (err);
5187 }
5188 
5189 /*
5190  * If len == 0, do from off to EOF.
5191  *
5192  * The normal cases should be len == 0 & off == 0 (entire vp list),
5193  * len == MAXBSIZE (from segmap_release actions), and len == PAGESIZE
5194  * (from pageout).
5195  */
5196 /*ARGSUSED*/
5197 static int
5198 ufs_putpages(
5199         struct vnode *vp,
5200         offset_t off,
5201         size_t len,
5202         int flags,
5203         struct cred *cr)
5204 {
5205         u_offset_t io_off;
5206         u_offset_t eoff;
5207         struct inode *ip = VTOI(vp);
5208         page_t *pp;
5209         size_t io_len;
5210         int err = 0;
5211         int dolock;
5212 
5213         if (vp->v_count == 0)
5214                 return (ufs_fault(vp, "ufs_putpages: v_count == 0"));
5215         /*
5216          * Acquire the readers/writer inode lock before locking
5217          * any pages in this inode.
5218          * The inode lock is held during i/o.
5219          */
5220         if (len == 0) {
5221                 mutex_enter(&ip->i_tlock);
5222                 ip->i_delayoff = ip->i_delaylen = 0;
5223                 mutex_exit(&ip->i_tlock);
5224         }
5225         dolock = (rw_owner(&ip->i_contents) != curthread);
5226         if (dolock) {
5227                 /*
5228                  * Must synchronize this thread and any possible thread
5229                  * operating in the window of vulnerability in wrip().
5230                  * It is dangerous to allow both a thread doing a putpage
5231                  * and a thread writing, so serialize them.  The exception
5232                  * is when the thread in wrip() does something which causes
5233                  * a putpage operation.  Then, the thread must be allowed
5234                  * to continue.  It may encounter a bmap_read problem in
5235                  * ufs_putapage, but that is handled in ufs_putapage.
5236                  * Allow async writers to proceed, we don't want to block
5237                  * the pageout daemon.
5238                  */
5239                 if (ip->i_writer == curthread)
5240                         rw_enter(&ip->i_contents, RW_READER);
5241                 else {
5242                         for (;;) {
5243                                 rw_enter(&ip->i_contents, RW_READER);
5244                                 mutex_enter(&ip->i_tlock);
5245                                 /*
5246                                  * If there is no thread in the critical
5247                                  * section of wrip(), then proceed.
5248                                  * Otherwise, wait until there isn't one.
5249                                  */
5250                                 if (ip->i_writer == NULL) {
5251                                         mutex_exit(&ip->i_tlock);
5252                                         break;
5253                                 }
5254                                 rw_exit(&ip->i_contents);
5255                                 /*
5256                                  * Bounce async writers when we have a writer
5257                                  * working on this file so we don't deadlock
5258                                  * the pageout daemon.
5259                                  */
5260                                 if (flags & B_ASYNC) {
5261                                         mutex_exit(&ip->i_tlock);
5262                                         return (0);
5263                                 }
5264                                 cv_wait(&ip->i_wrcv, &ip->i_tlock);
5265                                 mutex_exit(&ip->i_tlock);
5266                         }
5267                 }
5268         }
5269 
5270         if (!vn_has_cached_data(vp)) {
5271                 if (dolock)
5272                         rw_exit(&ip->i_contents);
5273                 return (0);
5274         }
5275 
5276         if (len == 0) {
5277                 /*
5278                  * Search the entire vp list for pages >= off.
5279                  */
5280                 err = pvn_vplist_dirty(vp, (u_offset_t)off, ufs_putapage,
5281                     flags, cr);
5282         } else {
5283                 /*
5284                  * Loop over all offsets in the range looking for
5285                  * pages to deal with.
5286                  */
5287                 if ((eoff = blkroundup(ip->i_fs, ip->i_size)) != 0)
5288                         eoff = MIN(off + len, eoff);
5289                 else
5290                         eoff = off + len;
5291 
5292                 for (io_off = off; io_off < eoff; io_off += io_len) {
5293                         /*
5294                          * If we are not invalidating, synchronously
5295                          * freeing or writing pages, use the routine
5296                          * page_lookup_nowait() to prevent reclaiming
5297                          * them from the free list.
5298                          */
5299                         if ((flags & B_INVAL) || ((flags & B_ASYNC) == 0)) {
5300                                 pp = page_lookup(vp, io_off,
5301                                     (flags & (B_INVAL | B_FREE)) ?
5302                                     SE_EXCL : SE_SHARED);
5303                         } else {
5304                                 pp = page_lookup_nowait(vp, io_off,
5305                                     (flags & B_FREE) ? SE_EXCL : SE_SHARED);
5306                         }
5307 
5308                         if (pp == NULL || pvn_getdirty(pp, flags) == 0)
5309                                 io_len = PAGESIZE;
5310                         else {
5311                                 u_offset_t *io_offp = &io_off;
5312 
5313                                 err = ufs_putapage(vp, pp, io_offp, &io_len,
5314                                     flags, cr);
5315                                 if (err != 0)
5316                                         break;
5317                                 /*
5318                                  * "io_off" and "io_len" are returned as
5319                                  * the range of pages we actually wrote.
5320                                  * This allows us to skip ahead more quickly
5321                                  * since several pages may've been dealt
5322                                  * with by this iteration of the loop.
5323                                  */
5324                         }
5325                 }
5326         }
5327         if (err == 0 && off == 0 && (len == 0 || len >= ip->i_size)) {
5328                 /*
5329                  * We have just sync'ed back all the pages on
5330                  * the inode; turn off the IMODTIME flag.
5331                  */
5332                 mutex_enter(&ip->i_tlock);
5333                 ip->i_flag &= ~IMODTIME;
5334                 mutex_exit(&ip->i_tlock);
5335         }
5336         if (dolock)
5337                 rw_exit(&ip->i_contents);
5338         return (err);
5339 }
5340 
5341 static void
5342 ufs_iodone(buf_t *bp)
5343 {
5344         struct inode *ip;
5345 
5346         ASSERT((bp->b_pages->p_vnode != NULL) && !(bp->b_flags & B_READ));
5347 
5348         bp->b_iodone = NULL;
5349 
5350         ip = VTOI(bp->b_pages->p_vnode);
5351 
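             /*
              * Credit back the bytes charged to i_writes for this buffer by
              * ufs_putapage().  If outstanding write bytes were at or above
              * the ufs_LW low-water mark and have now dropped back to it,
              * and write throttling (ufs_WRITES) is enabled, wake any
              * threads sleeping on i_wrcv.
              */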
5352         mutex_enter(&ip->i_tlock);
5353         if (ip->i_writes >= ufs_LW) {
5354                 if ((ip->i_writes -= bp->b_bcount) <= ufs_LW)
5355                         if (ufs_WRITES)
5356                                 cv_broadcast(&ip->i_wrcv); /* wake all up */
5357         } else {
5358                 ip->i_writes -= bp->b_bcount;
5359         }
5360 
5361         mutex_exit(&ip->i_tlock);
5362         iodone(bp);
5363 }
5364 
5365 /*
5366  * Write out a single page, possibly klustering adjacent
5367  * dirty pages.  The inode lock must be held.
5368  *
5369  * LMXXX - bsize < pagesize not done.
5370  */
5371 /*ARGSUSED*/
5372 int
5373 ufs_putapage(
5374         struct vnode *vp,
5375         page_t *pp,
5376         u_offset_t *offp,
5377         size_t *lenp,           /* return values */
5378         int flags,
5379         struct cred *cr)
5380 {
5381         u_offset_t io_off;
5382         u_offset_t off;
5383         struct inode *ip = VTOI(vp);
5384         struct ufsvfs *ufsvfsp = ip->i_ufsvfs;
5385         struct fs *fs;
5386         struct buf *bp;
5387         size_t io_len;
5388         daddr_t bn;
5389         int err;
5390         int contig;
5391         int dotrans;
5392 
5393         ASSERT(RW_LOCK_HELD(&ip->i_contents));
5394 
5395         if (ufsvfsp == NULL) {
5396                 err = EIO;
5397                 goto out_trace;
5398         }
5399 
5400         fs = ip->i_fs;
5401         ASSERT(fs->fs_ronly == 0);
5402 
5403         /*
5404          * If the modified time on the inode has not already been
5405          * set elsewhere (e.g. for write/setattr), we set the time now.
5406          * This gives us approximate modified times for mmap'ed files
5407          * which are modified via stores in the user address space.
5408          */
5409         if ((ip->i_flag & IMODTIME) == 0) {
5410                 mutex_enter(&ip->i_tlock);
5411                 ip->i_flag |= IUPD;
5412                 ip->i_seq++;
5413                 ITIMES_NOLOCK(ip);
5414                 mutex_exit(&ip->i_tlock);
5415         }
5416 
5417         /*
5418          * Align the request to a block boundary (for old file systems),
5419          * and go ask bmap() how contiguous things are for this file.
5420          */
5421         off = pp->p_offset & (offset_t)fs->fs_bmask;  /* block align it */
5422         contig = 0;
5423         err = bmap_read(ip, off, &bn, &contig);
5424         if (err)
5425                 goto out;
5426         if (bn == UFS_HOLE) {                   /* putpage never allocates */
5427                 /*
5428                  * logging device is in error mode; simply return EIO
5429                  */
5430                 if (TRANS_ISERROR(ufsvfsp)) {
5431                         err = EIO;
5432                         goto out;
5433                 }
5434                 /*
5435          * Oops, the thread in the window in wrip() performed some
5436          * operation that caused a putpage in the bad range.  In this
5437          * case, just return an error; that will cause the software-
5438          * modified bit on the page to be set and the page will get
5439          * written out again later.
5440                  */
5441                 if (ip->i_writer == curthread) {
5442                         err = EIO;
5443                         goto out;
5444                 }
5445                 /*
5446                  * If the pager is trying to push a page in the bad range
5447          * just tell it to try again later when things are better.
5448                  */
5449                 if (flags & B_ASYNC) {
5450                         err = EAGAIN;
5451                         goto out;
5452                 }
5453                 err = ufs_fault(ITOV(ip), "ufs_putapage: bn == UFS_HOLE");
5454                 goto out;
5455         }
5456 
5457         /*
5458          * If it is a fallocate'd block, reverse the negativity since
5459          * we are now writing to it.
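          * (fallocate'd blocks are recorded with negative block numbers
          * until they are first written).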
5460          */
5461         if (ISFALLOCBLK(ip, bn)) {
5462                 err = bmap_set_bn(vp, off, dbtofsb(fs, -bn));
5463                 if (err)
5464                         goto out;
5465 
5466                 bn = -bn;
5467         }
5468 
5469         /*
5470          * Take the length (of contiguous bytes) passed back from bmap()
5471          * and _try_ and get a set of pages covering that extent.
5472          */
5473         pp = pvn_write_kluster(vp, pp, &io_off, &io_len, off, contig, flags);
5474 
5475         /*
5476          * May have run out of memory and not clustered backwards.
5477          * off          p_offset
5478          * [  pp - 1  ][   pp   ]
5479          * [    block           ]
5480          * We told bmap off, so we have to adjust the bn accordingly.
5481          */
5482         if (io_off > off) {
5483                 bn += btod(io_off - off);
5484                 contig -= (io_off - off);
5485         }
5486 
5487         /*
5488          * bmap was careful to tell us the right size so use that.
5489          * There might be unallocated frags at the end.
5490          * LMXXX - bzero the end of the page?  We must be writing after EOF.
5491          */
5492         if (io_len > contig) {
5493                 ASSERT(io_len - contig < fs->fs_bsize);
5494                 io_len -= (io_len - contig);
5495         }
5496 
5497         /*
5498          * Handle the case where we are writing the last page after EOF.
5499          *
5500          * XXX - just a patch for i-mt3.
5501          */
5502         if (io_len == 0) {
5503                 ASSERT(pp->p_offset >=
5504                     (u_offset_t)(roundup(ip->i_size, PAGESIZE)));
5505                 io_len = PAGESIZE;
5506         }
5507 
5508         bp = pageio_setup(pp, io_len, ip->i_devvp, B_WRITE | flags);
5509 
5510         ULOCKFS_SET_MOD(ITOUL(ip));
5511 
5512         bp->b_edev = ip->i_dev;
5513         bp->b_dev = cmpdev(ip->i_dev);
5514         bp->b_blkno = bn;
5515         bp->b_un.b_addr = (caddr_t)0;
5516         bp->b_file = ip->i_vnode;
5517 
5518         /*
5519          * File contents of shadow or quota inodes are metadata, and updates
5520          * to these need to be put into a logging transaction. All direct
5521          * callers in UFS do that, but fsflush can come here _before_ the
5522          * normal codepath. An example would be updating ACL information, for
5523          * which the normal codepath would be:
5524          *      ufs_si_store()
5525          *      ufs_rdwri()
5526          *      wrip()
5527          *      segmap_release()
5528          *      VOP_PUTPAGE()
5529          * Here, fsflush can pick up the dirty page before segmap_release()
5530          * forces it out. If that happens, there's no transaction.
5531          * We therefore need to test whether a transaction exists, and if not
5532          * create one - for fsflush.
5533          */
5534         dotrans =
5535             (((ip->i_mode & IFMT) == IFSHAD || ufsvfsp->vfs_qinod == ip) &&
5536             ((curthread->t_flag & T_DONTBLOCK) == 0) &&
5537             (TRANS_ISTRANS(ufsvfsp)));
5538 
5539         if (dotrans) {
5540                 curthread->t_flag |= T_DONTBLOCK;
5541                 TRANS_BEGIN_ASYNC(ufsvfsp, TOP_PUTPAGE, TOP_PUTPAGE_SIZE(ip));
5542         }
5543         if (TRANS_ISTRANS(ufsvfsp)) {
5544                 if ((ip->i_mode & IFMT) == IFSHAD) {
5545                         TRANS_BUF(ufsvfsp, 0, io_len, bp, DT_SHAD);
5546                 } else if (ufsvfsp->vfs_qinod == ip) {
5547                         TRANS_DELTA(ufsvfsp, ldbtob(bn), bp->b_bcount, DT_QR,
5548                             0, 0);
5549                 }
5550         }
5551         if (dotrans) {
5552                 TRANS_END_ASYNC(ufsvfsp, TOP_PUTPAGE, TOP_PUTPAGE_SIZE(ip));
5553                 curthread->t_flag &= ~T_DONTBLOCK;
5554         }
5555 
5556         /* write throttle */
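             /*
              * Account this write against the per-inode throttle: point
              * b_iodone at ufs_iodone() so the bytes are credited back at
              * I/O completion, and charge b_bcount to i_writes now.
              */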
5557 
5558         ASSERT(bp->b_iodone == NULL);
5559         bp->b_iodone = (int (*)())ufs_iodone;
5560         mutex_enter(&ip->i_tlock);
5561         ip->i_writes += bp->b_bcount;
5562         mutex_exit(&ip->i_tlock);
5563 
5564         if (bp->b_flags & B_ASYNC) {
5565                 if (ufsvfsp->vfs_log) {
5566                         lufs_write_strategy(ufsvfsp->vfs_log, bp);
5567                 } else if (ufsvfsp->vfs_snapshot) {
5568                         fssnap_strategy(&ufsvfsp->vfs_snapshot, bp);
5569                 } else {
5570                         ufsvfsp->vfs_iotstamp = ddi_get_lbolt();
5571                         ub.ub_putasyncs.value.ul++;
5572                         (void) bdev_strategy(bp);
5573                         lwp_stat_update(LWP_STAT_OUBLK, 1);
5574                 }
5575         } else {
5576                 if (ufsvfsp->vfs_log) {
5577                         lufs_write_strategy(ufsvfsp->vfs_log, bp);
5578                 } else if (ufsvfsp->vfs_snapshot) {
5579                         fssnap_strategy(&ufsvfsp->vfs_snapshot, bp);
5580                 } else {
5581                         ufsvfsp->vfs_iotstamp = ddi_get_lbolt();
5582                         ub.ub_putsyncs.value.ul++;
5583                         (void) bdev_strategy(bp);
5584                         lwp_stat_update(LWP_STAT_OUBLK, 1);
5585                 }
5586                 err = biowait(bp);
5587                 pageio_done(bp);
5588                 pvn_write_done(pp, ((err) ? B_ERROR : 0) | B_WRITE | flags);
5589         }
5590 
5591         pp = NULL;
5592 
5593 out:
5594         if (err != 0 && pp != NULL)
5595                 pvn_write_done(pp, B_ERROR | B_WRITE | flags);
5596 
5597         if (offp)
5598                 *offp = io_off;
5599         if (lenp)
5600                 *lenp = io_len;
5601 out_trace:
5602         return (err);
5603 }
5604 
5605 uint64_t ufs_map_alock_retry_cnt;
5606 uint64_t ufs_map_lockfs_retry_cnt;
5607 
5608 /* ARGSUSED */
5609 static int
5610 ufs_map(struct vnode *vp,
5611         offset_t off,
5612         struct as *as,
5613         caddr_t *addrp,
5614         size_t len,
5615         uchar_t prot,
5616         uchar_t maxprot,
5617         uint_t flags,
5618         struct cred *cr,
5619         caller_context_t *ct)
5620 {
5621         struct segvn_crargs vn_a;
5622         struct ufsvfs *ufsvfsp = VTOI(vp)->i_ufsvfs;
5623         struct ulockfs *ulp;
5624         int error, sig;
5625         k_sigset_t smask;
5626         caddr_t hint = *addrp;
5627 
5628         if (vp->v_flag & VNOMAP) {
5629                 error = ENOSYS;
5630                 goto out;
5631         }
5632 
5633         if (off < (offset_t)0 || (offset_t)(off + len) < (offset_t)0) {
5634                 error = ENXIO;
5635                 goto out;
5636         }
5637 
5638         if (vp->v_type != VREG) {
5639                 error = ENODEV;
5640                 goto out;
5641         }
5642 
5643 retry_map:
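             /*
              * Restore the caller's original address hint; a previous pass
              * through choose_addr() below may have changed *addrp.
              */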
5644         *addrp = hint;
5645         /*
5646          * If file is being locked, disallow mapping.
5647          */
5648         if (vn_has_mandatory_locks(vp, VTOI(vp)->i_mode)) {
5649                 error = EAGAIN;
5650                 goto out;
5651         }
5652 
5653         as_rangelock(as);
5654         /*
5655          * Note that if we are retrying (because ufs_lockfs_trybegin failed in
5656          * the previous attempt), some other thread could have grabbed
5657          * the same VA range if MAP_FIXED is set.  In that case, choose_addr()
5658          * would unmap that valid VA range, which is OK.
5659          */
5660         error = choose_addr(as, addrp, len, off, ADDR_VACALIGN, flags);
5661         if (error != 0) {
5662                 as_rangeunlock(as);
5663                 goto out;
5664         }
5665 
5666         /*
5667          * a_lock has to be acquired before entering the lockfs protocol
5668          * because that is the order in which pagefault works. Also we cannot
5669          * block on a_lock here because this waiting writer will prevent
5670          * further readers like ufs_read from progressing and could cause
5671          * deadlock between ufs_read/ufs_map/pagefault when a quiesce is
5672          * pending.
5673          */
5674         while (!AS_LOCK_TRYENTER(as, RW_WRITER)) {
5675                 ufs_map_alock_retry_cnt++;
5676                 delay(RETRY_LOCK_DELAY);
5677         }
5678 
5679         /*
5680          * We can't hold as->a_lock and wait for lockfs to succeed because
5681          * the proc tools might hang on a_lock, so call ufs_lockfs_trybegin()
5682          * instead.
5683          */
5684         if (error = ufs_lockfs_trybegin(ufsvfsp, &ulp, ULOCKFS_MAP_MASK)) {
5685                 /*
5686                  * ufs_lockfs_trybegin() did not succeed. It is safer to give up
5687                  * as->a_lock and wait for ulp->ul_fs_lock status to change.
5688                  */
5689                 ufs_map_lockfs_retry_cnt++;
5690                 AS_LOCK_EXIT(as);
5691                 as_rangeunlock(as);
5692                 if (error == EIO)
5693                         goto out;
5694 
5695                 mutex_enter(&ulp->ul_lock);
5696                 while (ulp->ul_fs_lock & ULOCKFS_MAP_MASK) {
5697                         if (ULOCKFS_IS_SLOCK(ulp) || ufsvfsp->vfs_nointr) {
5698                                 cv_wait(&ulp->ul_cv, &ulp->ul_lock);
5699                         } else {
5700                                 sigintr(&smask, 1);
5701                                 sig = cv_wait_sig(&ulp->ul_cv, &ulp->ul_lock);
5702                                 sigunintr(&smask);
5703                                 if (((ulp->ul_fs_lock & ULOCKFS_MAP_MASK) &&
5704                                     !sig) || ufsvfsp->vfs_dontblock) {
5705                                         mutex_exit(&ulp->ul_lock);
5706                                         return (EINTR);
5707                                 }
5708                         }
5709                 }
5710                 mutex_exit(&ulp->ul_lock);
5711                 goto retry_map;
5712         }
5713 
5714         vn_a.vp = vp;
5715         vn_a.offset = (u_offset_t)off;
5716         vn_a.type = flags & MAP_TYPE;
5717         vn_a.prot = prot;
5718         vn_a.maxprot = maxprot;
5719         vn_a.cred = cr;
5720         vn_a.amp = NULL;
5721         vn_a.flags = flags & ~MAP_TYPE;
5722         vn_a.szc = 0;
5723         vn_a.lgrp_mem_policy_flags = 0;
5724 
5725         error = as_map_locked(as, *addrp, len, segvn_create, &vn_a);
5726         if (ulp)
5727                 ufs_lockfs_end(ulp);
5728         as_rangeunlock(as);
5729 out:
5730         return (error);
5731 }
5732 
5733 /* ARGSUSED */
5734 static int
5735 ufs_addmap(struct vnode *vp,
5736         offset_t off,
5737         struct as *as,
5738         caddr_t addr,
5739         size_t  len,
5740         uchar_t  prot,
5741         uchar_t  maxprot,
5742         uint_t    flags,
5743         struct cred *cr,
5744         caller_context_t *ct)
5745 {
5746         struct inode *ip = VTOI(vp);
5747 
5748         if (vp->v_flag & VNOMAP) {
5749                 return (ENOSYS);
5750         }
5751 
5752         mutex_enter(&ip->i_tlock);
5753         ip->i_mapcnt += btopr(len);
5754         mutex_exit(&ip->i_tlock);
5755         return (0);
5756 }
5757 
5758 /*ARGSUSED*/
5759 static int
5760 ufs_delmap(struct vnode *vp, offset_t off, struct as *as, caddr_t addr,
5761         size_t len, uint_t prot,  uint_t maxprot,  uint_t flags,
5762         struct cred *cr, caller_context_t *ct)
5763 {
5764         struct inode *ip = VTOI(vp);
5765 
5766         if (vp->v_flag & VNOMAP) {
5767                 return (ENOSYS);
5768         }
5769 
5770         mutex_enter(&ip->i_tlock);
5771         ip->i_mapcnt -= btopr(len);  /* Count released mappings */
5772         ASSERT(ip->i_mapcnt >= 0);
5773         mutex_exit(&ip->i_tlock);
5774         return (0);
5775 }
5776 /*
5777  * Return the answer requested to poll() for non-device files
5778  */
5779 struct pollhead ufs_pollhd;
5780 
5781 /* ARGSUSED */
5782 int
5783 ufs_poll(vnode_t *vp, short ev, int any, short *revp, struct pollhead **phpp,
5784         caller_context_t *ct)
5785 {
5786         struct ufsvfs   *ufsvfsp;
5787 
5788         *revp = 0;
5789         ufsvfsp = VTOI(vp)->i_ufsvfs;
5790 
5791         if (!ufsvfsp) {
5792                 *revp = POLLHUP;
5793                 goto out;
5794         }
5795 
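             /*
              * A hard-locked or error-locked file system cannot make
              * progress, so report an error condition.  Otherwise the file
              * is considered ready for the requested events, with write
              * readiness reported only when the file system is neither
              * read-only nor write-locked.
              */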
5796         if (ULOCKFS_IS_HLOCK(&ufsvfsp->vfs_ulockfs) ||
5797             ULOCKFS_IS_ELOCK(&ufsvfsp->vfs_ulockfs)) {
5798                 *revp |= POLLERR;
5799 
5800         } else {
5801                 if ((ev & POLLOUT) && !ufsvfsp->vfs_fs->fs_ronly &&
5802                     !ULOCKFS_IS_WLOCK(&ufsvfsp->vfs_ulockfs))
5803                         *revp |= POLLOUT;
5804 
5805                 if ((ev & POLLWRBAND) && !ufsvfsp->vfs_fs->fs_ronly &&
5806                     !ULOCKFS_IS_WLOCK(&ufsvfsp->vfs_ulockfs))
5807                         *revp |= POLLWRBAND;
5808 
5809                 if (ev & POLLIN)
5810                         *revp |= POLLIN;
5811 
5812                 if (ev & POLLRDNORM)
5813                         *revp |= POLLRDNORM;
5814 
5815                 if (ev & POLLRDBAND)
5816                         *revp |= POLLRDBAND;
5817         }
5818 
5819         if ((ev & POLLPRI) && (*revp & (POLLERR|POLLHUP)))
5820                 *revp |= POLLPRI;
5821 out:
5822         *phpp = !any && !*revp ? &ufs_pollhd : (struct pollhead *)NULL;
5823 
5824         return (0);
5825 }
5826 
5827 /* ARGSUSED */
5828 static int
5829 ufs_l_pathconf(struct vnode *vp, int cmd, ulong_t *valp, struct cred *cr,
5830         caller_context_t *ct)
5831 {
5832         struct ufsvfs   *ufsvfsp = VTOI(vp)->i_ufsvfs;
5833         struct ulockfs  *ulp = NULL;
5834         struct inode    *sip = NULL;
5835         int             error;
5836         struct inode    *ip = VTOI(vp);
5837         int             issync;
5838 
5839         error = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_PATHCONF_MASK);
5840         if (error)
5841                 return (error);
5842 
5843         switch (cmd) {
5844                 /*
5845                  * Have to handle _PC_NAME_MAX here, because the normal way
5846                  * [fs_pathconf() -> VOP_STATVFS() -> ufs_statvfs()]
5847                  * results in a lock ordering reversal between
5848                  * ufs_lockfs_{begin,end}() and
5849                  * ufs_thread_{suspend,continue}().
5850                  *
5851                  * Keep in sync with ufs_statvfs().
5852                  */
5853         case _PC_NAME_MAX:
5854                 *valp = MAXNAMLEN;
5855                 break;
5856 
5857         case _PC_FILESIZEBITS:
5858                 if (ufsvfsp->vfs_lfflags & UFS_LARGEFILES)
5859                         *valp = UFS_FILESIZE_BITS;
5860                 else
5861                         *valp = 32;
5862                 break;
5863 
5864         case _PC_XATTR_EXISTS:
5865                 if (vp->v_vfsp->vfs_flag & VFS_XATTR) {
5866 
5867                         error =
5868                             ufs_xattr_getattrdir(vp, &sip, LOOKUP_XATTR, cr);
5869                         if (error ==  0 && sip != NULL) {
5870                                 /* Start transaction */
5871                                 if (ulp) {
5872                                         TRANS_BEGIN_CSYNC(ufsvfsp, issync,
5873                                             TOP_RMDIR, TOP_RMDIR_SIZE);
5874                                 }
5875                                 /*
5876                                  * Is the directory empty?
5877                                  */
5878                                 rw_enter(&sip->i_rwlock, RW_WRITER);
5879                                 rw_enter(&sip->i_contents, RW_WRITER);
5880                                 if (ufs_xattrdirempty(sip,
5881                                     sip->i_number, CRED())) {
5882                                         rw_enter(&ip->i_contents, RW_WRITER);
5883                                         ufs_unhook_shadow(ip, sip);
5884                                         rw_exit(&ip->i_contents);
5885 
5886                                         *valp = 0;
5887 
5888                                 } else
5889                                         *valp = 1;
5890                                 rw_exit(&sip->i_contents);
5891                                 rw_exit(&sip->i_rwlock);
5892                                 if (ulp) {
5893                                         TRANS_END_CSYNC(ufsvfsp, error, issync,
5894                                             TOP_RMDIR, TOP_RMDIR_SIZE);
5895                                 }
5896                                 VN_RELE(ITOV(sip));
5897                         } else if (error == ENOENT) {
5898                                 *valp = 0;
5899                                 error = 0;
5900                         }
5901                 } else {
5902                         error = fs_pathconf(vp, cmd, valp, cr, ct);
5903                 }
5904                 break;
5905 
5906         case _PC_ACL_ENABLED:
5907                 *valp = _ACL_ACLENT_ENABLED;
5908                 break;
5909 
5910         case _PC_MIN_HOLE_SIZE:
5911                 *valp = (ulong_t)ip->i_fs->fs_bsize;
5912                 break;
5913 
5914         case _PC_SATTR_ENABLED:
5915         case _PC_SATTR_EXISTS:
5916                 *valp = vfs_has_feature(vp->v_vfsp, VFSFT_SYSATTR_VIEWS) &&
5917                     (vp->v_type == VREG || vp->v_type == VDIR);
5918                 break;
5919 
5920         case _PC_TIMESTAMP_RESOLUTION:
5921                 /*
5922                  * UFS keeps only microsecond timestamp resolution.
5923                  * This is historical and will probably never change.
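                  * The value reported here is in nanoseconds, hence 1000.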
5924                  */
5925                 *valp = 1000L;
5926                 break;
5927 
5928         default:
5929                 error = fs_pathconf(vp, cmd, valp, cr, ct);
5930                 break;
5931         }
5932 
5933         if (ulp != NULL) {
5934                 ufs_lockfs_end(ulp);
5935         }
5936         return (error);
5937 }
5938 
5939 int ufs_pageio_writes, ufs_pageio_reads;
5940 
5941 /*ARGSUSED*/
5942 static int
5943 ufs_pageio(struct vnode *vp, page_t *pp, u_offset_t io_off, size_t io_len,
5944         int flags, struct cred *cr, caller_context_t *ct)
5945 {
5946         struct inode *ip = VTOI(vp);
5947         struct ufsvfs *ufsvfsp;
5948         page_t *npp = NULL, *opp = NULL, *cpp = pp;
5949         struct buf *bp;
5950         daddr_t bn;
5951         size_t done_len = 0, cur_len = 0;
5952         int err = 0;
5953         int contig = 0;
5954         int dolock;
5955         int vmpss = 0;
5956         struct ulockfs *ulp;
5957 
5958         if ((flags & B_READ) && pp != NULL && pp->p_vnode == vp &&
5959             vp->v_mpssdata != NULL) {
5960                 vmpss = 1;
5961         }
5962 
5963         dolock = (rw_owner(&ip->i_contents) != curthread);
5964         /*
5965          * We need a better check.  Ideally, we would use a separate
5966          * set of vnodeops so that hlocked and forcibly unmounted file
5967          * systems would return EIO where appropriate and without the
5968          * need for these checks.
5969          */
5970         if ((ufsvfsp = ip->i_ufsvfs) == NULL)
5971                 return (EIO);
5972 
5973         /*
5974          * For vmpss (pp can be NULL) case respect the quiesce protocol.
5975          * ul_lock must be taken before locking pages so we can't use it here
5976          * if pp is non NULL because segvn already locked pages
5977          * SE_EXCL. Instead we rely on the fact that a forced umount or
5978          * applying a filesystem lock via ufs_fiolfs() will block in the
5979          * implicit call to ufs_flush() until we unlock the pages after the
5980          * return to segvn. Other ufs_quiesce() callers keep ufs_quiesce_pend
5981          * above 0 until they are done. We have to be careful not to increment
5982          * ul_vnops_cnt here after forceful unmount hlocks the file system.
5983          *
5984          * If pp is NULL use ul_lock to make sure we don't increment
5985          * ul_vnops_cnt after forceful unmount hlocks the file system.
5986          */
5987         if (vmpss || pp == NULL) {
5988                 ulp = &ufsvfsp->vfs_ulockfs;
5989                 if (pp == NULL)
5990                         mutex_enter(&ulp->ul_lock);
5991                 if (ulp->ul_fs_lock & ULOCKFS_GETREAD_MASK) {
5992                         if (pp == NULL) {
5993                                 mutex_exit(&ulp->ul_lock);
5994                         }
5995                         return (vmpss ? EIO : EINVAL);
5996                 }
5997                 atomic_inc_ulong(&ulp->ul_vnops_cnt);
5998                 if (pp == NULL)
5999                         mutex_exit(&ulp->ul_lock);
6000                 if (ufs_quiesce_pend) {
6001                         if (!atomic_dec_ulong_nv(&ulp->ul_vnops_cnt))
6002                                 cv_broadcast(&ulp->ul_cv);
6003                         return (vmpss ? EIO : EINVAL);
6004                 }
6005         }
6006 
6007         if (dolock) {
6008                 /*
6009                  * segvn may call VOP_PAGEIO() instead of VOP_GETPAGE() to
6010                  * handle a fault against a segment that maps vnode pages with
6011                  * large mappings.  Segvn creates pages and holds them locked
6012                  * SE_EXCL during VOP_PAGEIO() call. In this case we have to
6013                  * use rw_tryenter() to avoid a potential deadlock since in
6014                  * lock order i_contents needs to be taken first.
6015                  * Segvn will retry via VOP_GETPAGE() if VOP_PAGEIO() fails.
6016                  */
6017                 if (!vmpss) {
6018                         rw_enter(&ip->i_contents, RW_READER);
6019                 } else if (!rw_tryenter(&ip->i_contents, RW_READER)) {
6020                         if (!atomic_dec_ulong_nv(&ulp->ul_vnops_cnt))
6021                                 cv_broadcast(&ulp->ul_cv);
6022                         return (EDEADLK);
6023                 }
6024         }
6025 
6026         /*
6027          * Return an error to segvn because the pagefault request is beyond
6028          * PAGESIZE rounded EOF.
6029          */
6030         if (vmpss && btopr(io_off + io_len) > btopr(ip->i_size)) {
6031                 if (dolock)
6032                         rw_exit(&ip->i_contents);
6033                 if (!atomic_dec_ulong_nv(&ulp->ul_vnops_cnt))
6034                         cv_broadcast(&ulp->ul_cv);
6035                 return (EFAULT);
6036         }
6037 
6038         if (pp == NULL) {
6039                 if (bmap_has_holes(ip)) {
6040                         err = ENOSYS;
6041                 } else {
6042                         err = EINVAL;
6043                 }
6044                 if (dolock)
6045                         rw_exit(&ip->i_contents);
6046                 if (!atomic_dec_ulong_nv(&ulp->ul_vnops_cnt))
6047                         cv_broadcast(&ulp->ul_cv);
6048                 return (err);
6049         }
6050 
6051         /*
6052          * Break the io request into chunks, one for each contiguous
6053          * stretch of disk blocks in the target file.
6054          */
6055         while (done_len < io_len) {
6056                 ASSERT(cpp);
6057                 contig = 0;
6058                 if (err = bmap_read(ip, (u_offset_t)(io_off + done_len),
6059                     &bn, &contig))
6060                         break;
6061 
6062                 if (bn == UFS_HOLE) {   /* No holey swapfiles */
6063                         if (vmpss) {
6064                                 err = EFAULT;
6065                                 break;
6066                         }
6067                         err = ufs_fault(ITOV(ip), "ufs_pageio: bn == UFS_HOLE");
6068                         break;
6069                 }
6070 
6071                 cur_len = MIN(io_len - done_len, contig);
6072                 /*
6073                  * Zero out the part of a page that lies beyond EOF when
6074                  * the last block of a file is a UFS fragment, so that
6075                  * ufs_pageio() can be used instead of ufs_getpage() to handle
6076                  * faults against segvn segments that use large pages.
6077                  */
6078                 page_list_break(&cpp, &npp, btopr(cur_len));
6079                 if ((flags & B_READ) && (cur_len & PAGEOFFSET)) {
6080                         size_t xlen = cur_len & PAGEOFFSET;
6081                         pagezero(cpp->p_prev, xlen, PAGESIZE - xlen);
6082                 }
6083 
6084                 bp = pageio_setup(cpp, cur_len, ip->i_devvp, flags);
6085                 ASSERT(bp != NULL);
6086 
6087                 bp->b_edev = ip->i_dev;
6088                 bp->b_dev = cmpdev(ip->i_dev);
6089                 bp->b_blkno = bn;
6090                 bp->b_un.b_addr = (caddr_t)0;
6091                 bp->b_file = ip->i_vnode;
6092 
6093                 ufsvfsp->vfs_iotstamp = ddi_get_lbolt();
6094                 ub.ub_pageios.value.ul++;
6095                 if (ufsvfsp->vfs_snapshot)
6096                         fssnap_strategy(&(ufsvfsp->vfs_snapshot), bp);
6097                 else
6098                         (void) bdev_strategy(bp);
6099 
6100                 if (flags & B_READ)
6101                         ufs_pageio_reads++;
6102                 else
6103                         ufs_pageio_writes++;
6104                 if (flags & B_READ)
6105                         lwp_stat_update(LWP_STAT_INBLK, 1);
6106                 else
6107                         lwp_stat_update(LWP_STAT_OUBLK, 1);
6108                 /*
6109                  * If the request is not B_ASYNC, wait for i/o to complete
6110                  * and re-assemble the page list to return to the caller.
6111                  * If it is B_ASYNC we leave the page list in pieces and
6112                  * cleanup() will dispose of them.
6113                  */
6114                 if ((flags & B_ASYNC) == 0) {
6115                         err = biowait(bp);
6116                         pageio_done(bp);
6117                         if (err)
6118                                 break;
6119                         page_list_concat(&opp, &cpp);
6120                 }
6121                 cpp = npp;
6122                 npp = NULL;
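                     /*
                      * For reads, the final chunk may have been trimmed to the
                      * fragment size above; round it back up to a whole page so
                      * that done_len reflects the full page consumed from the
                      * page list.
                      */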
6123                 if (flags & B_READ)
6124                         cur_len = P2ROUNDUP_TYPED(cur_len, PAGESIZE, size_t);
6125                 done_len += cur_len;
6126         }
6127         ASSERT(err || (cpp == NULL && npp == NULL && done_len == io_len));
6128         if (err) {
6129                 if (flags & B_ASYNC) {
6130                         /* Cleanup unprocessed parts of list */
6131                         page_list_concat(&cpp, &npp);
6132                         if (flags & B_READ)
6133                                 pvn_read_done(cpp, B_ERROR);
6134                         else
6135                                 pvn_write_done(cpp, B_ERROR);
6136                 } else {
6137                         /* Re-assemble list and let caller clean up */
6138                         page_list_concat(&opp, &cpp);
6139                         page_list_concat(&opp, &npp);
6140                 }
6141         }
6142 
6143         if (vmpss && !(ip->i_flag & IACC) && !ULOCKFS_IS_NOIACC(ulp) &&
6144             ufsvfsp->vfs_fs->fs_ronly == 0 && !ufsvfsp->vfs_noatime) {
6145                 mutex_enter(&ip->i_tlock);
6146                 ip->i_flag |= IACC;
6147                 ITIMES_NOLOCK(ip);
6148                 mutex_exit(&ip->i_tlock);
6149         }
6150 
6151         if (dolock)
6152                 rw_exit(&ip->i_contents);
6153         if (vmpss && !atomic_dec_ulong_nv(&ulp->ul_vnops_cnt))
6154                 cv_broadcast(&ulp->ul_cv);
6155         return (err);
6156 }
6157 
6158 /*
6159  * Called when the kernel is in a frozen state to dump data
6160  * directly to the device. It uses a private dump data structure,
6161  * set up by dump_ctl, to locate the correct disk block to which to dump.
6162  */
6163 /*ARGSUSED*/
6164 static int
6165 ufs_dump(vnode_t *vp, caddr_t addr, offset_t ldbn, offset_t dblks,
6166     caller_context_t *ct)
6167 {
6168         u_offset_t      file_size;
6169         struct inode    *ip = VTOI(vp);
6170         struct fs       *fs = ip->i_fs;
6171         daddr_t         dbn, lfsbn;
6172         int             disk_blks = fs->fs_bsize >> DEV_BSHIFT;
6173         int             error = 0;
6174         int             ndbs, nfsbs;
6175 
6176         /*
6177          * forced unmount case
6178          */
6179         if (ip->i_ufsvfs == NULL)
6180                 return (EIO);
6181         /*
6182          * Validate that the inode has not been modified since
6183          * the dump structure was allocated.
6184          */
6185         mutex_enter(&ip->i_tlock);
6186         if ((dump_info == NULL) ||
6187             (dump_info->ip != ip) ||
6188             (dump_info->time.tv_sec != ip->i_mtime.tv_sec) ||
6189             (dump_info->time.tv_usec != ip->i_mtime.tv_usec)) {
6190                 mutex_exit(&ip->i_tlock);
6191                 return (-1);
6192         }
6193         mutex_exit(&ip->i_tlock);
6194 
6195         /*
6196          * See that the file has room for this write
6197          */
6198         UFS_GET_ISIZE(&file_size, ip);
6199 
6200         if (ldbtob(ldbn + dblks) > file_size)
6201                 return (ENOSPC);
6202 
6203         /*
6204          * Find the physical disk block numbers from the dump
6205          * private data structure directly and write out the data
6206          * in contiguous block lumps
6207          */
6208         while (dblks > 0 && !error) {
6209                 lfsbn = (daddr_t)lblkno(fs, ldbtob(ldbn));
6210                 dbn = fsbtodb(fs, dump_info->dblk[lfsbn]) + ldbn % disk_blks;
6211                 nfsbs = 1;
6212                 ndbs = disk_blks - ldbn % disk_blks;
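                     /*
                      * Extend the run while the next file system block in
                      * dump_info maps to the disk blocks immediately following
                      * those gathered so far.
                      */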
6213                 while (ndbs < dblks && fsbtodb(fs, dump_info->dblk[lfsbn +
6214                     nfsbs]) == dbn + ndbs) {
6215                         nfsbs++;
6216                         ndbs += disk_blks;
6217                 }
6218                 if (ndbs > dblks)
6219                         ndbs = dblks;
6220                 error = bdev_dump(ip->i_dev, addr, dbn, ndbs);
6221                 addr += ldbtob((offset_t)ndbs);
6222                 dblks -= ndbs;
6223                 ldbn += ndbs;
6224         }
6225         return (error);
6226 
6227 }
6228 
6229 /*
6230  * Prepare the file system before and after the dump operation.
6231  *
6232  * action = DUMP_ALLOC:
6233  * Preparation before dump, allocate dump private data structure
6234  * to hold all the direct and indirect block info for dump.
6235  *
6236  * action = DUMP_FREE:
6237  * Clean up after dump, deallocate the dump private data structure.
6238  *
6239  * action = DUMP_SCAN:
6240  * Scan dump_info for *blkp DEV_BSIZE blocks of contig fs space;
6241  * if found, the starting file-relative DEV_BSIZE lbn is written
6242  * to *blkp; that lbn is intended for use with VOP_DUMP()
6243  */
6244 /*ARGSUSED*/
6245 static int
6246 ufs_dumpctl(vnode_t *vp, int action, offset_t *blkp, caller_context_t *ct)
6247 {
6248         struct inode    *ip = VTOI(vp);
6249         ufsvfs_t        *ufsvfsp = ip->i_ufsvfs;
6250         struct fs       *fs;
6251         daddr32_t       *dblk, *storeblk;
6252         daddr32_t       *nextblk, *endblk;
6253         struct buf      *bp;
6254         int             i, entry, entries;
6255         int             n, ncontig;
6256 
6257         /*
6258          * check for forced unmount
6259          */
6260         if (ufsvfsp == NULL)
6261                 return (EIO);
6262 
6263         if (action == DUMP_ALLOC) {
6264                 /*
6265                  * alloc and record dump_info
6266                  */
6267                 if (dump_info != NULL)
6268                         return (EINVAL);
6269 
6270                 ASSERT(vp->v_type == VREG);
6271                 fs = ufsvfsp->vfs_fs;
6272 
6273                 rw_enter(&ip->i_contents, RW_READER);
6274 
6275                 if (bmap_has_holes(ip)) {
6276                         rw_exit(&ip->i_contents);
6277                         return (EFAULT);
6278                 }
6279 
6280                 /*
6281                  * calculate and allocate space needed according to i_size
6282                  */
6283                 entries = (int)lblkno(fs, blkroundup(fs, ip->i_size));
6284                 dump_info = kmem_alloc(sizeof (struct dump) +
6285                     (entries - 1) * sizeof (daddr32_t), KM_NOSLEEP);
6286                 if (dump_info == NULL) {
6287                         rw_exit(&ip->i_contents);
6288                         return (ENOMEM);
6289                 }
6290 
6291                 /* Start saving the info */
6292                 dump_info->fsbs = entries;
6293                 dump_info->ip = ip;
6294                 storeblk = &dump_info->dblk[0];
6295 
6296                 /* Direct Blocks */
6297                 for (entry = 0; entry < NDADDR && entry < entries; entry++)
6298                         *storeblk++ = ip->i_db[entry];
6299 
6300                 /* Indirect Blocks */
6301                 for (i = 0; i < NIADDR; i++) {
6302                         int error = 0;
6303 
6304                         bp = UFS_BREAD(ufsvfsp,
6305                             ip->i_dev, fsbtodb(fs, ip->i_ib[i]), fs->fs_bsize);
6306                         if (bp->b_flags & B_ERROR)
6307                                 error = EIO;
6308                         else {
6309                                 dblk = bp->b_un.b_daddr;
6310                                 if ((storeblk = save_dblks(ip, ufsvfsp,
6311                                     storeblk, dblk, i, entries)) == NULL)
6312                                         error = EIO;
6313                         }
6314 
6315                         brelse(bp);
6316 
6317                         if (error != 0) {
6318                                 kmem_free(dump_info, sizeof (struct dump) +
6319                                     (entries - 1) * sizeof (daddr32_t));
6320                                 rw_exit(&ip->i_contents);
6321                                 dump_info = NULL;
6322                                 return (error);
6323                         }
6324                 }
6325                 /* and time stamp the information */
6326                 mutex_enter(&ip->i_tlock);
6327                 dump_info->time = ip->i_mtime;
6328                 mutex_exit(&ip->i_tlock);
6329 
6330                 rw_exit(&ip->i_contents);
6331         } else if (action == DUMP_FREE) {
6332                 /*
6333                  * free dump_info
6334                  */
6335                 if (dump_info == NULL)
6336                         return (EINVAL);
6337                 entries = dump_info->fsbs - 1;
6338                 kmem_free(dump_info, sizeof (struct dump) +
6339                     entries * sizeof (daddr32_t));
6340                 dump_info = NULL;
6341         } else if (action == DUMP_SCAN) {
6342                 /*
6343                  * scan dump_info
6344                  */
6345                 if (dump_info == NULL)
6346                         return (EINVAL);
6347 
6348                 dblk = dump_info->dblk;
6349                 nextblk = dblk + 1;
6350                 endblk = dblk + dump_info->fsbs - 1;
6351                 fs = ufsvfsp->vfs_fs;
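                     /*
                      * Convert the requested amount of contiguous space from
                      * DEV_BSIZE blocks (passed in *blkp) into the number of
                      * file system blocks that must be found back to back.
                      */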
6352                 ncontig = *blkp >> (fs->fs_bshift - DEV_BSHIFT);
6353 
6354                 /*
6355                  * scan dblk[] entries; contig fs space is found when:
6356                  * ((current blkno + frags per block) == next blkno)
6357                  */
6358                 n = 0;
6359                 while (n < ncontig && dblk < endblk) {
6360                         if ((*dblk + fs->fs_frag) == *nextblk)
6361                                 n++;
6362                         else
6363                                 n = 0;
6364                         dblk++;
6365                         nextblk++;
6366                 }
6367 
6368                 /*
6369                  * index is where size bytes of contig space begins;
6370                  * conversion from index to the file's DEV_BSIZE lbn
6371                  * is equivalent to:  (index * fs_bsize) / DEV_BSIZE
6372                  */
6373                 if (n == ncontig) {
6374                         i = (dblk - dump_info->dblk) - ncontig;
6375                         *blkp = i << (fs->fs_bshift - DEV_BSHIFT);
6376                 } else
6377                         return (EFAULT);
6378         }
6379         return (0);
6380 }
6381 
6382 /*
6383  * Recursive helper function for ufs_dumpctl().  It follows the indirect file
6384  * system blocks until it reaches the disk block addresses, which are
6385  * then stored into the given buffer, storeblk.
6386  */
6387 static daddr32_t *
6388 save_dblks(struct inode *ip, struct ufsvfs *ufsvfsp,  daddr32_t *storeblk,
6389     daddr32_t *dblk, int level, int entries)
6390 {
6391         struct fs       *fs = ufsvfsp->vfs_fs;
6392         struct buf      *bp;
6393         int             i;
6394 
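             /*
              * At the innermost level the entries are data block addresses;
              * copy them out until either the indirect block or the dump
              * table is exhausted.
              */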
6395         if (level == 0) {
6396                 for (i = 0; i < NINDIR(fs); i++) {
6397                         if (storeblk - dump_info->dblk >= entries)
6398                                 break;
6399                         *storeblk++ = dblk[i];
6400                 }
6401                 return (storeblk);
6402         }
6403         for (i = 0; i < NINDIR(fs); i++) {
6404                 if (storeblk - dump_info->dblk >= entries)
6405                         break;
6406                 bp = UFS_BREAD(ufsvfsp,
6407                     ip->i_dev, fsbtodb(fs, dblk[i]), fs->fs_bsize);
6408                 if (bp->b_flags & B_ERROR) {
6409                         brelse(bp);
6410                         return (NULL);
6411                 }
6412                 storeblk = save_dblks(ip, ufsvfsp, storeblk, bp->b_un.b_daddr,
6413                     level - 1, entries);
6414                 brelse(bp);
6415 
6416                 if (storeblk == NULL)
6417                         return (NULL);
6418         }
6419         return (storeblk);
6420 }
6421 
6422 /* ARGSUSED */
6423 static int
6424 ufs_getsecattr(struct vnode *vp, vsecattr_t *vsap, int flag,
6425         struct cred *cr, caller_context_t *ct)
6426 {
6427         struct inode    *ip = VTOI(vp);
6428         struct ulockfs  *ulp;
6429         struct ufsvfs   *ufsvfsp = ip->i_ufsvfs;
6430         ulong_t         vsa_mask = vsap->vsa_mask;
6431         int             err = EINVAL;
6432 
6433         vsa_mask &= (VSA_ACL | VSA_ACLCNT | VSA_DFACL | VSA_DFACLCNT);
6434 
6435         /*
6436          * Only grab locks if needed - they're not needed to check vsa_mask
6437          * or if the mask contains no acl flags.
6438          */
6439         if (vsa_mask != 0) {
6440                 if (err = ufs_lockfs_begin(ufsvfsp, &ulp,
6441                     ULOCKFS_GETATTR_MASK))
6442                         return (err);
6443 
6444                 rw_enter(&ip->i_contents, RW_READER);
6445                 err = ufs_acl_get(ip, vsap, flag, cr);
6446                 rw_exit(&ip->i_contents);
6447 
6448                 if (ulp)
6449                         ufs_lockfs_end(ulp);
6450         }
6451         return (err);
6452 }
6453 
6454 /* ARGSUSED */
6455 static int
6456 ufs_setsecattr(struct vnode *vp, vsecattr_t *vsap, int flag, struct cred *cr,
6457         caller_context_t *ct)
6458 {
6459         struct inode    *ip = VTOI(vp);
6460         struct ulockfs  *ulp = NULL;
6461         struct ufsvfs   *ufsvfsp = VTOI(vp)->i_ufsvfs;
6462         ulong_t         vsa_mask = vsap->vsa_mask;
6463         int             err;
6464         int             haverwlock = 1;
6465         int             trans_size;
6466         int             donetrans = 0;
6467         int             retry = 1;
6468 
6469         ASSERT(RW_LOCK_HELD(&ip->i_rwlock));
6470 
6471         /* Abort now if the request is either empty or invalid. */
6472         vsa_mask &= (VSA_ACL | VSA_ACLCNT | VSA_DFACL | VSA_DFACLCNT);
6473         if ((vsa_mask == 0) ||
6474             ((vsap->vsa_aclentp == NULL) &&
6475             (vsap->vsa_dfaclentp == NULL))) {
6476                 err = EINVAL;
6477                 goto out;
6478         }
6479 
6480         /*
6481          * Following convention, if this is a directory then we acquire the
6482          * inode's i_rwlock after starting a UFS logging transaction;
6483          * otherwise, we acquire it beforehand. Since we were called (and
6484          * must therefore return) with the lock held, we will have to drop it,
6485          * and later reacquire it, if operating on a directory.
6486          */
6487         if (vp->v_type == VDIR) {
6488                 rw_exit(&ip->i_rwlock);
6489                 haverwlock = 0;
6490         } else {
6491                 /* Upgrade the lock if required. */
6492                 if (!rw_write_held(&ip->i_rwlock)) {
6493                         rw_exit(&ip->i_rwlock);
6494                         rw_enter(&ip->i_rwlock, RW_WRITER);
6495                 }
6496         }
6497 
6498 again:
6499         ASSERT(!(vp->v_type == VDIR && haverwlock));
6500         if (err = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_SETATTR_MASK)) {
6501                 ulp = NULL;
6502                 retry = 0;
6503                 goto out;
6504         }
6505 
6506         /*
6507          * Check that the file system supports this operation. Note that
6508          * ufs_lockfs_begin() will have checked that the file system had
6509          * not been forcibly unmounted.
6510          */
6511         if (ufsvfsp->vfs_fs->fs_ronly) {
6512                 err = EROFS;
6513                 goto out;
6514         }
6515         if (ufsvfsp->vfs_nosetsec) {
6516                 err = ENOSYS;
6517                 goto out;
6518         }
6519 
6520         if (ulp) {
6521                 TRANS_BEGIN_ASYNC(ufsvfsp, TOP_SETSECATTR,
6522                     trans_size = TOP_SETSECATTR_SIZE(VTOI(vp)));
6523                 donetrans = 1;
6524         }
6525 
6526         if (vp->v_type == VDIR) {
6527                 rw_enter(&ip->i_rwlock, RW_WRITER);
6528                 haverwlock = 1;
6529         }
6530 
6531         ASSERT(haverwlock);
6532 
6533         /* Do the actual work. */
6534         rw_enter(&ip->i_contents, RW_WRITER);
6535         /*
6536          * Suppress out of inodes messages if we will retry.
6537          */
6538         if (retry)
6539                 ip->i_flag |= IQUIET;
6540         err = ufs_acl_set(ip, vsap, flag, cr);
6541         ip->i_flag &= ~IQUIET;
6542         rw_exit(&ip->i_contents);
6543 
6544 out:
6545         if (ulp) {
6546                 if (donetrans) {
6547                         /*
6548                          * top_end_async() can eventually call
6549                          * top_end_sync(), which can block. We must
6550                          * therefore observe the lock-ordering protocol
6551                          * here as well.
6552                          */
6553                         if (vp->v_type == VDIR) {
6554                                 rw_exit(&ip->i_rwlock);
6555                                 haverwlock = 0;
6556                         }
6557                         TRANS_END_ASYNC(ufsvfsp, TOP_SETSECATTR, trans_size);
6558                 }
6559                 ufs_lockfs_end(ulp);
6560         }
6561         /*
6562          * If no inodes are available, try scaring a logically-
6563          * free one out of the delete queue to someplace
6564          * that we can find it.
6565          */
6566         if ((err == ENOSPC) && retry && TRANS_ISTRANS(ufsvfsp)) {
6567                 ufs_delete_drain_wait(ufsvfsp, 1);
6568                 retry = 0;
6569                 if (vp->v_type == VDIR && haverwlock) {
6570                         rw_exit(&ip->i_rwlock);
6571                         haverwlock = 0;
6572                 }
6573                 goto again;
6574         }
6575         /*
6576          * If we need to reacquire the lock then it is safe to do so
6577          * as a reader. This is because ufs_rwunlock(), which will be
6578          * called by our caller after we return, does not differentiate
6579          * between shared and exclusive locks.
6580          */
6581         if (!haverwlock) {
6582                 ASSERT(vp->v_type == VDIR);
6583                 rw_enter(&ip->i_rwlock, RW_READER);
6584         }
6585 
6586         return (err);
6587 }
6588 
6589 /*
6590  * Locate the vnode to be used for an event notification. As this will
6591  * be called prior to the name space change, perform basic verification
6592  * that the change will be allowed.
6593  */
6594 
6595 static int
6596 ufs_eventlookup(struct vnode *dvp, char *nm, struct cred *cr,
6597     struct vnode **vpp)
6598 {
6599         int     namlen;
6600         int     error;
6601         struct vnode    *vp;
6602         struct inode    *ip;
6603         struct inode    *xip;
6604         struct ufsvfs   *ufsvfsp;
6605         struct ulockfs  *ulp;
6606 
6607         ip = VTOI(dvp);
6608         *vpp = NULL;
6609 
6610         if ((namlen = strlen(nm)) == 0)
6611                 return (EINVAL);
6612 
6613         if (nm[0] == '.') {
6614                 if (namlen == 1)
6615                         return (EINVAL);
6616                 else if ((namlen == 2) && nm[1] == '.') {
6617                         return (EEXIST);
6618                 }
6619         }
6620 
6621         /*
6622          * Check accessibility and write access of parent directory as we
6623          * only want to post the event if we're able to make a change.
6624          */
6625         if (error = ufs_diraccess(ip, IEXEC|IWRITE, cr))
6626                 return (error);
6627 
6628         if (vp = dnlc_lookup(dvp, nm)) {
6629                 if (vp == DNLC_NO_VNODE) {
6630                         VN_RELE(vp);
6631                         return (ENOENT);
6632                 }
6633 
6634                 *vpp = vp;
6635                 return (0);
6636         }
6637 
6638         /*
6639          * Keep the idle queue from getting too long by idling two
6640          * inodes before attempting to allocate another.
6641          * This operation must be performed before entering lockfs
6642          * or a transaction.
6643          */
6644         if (ufs_idle_q.uq_ne > ufs_idle_q.uq_hiwat)
6645                 if ((curthread->t_flag & T_DONTBLOCK) == 0) {
6646                         ins.in_lidles.value.ul += ufs_lookup_idle_count;
6647                         ufs_idle_some(ufs_lookup_idle_count);
6648                 }
6649 
6650         ufsvfsp = ip->i_ufsvfs;
6651 
6652 retry_lookup:
6653         if (error = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_LOOKUP_MASK))
6654                 return (error);
6655 
6656         if ((error = ufs_dirlook(ip, nm, &xip, cr, 1, 1)) == 0) {
6657                 vp = ITOV(xip);
6658                 *vpp = vp;
6659         }
6660 
6661         if (ulp) {
6662                 ufs_lockfs_end(ulp);
6663         }
6664 
6665         if (error == EAGAIN)
6666                 goto retry_lookup;
6667 
6668         return (error);
6669 }