1 /*
   2  * CDDL HEADER START
   3  *
   4  * The contents of this file are subject to the terms of the
   5  * Common Development and Distribution License (the "License").
   6  * You may not use this file except in compliance with the License.
   7  *
   8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9  * or http://www.opensolaris.org/os/licensing.
  10  * See the License for the specific language governing permissions
  11  * and limitations under the License.
  12  *
  13  * When distributing Covered Code, include this CDDL HEADER in each
  14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15  * If applicable, add the following below this CDDL HEADER, with the
  16  * fields enclosed by brackets "[]" replaced with your own identifying
  17  * information: Portions Copyright [yyyy] [name of copyright owner]
  18  *
  19  * CDDL HEADER END
  20  */
  21 
  22 /*
  23  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
  24  * Use is subject to license terms.
  25  * Copyright 2016 Nexenta Systems, Inc.
  26  * Copyright (c) 2017 by Delphix. All rights reserved.
  27  */
  28 /*
  29  * Copyright 2013 Nexenta Systems, Inc.  All rights reserved.
  30  */
  31 
  32 /*      Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T */
  33 /*        All Rights Reserved   */
  34 
  35 /*
  36  * University Copyright- Copyright (c) 1982, 1986, 1988
  37  * The Regents of the University of California
  38  * All Rights Reserved
  39  *
  40  * University Acknowledgment- Portions of this document are derived from
  41  * software developed by the University of California, Berkeley, and its
  42  * contributors.
  43  */
  44 
  45 #include <sys/types.h>
  46 #include <sys/t_lock.h>
  47 #include <sys/param.h>
  48 #include <sys/systm.h>
  49 #include <sys/bitmap.h>
  50 #include <sys/sysmacros.h>
  51 #include <sys/kmem.h>
  52 #include <sys/signal.h>
  53 #include <sys/user.h>
  54 #include <sys/proc.h>
  55 #include <sys/disp.h>
  56 #include <sys/buf.h>
  57 #include <sys/pathname.h>
  58 #include <sys/vfs.h>
  59 #include <sys/vfs_opreg.h>
  60 #include <sys/vnode.h>
  61 #include <sys/file.h>
  62 #include <sys/atomic.h>
  63 #include <sys/uio.h>
  64 #include <sys/dkio.h>
  65 #include <sys/cred.h>
  66 #include <sys/conf.h>
  67 #include <sys/dnlc.h>
  68 #include <sys/kstat.h>
  69 #include <sys/acl.h>
  70 #include <sys/fs/ufs_fsdir.h>
  71 #include <sys/fs/ufs_fs.h>
  72 #include <sys/fs/ufs_inode.h>
  73 #include <sys/fs/ufs_mount.h>
  74 #include <sys/fs/ufs_acl.h>
  75 #include <sys/fs/ufs_panic.h>
  76 #include <sys/fs/ufs_bio.h>
  77 #include <sys/fs/ufs_quota.h>
  78 #include <sys/fs/ufs_log.h>
  79 #undef NFS
  80 #include <sys/statvfs.h>
  81 #include <sys/mount.h>
  82 #include <sys/mntent.h>
  83 #include <sys/swap.h>
  84 #include <sys/errno.h>
  85 #include <sys/debug.h>
  86 #include "fs/fs_subr.h"
  87 #include <sys/cmn_err.h>
  88 #include <sys/dnlc.h>
  89 #include <sys/fssnap_if.h>
  90 #include <sys/sunddi.h>
  91 #include <sys/bootconf.h>
  92 #include <sys/policy.h>
  93 #include <sys/zone.h>
  94 
  95 /*
  96  * This is the loadable module wrapper.
  97  */
  98 #include <sys/modctl.h>
  99 
 100 int                     ufsfstype;
 101 vfsops_t                *ufs_vfsops;
 102 static int              ufsinit(int, char *);
 103 static int              mountfs();
 104 extern int              highbit();
 105 extern struct instats   ins;
 106 extern struct vnode *common_specvp(struct vnode *vp);
 107 extern vfs_t            EIO_vfs;
 108 
 109 struct  dquot *dquot, *dquotNDQUOT;
 110 
 111 /*
 112  * Cylinder group summary information handling tunable.
 113  * This defines when these deltas get logged.
 * If the number of cylinder groups in the file system is over the
 * tunable, then we log csum updates. Otherwise the updates are only
 * done at unmount time, for performance. After a panic they can be
 * quickly reconstructed during mounting. See ufs_construct_si(),
 * called from ufs_getsummaryinfo().
 119  *
 120  * This performance feature can of course be disabled by setting
 121  * ufs_ncg_log to 0, and fully enabled by setting it to 0xffffffff.
 122  */
 123 #define UFS_LOG_NCG_DEFAULT 10000
 124 uint32_t ufs_ncg_log = UFS_LOG_NCG_DEFAULT;
 125 
 126 /*
 127  * ufs_clean_root indicates whether the root fs went down cleanly
 128  */
 129 static int ufs_clean_root = 0;
 130 
 131 /*
 132  * UFS Mount options table
 133  */
 134 static char *intr_cancel[] = { MNTOPT_NOINTR, NULL };
 135 static char *nointr_cancel[] = { MNTOPT_INTR, NULL };
 136 static char *forcedirectio_cancel[] = { MNTOPT_NOFORCEDIRECTIO, NULL };
 137 static char *noforcedirectio_cancel[] = { MNTOPT_FORCEDIRECTIO, NULL };
 138 static char *largefiles_cancel[] = { MNTOPT_NOLARGEFILES, NULL };
 139 static char *nolargefiles_cancel[] = { MNTOPT_LARGEFILES, NULL };
 140 static char *logging_cancel[] = { MNTOPT_NOLOGGING, NULL };
 141 static char *nologging_cancel[] = { MNTOPT_LOGGING, NULL };
 142 static char *xattr_cancel[] = { MNTOPT_NOXATTR, NULL };
 143 static char *noxattr_cancel[] = { MNTOPT_XATTR, NULL };
 144 static char *quota_cancel[] = { MNTOPT_NOQUOTA, NULL };
 145 static char *noquota_cancel[] = { MNTOPT_QUOTA, NULL };
 146 static char *dfratime_cancel[] = { MNTOPT_NODFRATIME, NULL };
 147 static char *nodfratime_cancel[] = { MNTOPT_DFRATIME, NULL };
 148 
 149 static mntopt_t mntopts[] = {
 150 /*
 151  *      option name             cancel option   default arg     flags
 152  *              ufs arg flag
 153  */
 154         { MNTOPT_INTR,          intr_cancel,    NULL,           MO_DEFAULT,
 155                 (void *)0 },
 156         { MNTOPT_NOINTR,        nointr_cancel,  NULL,           0,
 157                 (void *)UFSMNT_NOINTR },
 158         { MNTOPT_SYNCDIR,       NULL,           NULL,           0,
 159                 (void *)UFSMNT_SYNCDIR },
 160         { MNTOPT_FORCEDIRECTIO, forcedirectio_cancel, NULL,     0,
 161                 (void *)UFSMNT_FORCEDIRECTIO },
 162         { MNTOPT_NOFORCEDIRECTIO, noforcedirectio_cancel, NULL, 0,
 163                 (void *)UFSMNT_NOFORCEDIRECTIO },
 164         { MNTOPT_NOSETSEC,      NULL,           NULL,           0,
 165                 (void *)UFSMNT_NOSETSEC },
 166         { MNTOPT_LARGEFILES,    largefiles_cancel, NULL,        MO_DEFAULT,
 167                 (void *)UFSMNT_LARGEFILES },
 168         { MNTOPT_NOLARGEFILES,  nolargefiles_cancel, NULL,      0,
 169                 (void *)0 },
 170         { MNTOPT_LOGGING,       logging_cancel, NULL,           MO_TAG,
 171                 (void *)UFSMNT_LOGGING },
 172         { MNTOPT_NOLOGGING,     nologging_cancel, NULL,
 173                 MO_NODISPLAY|MO_DEFAULT|MO_TAG, (void *)0 },
 174         { MNTOPT_QUOTA,         quota_cancel, NULL,             MO_IGNORE,
 175                 (void *)0 },
 176         { MNTOPT_NOQUOTA,       noquota_cancel, NULL,
 177                 MO_NODISPLAY|MO_DEFAULT, (void *)0 },
 178         { MNTOPT_GLOBAL,        NULL,           NULL,           0,
 179                 (void *)0 },
 180         { MNTOPT_XATTR, xattr_cancel,           NULL,           MO_DEFAULT,
 181                 (void *)0 },
 182         { MNTOPT_NOXATTR,       noxattr_cancel,         NULL,           0,
 183                 (void *)0 },
 184         { MNTOPT_NOATIME,       NULL,           NULL,           0,
 185                 (void *)UFSMNT_NOATIME },
 186         { MNTOPT_DFRATIME,      dfratime_cancel, NULL,          0,
 187                 (void *)0 },
 188         { MNTOPT_NODFRATIME,    nodfratime_cancel, NULL,
 189                 MO_NODISPLAY|MO_DEFAULT, (void *)UFSMNT_NODFRATIME },
 190         { MNTOPT_ONERROR,       NULL,           UFSMNT_ONERROR_PANIC_STR,
 191                 MO_DEFAULT|MO_HASVALUE, (void *)0 },
 192 };
 193 
 194 static mntopts_t ufs_mntopts = {
 195         sizeof (mntopts) / sizeof (mntopt_t),
 196         mntopts
 197 };
 198 
 199 static vfsdef_t vfw = {
 200         VFSDEF_VERSION,
 201         "ufs",
 202         ufsinit,
 203         VSW_HASPROTO|VSW_CANREMOUNT|VSW_STATS|VSW_CANLOFI|VSW_MOUNTDEV,
 204         &ufs_mntopts
 205 };
 206 
 207 /*
 208  * Module linkage information for the kernel.
 209  */
 210 extern struct mod_ops mod_fsops;
 211 
 212 static struct modlfs modlfs = {
 213         &mod_fsops, "filesystem for ufs", &vfw
 214 };
 215 
 216 static struct modlinkage modlinkage = {
 217         MODREV_1, (void *)&modlfs, NULL
 218 };
 219 
 220 /*
 221  * An attempt has been made to make this module unloadable.  In order to
 222  * test it, we need a system in which the root fs is NOT ufs.  THIS HAS NOT
 223  * BEEN DONE
 224  */
 225 
 226 extern kstat_t *ufs_inode_kstat;
 227 extern uint_t ufs_lockfs_key;
 228 extern void ufs_lockfs_tsd_destructor(void *);
 229 extern uint_t bypass_snapshot_throttle_key;
 230 
 231 int
 232 _init(void)
 233 {
 234         /*
	 * Create an index into the per-thread array so that any thread doing
	 * a VOP will have a lockfs mark on it.
 237          */
 238         tsd_create(&ufs_lockfs_key, ufs_lockfs_tsd_destructor);
 239         tsd_create(&bypass_snapshot_throttle_key, NULL);
 240         return (mod_install(&modlinkage));
 241 }
 242 
 243 int
 244 _fini(void)
 245 {
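	/*
	 * Refuse to unload.  As noted above, unloading this module has
	 * never been verified on a system whose root file system is not UFS.
	 */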
 246         return (EBUSY);
 247 }
 248 
 249 int
 250 _info(struct modinfo *modinfop)
 251 {
 252         return (mod_info(&modlinkage, modinfop));
 253 }
 254 
 255 extern struct vnode *makespecvp(dev_t dev, vtype_t type);
 256 
 257 extern kmutex_t ufs_scan_lock;
 258 
 259 static int mountfs(struct vfs *, enum whymountroot, struct vnode *, char *,
 260                 struct cred *, int, void *, int);
 261 
 262 
 263 static int
 264 ufs_mount(struct vfs *vfsp, struct vnode *mvp, struct mounta *uap,
 265     struct cred *cr)
 266 {
 267         char *data = uap->dataptr;
 268         int datalen = uap->datalen;
 269         dev_t dev;
 270         struct vnode *lvp = NULL;
 271         struct vnode *svp = NULL;
 272         struct pathname dpn;
 273         int error;
 274         enum whymountroot why = ROOT_INIT;
 275         struct ufs_args args;
 276         int oflag, aflag;
 277         int fromspace = (uap->flags & MS_SYSSPACE) ?
 278             UIO_SYSSPACE : UIO_USERSPACE;
 279 
 280         if ((error = secpolicy_fs_mount(cr, mvp, vfsp)) != 0)
 281                 return (error);
 282 
 283         if (mvp->v_type != VDIR)
 284                 return (ENOTDIR);
 285 
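	/*
	 * Unless this is a remount or an overlay mount, refuse to mount on
	 * a directory that is in use or is the root of another mount.
	 */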
 286         mutex_enter(&mvp->v_lock);
 287         if ((uap->flags & MS_REMOUNT) == 0 &&
 288             (uap->flags & MS_OVERLAY) == 0 &&
 289             (mvp->v_count != 1 || (mvp->v_flag & VROOT))) {
 290                 mutex_exit(&mvp->v_lock);
 291                 return (EBUSY);
 292         }
 293         mutex_exit(&mvp->v_lock);
 294 
 295         /*
 296          * Get arguments
 297          */
 298         bzero(&args, sizeof (args));
 299         if ((uap->flags & MS_DATA) && data != NULL && datalen != 0) {
 300                 int copy_result = 0;
 301 
 302                 if (datalen > sizeof (args))
 303                         return (EINVAL);
 304                 if (uap->flags & MS_SYSSPACE)
 305                         bcopy(data, &args, datalen);
 306                 else
 307                         copy_result = copyin(data, &args, datalen);
 308                 if (copy_result)
 309                         return (EFAULT);
 310                 datalen = sizeof (struct ufs_args);
 311         } else {
 312                 datalen = 0;
 313         }
 314 
 315         if ((vfsp->vfs_flag & VFS_RDONLY) != 0 ||
 316             (uap->flags & MS_RDONLY) != 0) {
 317                 oflag = FREAD;
 318                 aflag = VREAD;
 319         } else {
 320                 oflag = FREAD | FWRITE;
 321                 aflag = VREAD | VWRITE;
 322         }
 323 
 324         /*
 325          * Read in the mount point pathname
 326          * (so we can record the directory the file system was last mounted on).
 327          */
 328         if (error = pn_get(uap->dir, fromspace, &dpn))
 329                 return (error);
 330 
 331         /*
 332          * Resolve path name of special file being mounted.
 333          */
 334         if (error = lookupname(uap->spec, fromspace, FOLLOW, NULL, &svp)) {
 335                 pn_free(&dpn);
 336                 return (error);
 337         }
 338 
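	/*
	 * Check for a lofi-backed mount: vfs_get_lofi() returns 0 (with a
	 * held vnode in lvp) when this mount uses a lofi device, a positive
	 * errno on failure, and a negative value when no lofi device is
	 * involved.
	 */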
 339         error = vfs_get_lofi(vfsp, &lvp);
 340 
 341         if (error > 0) {
 342                 VN_RELE(svp);
 343                 pn_free(&dpn);
 344                 return (error);
 345         } else if (error == 0) {
 346                 dev = lvp->v_rdev;
 347 
 348                 if (getmajor(dev) >= devcnt) {
 349                         error = ENXIO;
 350                         goto out;
 351                 }
 352         } else {
 353                 dev = svp->v_rdev;
 354 
 355                 if (svp->v_type != VBLK) {
 356                         VN_RELE(svp);
 357                         pn_free(&dpn);
 358                         return (ENOTBLK);
 359                 }
 360 
 361                 if (getmajor(dev) >= devcnt) {
 362                         error = ENXIO;
 363                         goto out;
 364                 }
 365 
 366                 /*
 367                  * In SunCluster, requests to a global device are
 368                  * satisfied by a local device. We substitute the global
 369                  * pxfs node with a local spec node here.
 370                  */
 371                 if (IS_PXFSVP(svp)) {
 372                         ASSERT(lvp == NULL);
 373                         VN_RELE(svp);
 374                         svp = makespecvp(dev, VBLK);
 375                 }
 376 
 377                 if ((error = secpolicy_spec_open(cr, svp, oflag)) != 0) {
 378                         VN_RELE(svp);
 379                         pn_free(&dpn);
 380                         return (error);
 381                 }
 382         }
 383 
 384         if (uap->flags & MS_REMOUNT)
 385                 why = ROOT_REMOUNT;
 386 
 387         /*
 388          * Open device/file mounted on.  We need this to check whether
 389          * the caller has sufficient rights to access the resource in
 390          * question.  When bio is fixed for vnodes this can all be vnode
 391          * operations.
 392          */
 393         if ((error = VOP_ACCESS(svp, aflag, 0, cr, NULL)) != 0)
 394                 goto out;
 395 
 396         /*
	 * Ensure that this device isn't already mounted, and that no mount
	 * of it is already in progress, unless this is a REMOUNT request or
	 * we are told to suppress mount checks. Global mounts require
	 * special handling.
 400          */
 401         if ((uap->flags & MS_NOCHECK) == 0) {
 402                 if ((uap->flags & MS_GLOBAL) == 0 &&
 403                     vfs_devmounting(dev, vfsp)) {
 404                         error = EBUSY;
 405                         goto out;
 406                 }
 407                 if (vfs_devismounted(dev)) {
 408                         if ((uap->flags & MS_REMOUNT) == 0) {
 409                                 error = EBUSY;
 410                                 goto out;
 411                         }
 412                 }
 413         }
 414 
 415         /*
 416          * If the device is a tape, mount it read only
 417          */
 418         if (devopsp[getmajor(dev)]->devo_cb_ops->cb_flag & D_TAPE) {
 419                 vfsp->vfs_flag |= VFS_RDONLY;
 420                 vfs_setmntopt(vfsp, MNTOPT_RO, NULL, 0);
 421         }
 422         if (uap->flags & MS_RDONLY)
 423                 vfsp->vfs_flag |= VFS_RDONLY;
 424 
 425         /*
 426          * Mount the filesystem, free the device vnode on error.
 427          */
 428         error = mountfs(vfsp, why, lvp != NULL ? lvp : svp,
 429             dpn.pn_path, cr, 0, &args, datalen);
 430 
 431         if (error == 0) {
 432                 vfs_set_feature(vfsp, VFSFT_SYSATTR_VIEWS);
 433 
 434                 /*
 435                  * If lofi, drop our reference to the original file.
 436                  */
 437                 if (lvp != NULL)
 438                         VN_RELE(svp);
 439         }
 440 
 441 out:
 442         pn_free(&dpn);
 443 
 444         if (error) {
 445                 if (lvp != NULL)
 446                         VN_RELE(lvp);
 447                 if (svp != NULL)
 448                         VN_RELE(svp);
 449         }
 450         return (error);
 451 }
 452 
 453 /*
 454  * Mount root file system.
 * "why" is ROOT_INIT on the initial call, ROOT_REMOUNT if called to
 456  * remount the root file system, and ROOT_UNMOUNT if called to
 457  * unmount the root (e.g., as part of a system shutdown).
 458  *
 459  * XXX - this may be partially machine-dependent; it, along with the VFS_SWAPVP
 460  * operation, goes along with auto-configuration.  A mechanism should be
 461  * provided by which machine-INdependent code in the kernel can say "get me the
 462  * right root file system" and "get me the right initial swap area", and have
 463  * that done in what may well be a machine-dependent fashion.
 464  * Unfortunately, it is also file-system-type dependent (NFS gets it via
 465  * bootparams calls, UFS gets it from various and sundry machine-dependent
 466  * mechanisms, as SPECFS does for swap).
 467  */
 468 static int
 469 ufs_mountroot(struct vfs *vfsp, enum whymountroot why)
 470 {
 471         struct fs *fsp;
 472         int error;
 473         static int ufsrootdone = 0;
 474         dev_t rootdev;
 475         struct vnode *vp;
 476         struct vnode *devvp = 0;
 477         int ovflags;
 478         int doclkset;
 479         ufsvfs_t *ufsvfsp;
 480 
 481         if (why == ROOT_INIT) {
 482                 if (ufsrootdone++)
 483                         return (EBUSY);
 484                 rootdev = getrootdev();
 485                 if (rootdev == (dev_t)NODEV)
 486                         return (ENODEV);
 487                 vfsp->vfs_dev = rootdev;
 488                 vfsp->vfs_flag |= VFS_RDONLY;
 489         } else if (why == ROOT_REMOUNT) {
 490                 vp = ((struct ufsvfs *)vfsp->vfs_data)->vfs_devvp;
 491                 (void) dnlc_purge_vfsp(vfsp, 0);
 492                 vp = common_specvp(vp);
 493                 (void) VOP_PUTPAGE(vp, (offset_t)0, (size_t)0, B_INVAL,
 494                     CRED(), NULL);
 495                 (void) bfinval(vfsp->vfs_dev, 0);
 496                 fsp = getfs(vfsp);
 497 
 498                 ovflags = vfsp->vfs_flag;
 499                 vfsp->vfs_flag &= ~VFS_RDONLY;
 500                 vfsp->vfs_flag |= VFS_REMOUNT;
 501                 rootdev = vfsp->vfs_dev;
 502         } else if (why == ROOT_UNMOUNT) {
 503                 if (vfs_lock(vfsp) == 0) {
 504                         (void) ufs_flush(vfsp);
 505                         /*
 506                          * Mark the log as fully rolled
 507                          */
 508                         ufsvfsp = (ufsvfs_t *)vfsp->vfs_data;
 509                         fsp = ufsvfsp->vfs_fs;
 510                         if (TRANS_ISTRANS(ufsvfsp) &&
 511                             !TRANS_ISERROR(ufsvfsp) &&
 512                             (fsp->fs_rolled == FS_NEED_ROLL)) {
 513                                 ml_unit_t *ul = ufsvfsp->vfs_log;
 514 
 515                                 error = ufs_putsummaryinfo(ul->un_dev,
 516                                     ufsvfsp, fsp);
 517                                 if (error == 0) {
 518                                         fsp->fs_rolled = FS_ALL_ROLLED;
 519                                         UFS_BWRITE2(NULL, ufsvfsp->vfs_bufp);
 520                                 }
 521                         }
 522                         vfs_unlock(vfsp);
 523                 } else {
 524                         ufs_update(0);
 525                 }
 526 
 527                 vp = ((struct ufsvfs *)vfsp->vfs_data)->vfs_devvp;
 528                 (void) VOP_CLOSE(vp, FREAD|FWRITE, 1,
 529                     (offset_t)0, CRED(), NULL);
 530                 return (0);
 531         }
 532         error = vfs_lock(vfsp);
 533         if (error)
 534                 return (error);
 535 
 536         devvp = makespecvp(rootdev, VBLK);
 537 
 538         /* If RO media, don't call clkset() (see below) */
 539         doclkset = 1;
 540         if (why == ROOT_INIT) {
 541                 error = VOP_OPEN(&devvp, FREAD|FWRITE, CRED(), NULL);
 542                 if (error == 0) {
 543                         (void) VOP_CLOSE(devvp, FREAD|FWRITE, 1,
 544                             (offset_t)0, CRED(), NULL);
 545                 } else {
 546                         doclkset = 0;
 547                 }
 548         }
 549 
 550         error = mountfs(vfsp, why, devvp, "/", CRED(), 1, NULL, 0);
 551         /*
 552          * XXX - assumes root device is not indirect, because we don't set
 553          * rootvp.  Is rootvp used for anything?  If so, make another arg
 554          * to mountfs.
 555          */
 556         if (error) {
 557                 vfs_unlock(vfsp);
 558                 if (why == ROOT_REMOUNT)
 559                         vfsp->vfs_flag = ovflags;
 560                 if (rootvp) {
 561                         VN_RELE(rootvp);
 562                         rootvp = (struct vnode *)0;
 563                 }
 564                 VN_RELE(devvp);
 565                 return (error);
 566         }
 567         if (why == ROOT_INIT)
 568                 vfs_add((struct vnode *)0, vfsp,
 569                     (vfsp->vfs_flag & VFS_RDONLY) ? MS_RDONLY : 0);
 570         vfs_unlock(vfsp);
 571         fsp = getfs(vfsp);
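	/* pass fs_time to clkset() unless the media was RO (see above) */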
 572         clkset(doclkset ? fsp->fs_time : -1);
 573         ufsvfsp = (ufsvfs_t *)vfsp->vfs_data;
 574         if (ufsvfsp->vfs_log) {
 575                 vfs_setmntopt(vfsp, MNTOPT_LOGGING, NULL, 0);
 576         }
 577         return (0);
 578 }
 579 
 580 static int
 581 remountfs(struct vfs *vfsp, dev_t dev, void *raw_argsp, int args_len)
 582 {
 583         struct ufsvfs *ufsvfsp = (struct ufsvfs *)vfsp->vfs_data;
 584         struct ulockfs *ulp = &ufsvfsp->vfs_ulockfs;
 585         struct buf *bp = ufsvfsp->vfs_bufp;
 586         struct fs *fsp = (struct fs *)bp->b_un.b_addr;
 587         struct fs *fspt;
 588         struct buf *tpt = 0;
 589         int error = 0;
 590         int flags = 0;
 591 
 592         if (args_len == sizeof (struct ufs_args) && raw_argsp)
 593                 flags = ((struct ufs_args *)raw_argsp)->flags;
 594 
 595         /* cannot remount to RDONLY */
 596         if (vfsp->vfs_flag & VFS_RDONLY)
 597                 return (ENOTSUP);
 598 
 599         /* whoops, wrong dev */
 600         if (vfsp->vfs_dev != dev)
 601                 return (EINVAL);
 602 
 603         /*
 604          * synchronize w/ufs ioctls
 605          */
 606         mutex_enter(&ulp->ul_lock);
 607         atomic_inc_ulong(&ufs_quiesce_pend);
 608 
 609         /*
 610          * reset options
 611          */
 612         ufsvfsp->vfs_nointr  = flags & UFSMNT_NOINTR;
 613         ufsvfsp->vfs_syncdir = flags & UFSMNT_SYNCDIR;
 614         ufsvfsp->vfs_nosetsec = flags & UFSMNT_NOSETSEC;
 615         ufsvfsp->vfs_noatime = flags & UFSMNT_NOATIME;
 616         if ((flags & UFSMNT_NODFRATIME) || ufsvfsp->vfs_noatime)
 617                 ufsvfsp->vfs_dfritime &= ~UFS_DFRATIME;
 618         else    /* dfratime, default behavior */
 619                 ufsvfsp->vfs_dfritime |= UFS_DFRATIME;
 620         if (flags & UFSMNT_FORCEDIRECTIO)
 621                 ufsvfsp->vfs_forcedirectio = 1;
 622         else    /* default is no direct I/O */
 623                 ufsvfsp->vfs_forcedirectio = 0;
 624         ufsvfsp->vfs_iotstamp = ddi_get_lbolt();
 625 
 626         /*
 627          * set largefiles flag in ufsvfs equal to the
 628          * value passed in by the mount command. If
 629          * it is "nolargefiles", and the flag is set
 630          * in the superblock, the mount fails.
 631          */
 632         if (!(flags & UFSMNT_LARGEFILES)) {  /* "nolargefiles" */
 633                 if (fsp->fs_flags & FSLARGEFILES) {
 634                         error = EFBIG;
 635                         goto remounterr;
 636                 }
 637                 ufsvfsp->vfs_lfflags &= ~UFS_LARGEFILES;
 638         } else  /* "largefiles" */
 639                 ufsvfsp->vfs_lfflags |= UFS_LARGEFILES;
 640         /*
 641          * read/write to read/write; all done
 642          */
 643         if (fsp->fs_ronly == 0)
 644                 goto remounterr;
 645 
 646         /*
 647          * fix-on-panic assumes RO->RW remount implies system-critical fs
 648          * if it is shortly after boot; so, don't attempt to lock and fix
 649          * (unless the user explicitly asked for another action on error)
 650          * XXX UFSMNT_ONERROR_RDONLY rather than UFSMNT_ONERROR_PANIC
 651          */
 652 #define BOOT_TIME_LIMIT (180*hz)
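	/* "shortly after boot" here means within 180 seconds of boot */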
 653         if (!(flags & UFSMNT_ONERROR_FLGMASK) &&
 654             ddi_get_lbolt() < BOOT_TIME_LIMIT) {
 655                 cmn_err(CE_WARN, "%s is required to be mounted onerror=%s",
 656                     ufsvfsp->vfs_fs->fs_fsmnt, UFSMNT_ONERROR_PANIC_STR);
 657                 flags |= UFSMNT_ONERROR_PANIC;
 658         }
 659 
 660         if ((error = ufsfx_mount(ufsvfsp, flags)) != 0)
 661                 goto remounterr;
 662 
 663         /*
 664          * quiesce the file system
 665          */
 666         error = ufs_quiesce(ulp);
 667         if (error)
 668                 goto remounterr;
 669 
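	/*
	 * Re-read the on-disk superblock and sanity-check it before
	 * switching the in-core superblock to read/write.
	 */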
 670         tpt = UFS_BREAD(ufsvfsp, ufsvfsp->vfs_dev, SBLOCK, SBSIZE);
 671         if (tpt->b_flags & B_ERROR) {
 672                 error = EIO;
 673                 goto remounterr;
 674         }
 675         fspt = (struct fs *)tpt->b_un.b_addr;
 676         if (((fspt->fs_magic != FS_MAGIC) &&
 677             (fspt->fs_magic != MTB_UFS_MAGIC)) ||
 678             (fspt->fs_magic == FS_MAGIC &&
 679             (fspt->fs_version != UFS_EFISTYLE4NONEFI_VERSION_2 &&
 680             fspt->fs_version != UFS_VERSION_MIN)) ||
 681             (fspt->fs_magic == MTB_UFS_MAGIC &&
 682             (fspt->fs_version > MTB_UFS_VERSION_1 ||
 683             fspt->fs_version < MTB_UFS_VERSION_MIN)) ||
 684             fspt->fs_bsize > MAXBSIZE || fspt->fs_frag > MAXFRAG ||
 685             fspt->fs_bsize < sizeof (struct fs) || fspt->fs_bsize < PAGESIZE) {
 686                 tpt->b_flags |= B_STALE | B_AGE;
 687                 error = EINVAL;
 688                 goto remounterr;
 689         }
 690 
 691         if (ufsvfsp->vfs_log && (ufsvfsp->vfs_log->un_flags & LDL_NOROLL)) {
 692                 ufsvfsp->vfs_log->un_flags &= ~LDL_NOROLL;
 693                 logmap_start_roll(ufsvfsp->vfs_log);
 694         }
 695 
 696         if (TRANS_ISERROR(ufsvfsp))
 697                 goto remounterr;
 698         TRANS_DOMATAMAP(ufsvfsp);
 699 
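	/*
	 * The on-disk superblock says the file system is logging (FSLOG)
	 * but logging is not active for this mount; refuse to continue the
	 * remount rather than go read/write without the log.
	 */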
 700         if ((fspt->fs_state + fspt->fs_time == FSOKAY) &&
 701             fspt->fs_clean == FSLOG && !TRANS_ISTRANS(ufsvfsp)) {
 702                 ufsvfsp->vfs_log = NULL;
 703                 ufsvfsp->vfs_domatamap = 0;
 704                 error = ENOSPC;
 705                 goto remounterr;
 706         }
 707 
 708         if (fspt->fs_state + fspt->fs_time == FSOKAY &&
 709             (fspt->fs_clean == FSCLEAN ||
 710             fspt->fs_clean == FSSTABLE ||
 711             fspt->fs_clean == FSLOG)) {
 712 
 713                 /*
 714                  * Ensure that ufs_getsummaryinfo doesn't reconstruct
 715                  * the summary info.
 716                  */
 717                 error = ufs_getsummaryinfo(vfsp->vfs_dev, ufsvfsp, fspt);
 718                 if (error)
 719                         goto remounterr;
 720 
 721                 /* preserve mount name */
 722                 (void) strncpy(fspt->fs_fsmnt, fsp->fs_fsmnt, MAXMNTLEN);
 723                 /* free the old cg space */
 724                 kmem_free(fsp->fs_u.fs_csp, fsp->fs_cssize);
 725                 /* switch in the new superblock */
 726                 fspt->fs_rolled = FS_NEED_ROLL;
 727                 bcopy(tpt->b_un.b_addr, bp->b_un.b_addr, fspt->fs_sbsize);
 728 
 729                 fsp->fs_clean = FSSTABLE;
 730         } /* superblock updated in memory */
 731         tpt->b_flags |= B_STALE | B_AGE;
 732         brelse(tpt);
 733         tpt = 0;
 734 
 735         if (fsp->fs_clean != FSSTABLE) {
 736                 error = ENOSPC;
 737                 goto remounterr;
 738         }
 739 
 740 
 741         if (TRANS_ISTRANS(ufsvfsp)) {
 742                 fsp->fs_clean = FSLOG;
 743                 ufsvfsp->vfs_dio = 0;
 744         } else
 745                 if (ufsvfsp->vfs_dio)
 746                         fsp->fs_clean = FSSUSPEND;
 747 
 748         TRANS_MATA_MOUNT(ufsvfsp);
 749 
 750         fsp->fs_fmod = 0;
 751         fsp->fs_ronly = 0;
 752 
 753         atomic_dec_ulong(&ufs_quiesce_pend);
 754         cv_broadcast(&ulp->ul_cv);
 755         mutex_exit(&ulp->ul_lock);
 756 
 757         if (TRANS_ISTRANS(ufsvfsp)) {
 758 
 759                 /*
 760                  * start the delete thread
 761                  */
 762                 ufs_thread_start(&ufsvfsp->vfs_delete, ufs_thread_delete, vfsp);
 763 
 764                 /*
 765                  * start the reclaim thread
 766                  */
 767                 if (fsp->fs_reclaim & (FS_RECLAIM|FS_RECLAIMING)) {
 768                         fsp->fs_reclaim &= ~FS_RECLAIM;
 769                         fsp->fs_reclaim |=  FS_RECLAIMING;
 770                         ufs_thread_start(&ufsvfsp->vfs_reclaim,
 771                             ufs_thread_reclaim, vfsp);
 772                 }
 773         }
 774 
 775         TRANS_SBWRITE(ufsvfsp, TOP_MOUNT);
 776 
 777         return (0);
 778 
 779 remounterr:
 780         if (tpt)
 781                 brelse(tpt);
 782         atomic_dec_ulong(&ufs_quiesce_pend);
 783         cv_broadcast(&ulp->ul_cv);
 784         mutex_exit(&ulp->ul_lock);
 785         return (error);
 786 }
 787 
 788 /*
 789  * If the device maxtransfer size is not available, we use ufs_maxmaxphys
 790  * along with the system value for maxphys to determine the value for
 791  * maxtransfer.
 792  */
 793 int ufs_maxmaxphys = (1024 * 1024);
 794 
 795 #include <sys/ddi.h>              /* for delay(9f) */
 796 
 797 int ufs_mount_error_delay = 20; /* default to 20ms */
 798 int ufs_mount_timeout = 60000;  /* default to 1 minute */
 799 
 800 static int
 801 mountfs(struct vfs *vfsp, enum whymountroot why, struct vnode *devvp,
 802     char *path, cred_t *cr, int isroot, void *raw_argsp, int args_len)
 803 {
 804         dev_t dev = devvp->v_rdev;
 805         struct fs *fsp;
 806         struct ufsvfs *ufsvfsp = 0;
 807         struct buf *bp = 0;
 808         struct buf *tp = 0;
 809         struct dk_cinfo ci;
 810         int error = 0;
 811         size_t len;
 812         int needclose = 0;
 813         int needtrans = 0;
 814         struct inode *rip;
 815         struct vnode *rvp = NULL;
 816         int flags = 0;
 817         kmutex_t *ihm;
 818         int elapsed;
 819         int status;
 820 
 821         if (args_len == sizeof (struct ufs_args) && raw_argsp)
 822                 flags = ((struct ufs_args *)raw_argsp)->flags;
 823 
 824         ASSERT(vfs_lock_held(vfsp));
 825 
 826         if (why == ROOT_INIT) {
 827                 /*
 828                  * Open block device mounted on.
 829                  * When bio is fixed for vnodes this can all be vnode
 830                  * operations.
 831                  */
 832                 error = VOP_OPEN(&devvp,
 833                     (vfsp->vfs_flag & VFS_RDONLY) ? FREAD : FREAD|FWRITE,
 834                     cr, NULL);
 835                 if (error)
 836                         goto out;
 837                 needclose = 1;
 838 
 839                 /*
 840                  * Refuse to go any further if this
 841                  * device is being used for swapping.
 842                  */
 843                 if (IS_SWAPVP(devvp)) {
 844                         error = EBUSY;
 845                         goto out;
 846                 }
 847         }
 848 
 849         /*
 850          * check for dev already mounted on
 851          */
 852         if (vfsp->vfs_flag & VFS_REMOUNT) {
 853                 error = remountfs(vfsp, dev, raw_argsp, args_len);
 854                 if (error == 0)
 855                         VN_RELE(devvp);
 856                 return (error);
 857         }
 858 
 859         ASSERT(devvp != 0);
 860 
 861         /*
 862          * Flush back any dirty pages on the block device to
 863          * try and keep the buffer cache in sync with the page
 864          * cache if someone is trying to use block devices when
 865          * they really should be using the raw device.
 866          */
 867         (void) VOP_PUTPAGE(common_specvp(devvp), (offset_t)0,
 868             (size_t)0, B_INVAL, cr, NULL);
 869 
 870         /*
 871          * read in superblock
 872          */
 873         ufsvfsp = kmem_zalloc(sizeof (struct ufsvfs), KM_SLEEP);
 874         tp = UFS_BREAD(ufsvfsp, dev, SBLOCK, SBSIZE);
 875         if (tp->b_flags & B_ERROR)
 876                 goto out;
 877         fsp = (struct fs *)tp->b_un.b_addr;
 878 
 879         if ((fsp->fs_magic != FS_MAGIC) && (fsp->fs_magic != MTB_UFS_MAGIC)) {
 880                 cmn_err(CE_NOTE,
 881                     "mount: not a UFS magic number (0x%x)", fsp->fs_magic);
 882                 error = EINVAL;
 883                 goto out;
 884         }
 885 
 886         if ((fsp->fs_magic == FS_MAGIC) &&
 887             (fsp->fs_version != UFS_EFISTYLE4NONEFI_VERSION_2 &&
 888             fsp->fs_version != UFS_VERSION_MIN)) {
 889                 cmn_err(CE_NOTE,
 890                     "mount: unrecognized version of UFS on-disk format: %d",
 891                     fsp->fs_version);
 892                 error = EINVAL;
 893                 goto out;
 894         }
 895 
 896         if ((fsp->fs_magic == MTB_UFS_MAGIC) &&
 897             (fsp->fs_version > MTB_UFS_VERSION_1 ||
 898             fsp->fs_version < MTB_UFS_VERSION_MIN)) {
 899                 cmn_err(CE_NOTE,
 900                     "mount: unrecognized version of UFS on-disk format: %d",
 901                     fsp->fs_version);
 902                 error = EINVAL;
 903                 goto out;
 904         }
 905 
 906 #ifndef _LP64
 907         if (fsp->fs_magic == MTB_UFS_MAGIC) {
 908                 /*
		 * Find the size of the device in sectors.  If the
		 * size in sectors is greater than INT_MAX, it's
 911                  * a multi-terabyte file system, which can't be
 912                  * mounted by a 32-bit kernel.  We can't use the
 913                  * fsbtodb() macro in the next line because the macro
 914                  * casts the intermediate values to daddr_t, which is
 915                  * a 32-bit quantity in a 32-bit kernel.  Here we
 916                  * really do need the intermediate values to be held
 917                  * in 64-bit quantities because we're checking for
 918                  * overflow of a 32-bit field.
 919                  */
 920                 if ((((diskaddr_t)(fsp->fs_size)) << fsp->fs_fsbtodb)
 921                     > INT_MAX) {
 922                         cmn_err(CE_NOTE,
 923                             "mount: multi-terabyte UFS cannot be"
 924                             " mounted by a 32-bit kernel");
 925                         error = EINVAL;
 926                         goto out;
 927                 }
 928 
 929         }
 930 #endif
 931 
 932         if (fsp->fs_bsize > MAXBSIZE || fsp->fs_frag > MAXFRAG ||
 933             fsp->fs_bsize < sizeof (struct fs) || fsp->fs_bsize < PAGESIZE) {
 934                 error = EINVAL; /* also needs translation */
 935                 goto out;
 936         }
 937 
 938         /*
 939          * Allocate VFS private data.
 940          */
 941         vfsp->vfs_bcount = 0;
 942         vfsp->vfs_data = (caddr_t)ufsvfsp;
 943         vfsp->vfs_fstype = ufsfstype;
 944         vfsp->vfs_dev = dev;
 945         vfsp->vfs_flag |= VFS_NOTRUNC;
 946         vfs_make_fsid(&vfsp->vfs_fsid, dev, ufsfstype);
 947         ufsvfsp->vfs_devvp = devvp;
 948 
 949         /*
 950          * Cross-link with vfs and add to instance list.
 951          */
 952         ufsvfsp->vfs_vfs = vfsp;
 953         ufs_vfs_add(ufsvfsp);
 954 
 955         ufsvfsp->vfs_dev = dev;
 956         ufsvfsp->vfs_bufp = tp;
 957 
 958         ufsvfsp->vfs_dirsize = INODESIZE + (4 * ALLOCSIZE) + fsp->fs_fsize;
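	/*
	 * vfs_minfrags is the minfree reserve: fs_minfree percent of the
	 * fs_dsize data fragments.
	 */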
 959         ufsvfsp->vfs_minfrags =
 960             (int)((int64_t)fsp->fs_dsize * fsp->fs_minfree / 100);
 961         /*
 962          * if mount allows largefiles, indicate so in ufsvfs
 963          */
 964         if (flags & UFSMNT_LARGEFILES)
 965                 ufsvfsp->vfs_lfflags |= UFS_LARGEFILES;
 966         /*
 967          * Initialize threads
 968          */
 969         ufs_delete_init(ufsvfsp, 1);
 970         ufs_thread_init(&ufsvfsp->vfs_reclaim, 0);
 971 
 972         /*
 973          * Chicken and egg problem. The superblock may have deltas
 974          * in the log.  So after the log is scanned we reread the
 975          * superblock. We guarantee that the fields needed to
 976          * scan the log will not be in the log.
 977          */
 978         if (fsp->fs_logbno && fsp->fs_clean == FSLOG &&
 979             (fsp->fs_state + fsp->fs_time == FSOKAY)) {
 980                 error = lufs_snarf(ufsvfsp, fsp, (vfsp->vfs_flag & VFS_RDONLY));
 981                 if (error) {
 982                         /*
 983                          * Allow a ro mount to continue even if the
 984                          * log cannot be processed - yet.
 985                          */
 986                         if (!(vfsp->vfs_flag & VFS_RDONLY)) {
 987                                 cmn_err(CE_WARN, "Error accessing ufs "
 988                                     "log for %s; Please run fsck(1M)", path);
 989                                 goto out;
 990                         }
 991                 }
 992                 tp->b_flags |= (B_AGE | B_STALE);
 993                 brelse(tp);
 994                 tp = UFS_BREAD(ufsvfsp, dev, SBLOCK, SBSIZE);
 995                 fsp = (struct fs *)tp->b_un.b_addr;
 996                 ufsvfsp->vfs_bufp = tp;
 997                 if (tp->b_flags & B_ERROR)
 998                         goto out;
 999         }
1000 
1001         /*
1002          * Set logging mounted flag used by lockfs
1003          */
1004         ufsvfsp->vfs_validfs = UT_MOUNTED;
1005 
1006         /*
1007          * Copy the super block into a buffer in its native size.
1008          * Use ngeteblk to allocate the buffer
1009          */
1010         bp = ngeteblk(fsp->fs_bsize);
1011         ufsvfsp->vfs_bufp = bp;
1012         bp->b_edev = dev;
1013         bp->b_dev = cmpdev(dev);
1014         bp->b_blkno = SBLOCK;
1015         bp->b_bcount = fsp->fs_sbsize;
1016         bcopy(tp->b_un.b_addr, bp->b_un.b_addr, fsp->fs_sbsize);
1017         tp->b_flags |= B_STALE | B_AGE;
1018         brelse(tp);
1019         tp = 0;
1020 
1021         fsp = (struct fs *)bp->b_un.b_addr;
1022         /*
	 * Mount fails if the superblock flag indicates the presence of
	 * large files and an attempt is made to mount the filesystem
	 * 'nolargefiles'.
1025          * The exception is for a read only mount of root, which we
1026          * always want to succeed, so fsck can fix potential problems.
1027          * The assumption is that we will remount root at some point,
1028          * and the remount will enforce the mount option.
1029          */
1030         if (!(isroot & (vfsp->vfs_flag & VFS_RDONLY)) &&
1031             (fsp->fs_flags & FSLARGEFILES) &&
1032             !(flags & UFSMNT_LARGEFILES)) {
1033                 error = EFBIG;
1034                 goto out;
1035         }
1036 
1037         if (vfsp->vfs_flag & VFS_RDONLY) {
1038                 fsp->fs_ronly = 1;
1039                 fsp->fs_fmod = 0;
1040                 if (((fsp->fs_state + fsp->fs_time) == FSOKAY) &&
1041                     ((fsp->fs_clean == FSCLEAN) ||
1042                     (fsp->fs_clean == FSSTABLE) ||
1043                     (fsp->fs_clean == FSLOG))) {
1044                         if (isroot) {
1045                                 if (fsp->fs_clean == FSLOG) {
1046                                         if (fsp->fs_rolled == FS_ALL_ROLLED) {
1047                                                 ufs_clean_root = 1;
1048                                         }
1049                                 } else {
1050                                         ufs_clean_root = 1;
1051                                 }
1052                         }
1053                         fsp->fs_clean = FSSTABLE;
1054                 } else {
1055                         fsp->fs_clean = FSBAD;
1056                 }
1057         } else {
1058 
1059                 fsp->fs_fmod = 0;
1060                 fsp->fs_ronly = 0;
1061 
1062                 TRANS_DOMATAMAP(ufsvfsp);
1063 
1064                 if ((TRANS_ISERROR(ufsvfsp)) ||
1065                     (((fsp->fs_state + fsp->fs_time) == FSOKAY) &&
1066                     fsp->fs_clean == FSLOG && !TRANS_ISTRANS(ufsvfsp))) {
1067                         ufsvfsp->vfs_log = NULL;
1068                         ufsvfsp->vfs_domatamap = 0;
1069                         error = ENOSPC;
1070                         goto out;
1071                 }
1072 
1073                 if (((fsp->fs_state + fsp->fs_time) == FSOKAY) &&
1074                     (fsp->fs_clean == FSCLEAN ||
1075                     fsp->fs_clean == FSSTABLE ||
1076                     fsp->fs_clean == FSLOG))
1077                         fsp->fs_clean = FSSTABLE;
1078                 else {
1079                         if (isroot) {
1080                                 /*
				 * Allow the root partition to be mounted even
				 * when fs_state is not ok; it will be fixed
				 * later by a remount of root.
1084                                  */
1085                                 fsp->fs_clean = FSBAD;
1086                                 ufsvfsp->vfs_log = NULL;
1087                                 ufsvfsp->vfs_domatamap = 0;
1088                         } else {
1089                                 error = ENOSPC;
1090                                 goto out;
1091                         }
1092                 }
1093 
1094                 if (fsp->fs_clean == FSSTABLE && TRANS_ISTRANS(ufsvfsp))
1095                         fsp->fs_clean = FSLOG;
1096         }
1097         TRANS_MATA_MOUNT(ufsvfsp);
1098         needtrans = 1;
1099 
1100         vfsp->vfs_bsize = fsp->fs_bsize;
1101 
1102         /*
1103          * Read in summary info
1104          */
1105         if (error = ufs_getsummaryinfo(dev, ufsvfsp, fsp))
1106                 goto out;
1107 
1108         /*
	 * lastwhinetime is set to zero rather than lbolt, so that if the
	 * filesystem is found to be full immediately after mounting, the
	 * "file system full" message is logged right away.
1112          */
1113         ufsvfsp->vfs_lastwhinetime = 0L;
1114 
1115 
1116         mutex_init(&ufsvfsp->vfs_lock, NULL, MUTEX_DEFAULT, NULL);
1117         (void) copystr(path, fsp->fs_fsmnt, sizeof (fsp->fs_fsmnt) - 1, &len);
1118         bzero(fsp->fs_fsmnt + len, sizeof (fsp->fs_fsmnt) - len);
1119 
1120         /*
1121          * Sanity checks for old file systems
1122          */
1123         if (fsp->fs_postblformat == FS_42POSTBLFMT)
1124                 ufsvfsp->vfs_nrpos = 8;
1125         else
1126                 ufsvfsp->vfs_nrpos = fsp->fs_nrpos;
1127 
1128         /*
1129          * Initialize lockfs structure to support file system locking
1130          */
1131         bzero(&ufsvfsp->vfs_ulockfs.ul_lockfs,
1132             sizeof (struct lockfs));
1133         ufsvfsp->vfs_ulockfs.ul_fs_lock = ULOCKFS_ULOCK;
1134         mutex_init(&ufsvfsp->vfs_ulockfs.ul_lock, NULL,
1135             MUTEX_DEFAULT, NULL);
1136         cv_init(&ufsvfsp->vfs_ulockfs.ul_cv, NULL, CV_DEFAULT, NULL);
1137 
1138         /*
1139          * We don't need to grab vfs_dqrwlock for this ufs_iget() call.
1140          * We are in the process of mounting the file system so there
1141          * is no need to grab the quota lock. If a quota applies to the
1142          * root inode, then it will be updated when quotas are enabled.
1143          *
1144          * However, we have an ASSERT(RW_LOCK_HELD(&ufsvfsp->vfs_dqrwlock))
1145          * in getinoquota() that we want to keep so grab it anyway.
1146          */
1147         rw_enter(&ufsvfsp->vfs_dqrwlock, RW_READER);
1148 
1149         error = ufs_iget_alloced(vfsp, UFSROOTINO, &rip, cr);
1150 
1151         rw_exit(&ufsvfsp->vfs_dqrwlock);
1152 
1153         if (error)
1154                 goto out;
1155 
1156         /*
1157          * make sure root inode is a directory.  Returning ENOTDIR might
1158          * be confused with the mount point not being a directory, so
1159          * we use EIO instead.
1160          */
1161         if ((rip->i_mode & IFMT) != IFDIR) {
1162                 /*
1163                  * Mark this inode as subject for cleanup
1164                  * to avoid stray inodes in the cache.
1165                  */
1166                 rvp = ITOV(rip);
1167                 error = EIO;
1168                 goto out;
1169         }
1170 
1171         rvp = ITOV(rip);
1172         mutex_enter(&rvp->v_lock);
1173         rvp->v_flag |= VROOT;
1174         mutex_exit(&rvp->v_lock);
1175         ufsvfsp->vfs_root = rvp;
1176         /* The buffer for the root inode does not contain a valid b_vp */
1177         (void) bfinval(dev, 0);
1178 
1179         /* options */
1180         ufsvfsp->vfs_nosetsec = flags & UFSMNT_NOSETSEC;
1181         ufsvfsp->vfs_nointr  = flags & UFSMNT_NOINTR;
1182         ufsvfsp->vfs_syncdir = flags & UFSMNT_SYNCDIR;
1183         ufsvfsp->vfs_noatime = flags & UFSMNT_NOATIME;
1184         if ((flags & UFSMNT_NODFRATIME) || ufsvfsp->vfs_noatime)
1185                 ufsvfsp->vfs_dfritime &= ~UFS_DFRATIME;
1186         else    /* dfratime, default behavior */
1187                 ufsvfsp->vfs_dfritime |= UFS_DFRATIME;
1188         if (flags & UFSMNT_FORCEDIRECTIO)
1189                 ufsvfsp->vfs_forcedirectio = 1;
1190         else if (flags & UFSMNT_NOFORCEDIRECTIO)
1191                 ufsvfsp->vfs_forcedirectio = 0;
1192         ufsvfsp->vfs_iotstamp = ddi_get_lbolt();
1193 
1194         ufsvfsp->vfs_nindiroffset = fsp->fs_nindir - 1;
1195         ufsvfsp->vfs_nindirshift = highbit(ufsvfsp->vfs_nindiroffset);
1196         ufsvfsp->vfs_ioclustsz = fsp->fs_bsize * fsp->fs_maxcontig;
1197 
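	/*
	 * Ask the disk driver for its controller info and size I/O requests
	 * using dki_maxtransfer; if that isn't available, fall back to the
	 * smaller of maxphys and ufs_maxmaxphys (see the comment above
	 * ufs_maxmaxphys).
	 */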
1198         if (cdev_ioctl(dev, DKIOCINFO, (intptr_t)&ci,
1199             FKIOCTL|FNATIVE|FREAD, CRED(), &status) == 0) {
1200                 ufsvfsp->vfs_iotransz = ci.dki_maxtransfer * DEV_BSIZE;
1201         } else {
1202                 ufsvfsp->vfs_iotransz = MIN(maxphys, ufs_maxmaxphys);
1203         }
1204 
1205         if (ufsvfsp->vfs_iotransz <= 0) {
1206                 ufsvfsp->vfs_iotransz = MIN(maxphys, ufs_maxmaxphys);
1207         }
1208 
1209         /*
	 * When logging, this is used to reserve log space for writes and
	 * truncs.
1211          */
1212         ufsvfsp->vfs_avgbfree = fsp->fs_cstotal.cs_nbfree / fsp->fs_ncg;
1213 
1214         /*
1215          * Determine whether to log cylinder group summary info.
1216          */
1217         ufsvfsp->vfs_nolog_si = (fsp->fs_ncg < ufs_ncg_log);
1218 
1219         if (TRANS_ISTRANS(ufsvfsp)) {
1220                 /*
1221                  * start the delete thread
1222                  */
1223                 ufs_thread_start(&ufsvfsp->vfs_delete, ufs_thread_delete, vfsp);
1224 
1225                 /*
1226                  * start reclaim thread if the filesystem was not mounted
1227                  * read only.
1228                  */
1229                 if (!fsp->fs_ronly && (fsp->fs_reclaim &
1230                     (FS_RECLAIM|FS_RECLAIMING))) {
1231                         fsp->fs_reclaim &= ~FS_RECLAIM;
1232                         fsp->fs_reclaim |=  FS_RECLAIMING;
1233                         ufs_thread_start(&ufsvfsp->vfs_reclaim,
1234                             ufs_thread_reclaim, vfsp);
1235                 }
1236 
1237                 /* Mark the fs as unrolled */
1238                 fsp->fs_rolled = FS_NEED_ROLL;
1239         } else if (!fsp->fs_ronly && (fsp->fs_reclaim &
1240             (FS_RECLAIM|FS_RECLAIMING))) {
1241                 /*
1242                  * If a file system that is mounted nologging, after
1243                  * having previously been mounted logging, becomes
1244                  * unmounted whilst the reclaim thread is in the throes
1245                  * of reclaiming open/deleted inodes, a subsequent mount
1246                  * of such a file system with logging disabled could lead
1247                  * to inodes becoming lost.  So, start reclaim now, even
1248                  * though logging was disabled for the previous mount, to
1249                  * tidy things up.
1250                  */
1251                 fsp->fs_reclaim &= ~FS_RECLAIM;
1252                 fsp->fs_reclaim |=  FS_RECLAIMING;
1253                 ufs_thread_start(&ufsvfsp->vfs_reclaim,
1254                     ufs_thread_reclaim, vfsp);
1255         }
1256 
1257         if (!fsp->fs_ronly) {
1258                 TRANS_SBWRITE(ufsvfsp, TOP_MOUNT);
1259                 if (error = geterror(ufsvfsp->vfs_bufp))
1260                         goto out;
1261         }
1262 
1263         /* fix-on-panic initialization */
1264         if (isroot && !(flags & UFSMNT_ONERROR_FLGMASK))
1265                 flags |= UFSMNT_ONERROR_PANIC;  /* XXX ..._RDONLY */
1266 
1267         if ((error = ufsfx_mount(ufsvfsp, flags)) != 0)
1268                 goto out;
1269 
1270         if (why == ROOT_INIT && isroot)
1271                 rootvp = devvp;
1272 
1273         return (0);
1274 out:
1275         if (error == 0)
1276                 error = EIO;
1277         if (rvp) {
1278                 /* the following sequence is similar to ufs_unmount() */
1279 
1280                 /*
1281                  * There's a problem that ufs_iget() puts inodes into
1282                  * the inode cache before it returns them.  If someone
1283                  * traverses that cache and gets a reference to our
1284                  * inode, there's a chance they'll still be using it
1285                  * after we've destroyed it.  This is a hard race to
1286                  * hit, but it's happened (putting in a medium delay
1287                  * here, and a large delay in ufs_scan_inodes() for
1288                  * inodes on the device we're bailing out on, makes
1289                  * the race easy to demonstrate).  The symptom is some
1290                  * other part of UFS faulting on bad inode contents,
1291                  * or when grabbing one of the locks inside the inode,
1292                  * etc.  The usual victim is ufs_scan_inodes() or
1293                  * someone called by it.
1294                  */
1295 
1296                 /*
1297                  * First, isolate it so that no new references can be
1298                  * gotten via the inode cache.
1299                  */
1300                 ihm = &ih_lock[INOHASH(UFSROOTINO)];
1301                 mutex_enter(ihm);
1302                 remque(rip);
1303                 mutex_exit(ihm);
1304 
1305                 /*
1306                  * Now wait for all outstanding references except our
1307                  * own to drain.  This could, in theory, take forever,
1308                  * so don't wait *too* long.  If we time out, mark
1309                  * it stale and leak it, so we don't hit the problem
1310                  * described above.
1311                  *
1312                  * Note that v_count is an int, which means we can read
1313                  * it in one operation.  Thus, there's no need to lock
1314                  * around our tests.
1315                  */
1316                 elapsed = 0;
1317                 while ((rvp->v_count > 1) && (elapsed < ufs_mount_timeout)) {
1318                         delay(ufs_mount_error_delay * drv_usectohz(1000));
1319                         elapsed += ufs_mount_error_delay;
1320                 }
1321 
1322                 if (rvp->v_count > 1) {
1323                         mutex_enter(&rip->i_tlock);
1324                         rip->i_flag |= ISTALE;
1325                         mutex_exit(&rip->i_tlock);
1326                         cmn_err(CE_WARN,
1327                             "Timed out while cleaning up after "
1328                             "failed mount of %s", path);
1329                 } else {
1330 
1331                         /*
1332                          * Now we're the only one with a handle left, so tear
1333                          * it down the rest of the way.
1334                          */
1335                         if (ufs_rmidle(rip))
1336                                 VN_RELE(rvp);
1337                         ufs_si_del(rip);
1338                         rip->i_ufsvfs = NULL;
1339                         rvp->v_vfsp = NULL;
1340                         rvp->v_type = VBAD;
1341                         VN_RELE(rvp);
1342                 }
1343         }
1344         if (needtrans) {
1345                 TRANS_MATA_UMOUNT(ufsvfsp);
1346         }
1347         if (ufsvfsp) {
1348                 ufs_vfs_remove(ufsvfsp);
1349                 ufs_thread_exit(&ufsvfsp->vfs_delete);
1350                 ufs_thread_exit(&ufsvfsp->vfs_reclaim);
1351                 mutex_destroy(&ufsvfsp->vfs_lock);
1352                 if (ufsvfsp->vfs_log) {
1353                         lufs_unsnarf(ufsvfsp);
1354                 }
1355                 kmem_free(ufsvfsp, sizeof (struct ufsvfs));
1356         }
1357         if (bp) {
1358                 bp->b_flags |= (B_STALE|B_AGE);
1359                 brelse(bp);
1360         }
1361         if (tp) {
1362                 tp->b_flags |= (B_STALE|B_AGE);
1363                 brelse(tp);
1364         }
1365         if (needclose) {
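		/*
		 * We opened the device above (ROOT_INIT); close it and flush
		 * and invalidate any buffers from the failed mount.
		 */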
1366                 (void) VOP_CLOSE(devvp, (vfsp->vfs_flag & VFS_RDONLY) ?
1367                     FREAD : FREAD|FWRITE, 1, (offset_t)0, cr, NULL);
1368                 bflush(dev);
1369                 (void) bfinval(dev, 1);
1370         }
1371         return (error);
1372 }
1373 
1374 /*
1375  * vfs operations
1376  */
1377 static int
1378 ufs_unmount(struct vfs *vfsp, int fflag, struct cred *cr)
1379 {
1380         dev_t           dev             = vfsp->vfs_dev;
1381         struct ufsvfs   *ufsvfsp        = (struct ufsvfs *)vfsp->vfs_data;
1382         struct fs       *fs             = ufsvfsp->vfs_fs;
1383         struct ulockfs  *ulp            = &ufsvfsp->vfs_ulockfs;
1384         struct vnode    *bvp, *vp;
1385         struct buf      *bp;
1386         struct inode    *ip, *inext, *rip;
1387         union ihead     *ih;
1388         int             error, flag, i;
1389         struct lockfs   lockfs;
1390         int             poll_events = POLLPRI;
1391         extern struct pollhead ufs_pollhd;
1392         refstr_t        *mountpoint;
1393 
1394         ASSERT(vfs_lock_held(vfsp));
1395 
1396         if (secpolicy_fs_unmount(cr, vfsp) != 0)
1397                 return (EPERM);
1398         /*
1399          * Forced unmount is now supported through the
1400          * lockfs protocol.
1401          */
1402         if (fflag & MS_FORCE) {
1403                 /*
1404                  * Mark the filesystem as being unmounted now in
1405                  * case of a forcible umount before we take any
1406                  * locks inside UFS to prevent racing with a VFS_VGET()
1407                  * request. Throw these VFS_VGET() requests away for
1408                  * the duration of the forcible umount so they won't
1409                  * use stale or even freed data later on when we're done.
1410                  * It may happen that the VFS has had an additional hold
1411                  * placed on it by someone other than UFS and thus will
1412                  * not get freed immediately once we're done with the
1413                  * umount by dounmount() - use VFS_UNMOUNTED to inform
1414                  * users of this still-alive VFS that its corresponding
1415                  * filesystem is gone so they can detect that and error
1416                  * out.
1417                  */
1418                 vfsp->vfs_flag |= VFS_UNMOUNTED;
1419 
1420                 ufs_thread_suspend(&ufsvfsp->vfs_delete);
1421                 mutex_enter(&ulp->ul_lock);
1422                 /*
1423                  * If file system is already hard locked,
1424                  * unmount the file system, otherwise
1425                  * hard lock it before unmounting.
1426                  */
1427                 if (!ULOCKFS_IS_HLOCK(ulp)) {
1428                         atomic_inc_ulong(&ufs_quiesce_pend);
1429                         lockfs.lf_lock = LOCKFS_HLOCK;
1430                         lockfs.lf_flags = 0;
1431                         lockfs.lf_key = ulp->ul_lockfs.lf_key + 1;
1432                         lockfs.lf_comlen = 0;
1433                         lockfs.lf_comment = NULL;
1434                         ufs_freeze(ulp, &lockfs);
1435                         ULOCKFS_SET_BUSY(ulp);
1436                         LOCKFS_SET_BUSY(&ulp->ul_lockfs);
1437                         (void) ufs_quiesce(ulp);
1438                         (void) ufs_flush(vfsp);
1439                         (void) ufs_thaw(vfsp, ufsvfsp, ulp);
1440                         atomic_dec_ulong(&ufs_quiesce_pend);
1441                         ULOCKFS_CLR_BUSY(ulp);
1442                         LOCKFS_CLR_BUSY(&ulp->ul_lockfs);
1443                         poll_events |= POLLERR;
1444                         pollwakeup(&ufs_pollhd, poll_events);
1445                 }
1446                 ufs_thread_continue(&ufsvfsp->vfs_delete);
1447                 mutex_exit(&ulp->ul_lock);
1448         }
1449 
1450         /* let all types of writes go through */
1451         ufsvfsp->vfs_iotstamp = ddi_get_lbolt();
1452 
1453         /* coordinate with global hlock thread */
1454         if (TRANS_ISTRANS(ufsvfsp) && (ufsvfsp->vfs_validfs == UT_HLOCKING)) {
1455                 /*
1456                  * This is the last point at which a forced umount can
1457                  * fail, hence clear VFS_UNMOUNTED if appropriate.
1458                  */
1459                 if (fflag & MS_FORCE)
1460                         vfsp->vfs_flag &= ~VFS_UNMOUNTED;
1461                 return (EAGAIN);
1462         }
1463 
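             /* tell the global hlock thread that this fs is going away */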
1464         ufsvfsp->vfs_validfs = UT_UNMOUNTED;
1465 
1466         /* kill the reclaim thread */
1467         ufs_thread_exit(&ufsvfsp->vfs_reclaim);
1468 
1469         /* suspend the delete thread */
1470         ufs_thread_suspend(&ufsvfsp->vfs_delete);
1471 
1472         /*
1473          * drain the delete and idle queues
1474          */
1475         ufs_delete_drain(vfsp, -1, 1);
1476         ufs_idle_drain(vfsp);
1477 
1478         /*
1479          * Use the lockfs protocol to prevent new ops from starting.
1480          * A forcible umount cannot fail beyond this point, as we
1481          * hard-locked the filesystem and drained all current
1482          * consumers above.
1483          */
1484         mutex_enter(&ulp->ul_lock);
1485 
1486         /*
1487          * if the file system is busy, return EBUSY
1488          */
1489         if (ulp->ul_vnops_cnt || ulp->ul_falloc_cnt || ULOCKFS_IS_SLOCK(ulp)) {
1490                 error = EBUSY;
1491                 goto out;
1492         }
1493 
1494         /*
1495          * if this is not a forced unmount (!hard/error locked), then
1496          * get rid of every inode except the root and quota inodes;
1497          * also, commit any outstanding transactions.
1498          */
1499         if (!ULOCKFS_IS_HLOCK(ulp) && !ULOCKFS_IS_ELOCK(ulp))
1500                 if (error = ufs_flush(vfsp))
1501                         goto out;
1502 
1503         /*
1504          * ignore inodes in the cache if fs is hard locked or error locked
1505          */
1506         rip = VTOI(ufsvfsp->vfs_root);
1507         if (!ULOCKFS_IS_HLOCK(ulp) && !ULOCKFS_IS_ELOCK(ulp)) {
1508                 /*
1509                  * Otherwise, only the quota and root inodes are in the cache.
1510                  *
1511                  * Avoid racing with ufs_update() and ufs_sync().
1512                  */
1513                 mutex_enter(&ufs_scan_lock);
1514 
1515                 for (i = 0, ih = ihead; i < inohsz; i++, ih++) {
1516                         mutex_enter(&ih_lock[i]);
1517                         for (ip = ih->ih_chain[0];
1518                             ip != (struct inode *)ih;
1519                             ip = ip->i_forw) {
1520                                 if (ip->i_ufsvfs != ufsvfsp)
1521                                         continue;
1522                                 if (ip == ufsvfsp->vfs_qinod)
1523                                         continue;
1524                                 if (ip == rip && ITOV(ip)->v_count == 1)
1525                                         continue;
1526                                 mutex_exit(&ih_lock[i]);
1527                                 mutex_exit(&ufs_scan_lock);
1528                                 error = EBUSY;
1529                                 goto out;
1530                         }
1531                         mutex_exit(&ih_lock[i]);
1532                 }
1533                 mutex_exit(&ufs_scan_lock);
1534         }
1535 
1536         /*
1537          * if a snapshot exists and this is a forced unmount, then delete
1538          * the snapshot.  Otherwise return EBUSY.  This will ensure the
1539          * snapshot always belongs to a valid file system.
1540          */
1541         if (ufsvfsp->vfs_snapshot) {
1542                 if (ULOCKFS_IS_HLOCK(ulp) || ULOCKFS_IS_ELOCK(ulp)) {
1543                         (void) fssnap_delete(&ufsvfsp->vfs_snapshot);
1544                 } else {
1545                         error = EBUSY;
1546                         goto out;
1547                 }
1548         }
1549 
1550         /*
1551          * Close the quota file and invalidate anything left in the quota
1552          * cache for this file system.  Pass kcred to allow all quota
1553          * manipulations.
1554          */
1555         (void) closedq(ufsvfsp, kcred);
1556         invalidatedq(ufsvfsp);
1557         /*
1558          * drain the delete and idle queues
1559          */
1560         ufs_delete_drain(vfsp, -1, 0);
1561         ufs_idle_drain(vfsp);
1562 
1563         /*
1564          * discard the inodes for this fs (including root, shadow, and quota)
1565          */
1566         for (i = 0, ih = ihead; i < inohsz; i++, ih++) {
1567                 mutex_enter(&ih_lock[i]);
1568                 for (inext = 0, ip = ih->ih_chain[0];
1569                     ip != (struct inode *)ih;
1570                     ip = inext) {
1571                         inext = ip->i_forw;
1572                         if (ip->i_ufsvfs != ufsvfsp)
1573                                 continue;
1574 
1575                         /*
1576                          * We've found the inode in the cache, and since we
1577                          * hold the hash mutex the inode cannot disappear
1578                          * from underneath us.
1579                          * We also know it must have a vnode reference
1580                          * count of at least 1.
1581                          * We perform an additional VN_HOLD so that the
1582                          * VN_RELE issued if we take the inode off the idle
1583                          * queue cannot be the last one.
1584                          * It is safe to grab the writer contents lock here
1585                          * to prevent a race with ufs_iinactive() putting
1586                          * inodes into the idle queue while we operate on
1587                          * this inode.
1588                          */
1589                         rw_enter(&ip->i_contents, RW_WRITER);
1590 
1591                         vp = ITOV(ip);
1592                         VN_HOLD(vp)
1593                         remque(ip);
1594                         if (ufs_rmidle(ip))
1595                                 VN_RELE(vp);
1596                         ufs_si_del(ip);
1597                         /*
1598                          * rip->i_ufsvfs is needed by bflush()
1599                          */
1600                         if (ip != rip)
1601                                 ip->i_ufsvfs = NULL;
1602                         /*
1603                          * Set vnode's vfsops to dummy ops, which return
1604                          * EIO. This is needed for forced unmounts to work
1605                          * with lofs/nfs properly.
1606                          */
1607                         if (ULOCKFS_IS_HLOCK(ulp) || ULOCKFS_IS_ELOCK(ulp))
1608                                 vp->v_vfsp = &EIO_vfs;
1609                         else
1610                                 vp->v_vfsp = NULL;
1611                         vp->v_type = VBAD;
1612 
1613                         rw_exit(&ip->i_contents);
1614 
1615                         VN_RELE(vp);
1616                 }
1617                 mutex_exit(&ih_lock[i]);
1618         }
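             /* flush any shadow inode (ACL) cache entries left for this device */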
1619         ufs_si_cache_flush(dev);
1620 
1621         /*
1622          * kill the delete thread and drain the idle queue
1623          */
1624         ufs_thread_exit(&ufsvfsp->vfs_delete);
1625         ufs_idle_drain(vfsp);
1626 
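             /*
              * Grab the superblock buffer and the device vnode before the
              * teardown below; "flag" is nonzero when the file system was
              * mounted writable and its clean state must be updated on disk.
              */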
1627         bp = ufsvfsp->vfs_bufp;
1628         bvp = ufsvfsp->vfs_devvp;
1629         flag = !fs->fs_ronly;
1630         if (flag) {
1631                 bflush(dev);
1632                 if (fs->fs_clean != FSBAD) {
1633                         if (fs->fs_clean == FSSTABLE)
1634                                 fs->fs_clean = FSCLEAN;
1635                         fs->fs_reclaim &= ~FS_RECLAIM;
1636                 }
1637                 if (TRANS_ISTRANS(ufsvfsp) &&
1638                     !TRANS_ISERROR(ufsvfsp) &&
1639                     !ULOCKFS_IS_HLOCK(ulp) &&
1640                     (fs->fs_rolled == FS_NEED_ROLL)) {
1641                         /*
1642                          * ufs_flush() above has flushed the last Moby.
1643                          * This is needed to ensure the following superblock
1644                          * update really is the last metadata update.
1645                          */
1646                         error = ufs_putsummaryinfo(dev, ufsvfsp, fs);
1647                         if (error == 0) {
1648                                 fs->fs_rolled = FS_ALL_ROLLED;
1649                         }
1650                 }
1651                 TRANS_SBUPDATE(ufsvfsp, vfsp, TOP_SBUPDATE_UNMOUNT);
1652                 /*
1653                  * push this last transaction
1654                  */
1655                 curthread->t_flag |= T_DONTBLOCK;
1656                 TRANS_BEGIN_SYNC(ufsvfsp, TOP_COMMIT_UNMOUNT, TOP_COMMIT_SIZE,
1657                     error);
1658                 if (!error)
1659                         TRANS_END_SYNC(ufsvfsp, error, TOP_COMMIT_UNMOUNT,
1660                             TOP_COMMIT_SIZE);
1661                 curthread->t_flag &= ~T_DONTBLOCK;
1662         }
1663 
1664         TRANS_MATA_UMOUNT(ufsvfsp);
1665         lufs_unsnarf(ufsvfsp);          /* Release the in-memory structs */
1666         ufsfx_unmount(ufsvfsp);         /* fix-on-panic bookkeeping */
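             /* free the in-core cylinder group summary information */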
1667         kmem_free(fs->fs_u.fs_csp, fs->fs_cssize);
1668 
1669         bp->b_flags |= B_STALE|B_AGE;
1670         ufsvfsp->vfs_bufp = NULL;    /* don't point at freed buf */
1671         brelse(bp);                     /* free the superblock buf */
1672 
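             /*
              * Flush and invalidate any remaining pages and buffers for the
              * device, close it, and drop our hold on its vnode.
              */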
1673         (void) VOP_PUTPAGE(common_specvp(bvp), (offset_t)0, (size_t)0,
1674             B_INVAL, cr, NULL);
1675         (void) VOP_CLOSE(bvp, flag, 1, (offset_t)0, cr, NULL);
1676         bflush(dev);
1677         (void) bfinval(dev, 1);
1678         VN_RELE(bvp);
1679 
1680         /*
1681          * It is now safe to NULL out the ufsvfs pointer and discard
1682          * the root inode.
1683          */
1684         rip->i_ufsvfs = NULL;
1685         VN_RELE(ITOV(rip));
1686 
1687         /* free up lockfs comment structure, if any */
1688         if (ulp->ul_lockfs.lf_comlen && ulp->ul_lockfs.lf_comment)
1689                 kmem_free(ulp->ul_lockfs.lf_comment, ulp->ul_lockfs.lf_comlen);
1690 
1691         /*
1692          * Remove from instance list.
1693          */
1694         ufs_vfs_remove(ufsvfsp);
1695 
1696         /*
1697          * For a forcible unmount, threads may be asleep in
1698          * ufs_lockfs_begin/ufs_check_lockfs.  These threads will need
1699          * the ufsvfs structure, so we don't free it yet.  ufs_update
1700          * will free it up after a while.
1701          */
1702         if (ULOCKFS_IS_HLOCK(ulp) || ULOCKFS_IS_ELOCK(ulp)) {
1703                 extern kmutex_t         ufsvfs_mutex;
1704                 extern struct ufsvfs    *ufsvfslist;
1705 
1706                 mutex_enter(&ufsvfs_mutex);
1707                 ufsvfsp->vfs_dontblock = 1;
1708                 ufsvfsp->vfs_next = ufsvfslist;
1709                 ufsvfslist = ufsvfsp;
1710                 mutex_exit(&ufsvfs_mutex);
1711                 /* wakeup any suspended threads */
1712                 cv_broadcast(&ulp->ul_cv);
1713                 mutex_exit(&ulp->ul_lock);
1714         } else {
1715                 mutex_destroy(&ufsvfsp->vfs_lock);
1716                 kmem_free(ufsvfsp, sizeof (struct ufsvfs));
1717         }
1718 
1719         /*
1720          * Now mark the filesystem as unmounted since we're done with it.
1721          */
1722         vfsp->vfs_flag |= VFS_UNMOUNTED;
1723 
1724         return (0);
1725 out:
1726         /* open the fs to new ops */
1727         cv_broadcast(&ulp->ul_cv);
1728         mutex_exit(&ulp->ul_lock);
1729 
1730         if (TRANS_ISTRANS(ufsvfsp)) {
1731                 /* allow the delete thread to continue */
1732                 ufs_thread_continue(&ufsvfsp->vfs_delete);
1733                 /* restart the reclaim thread */
1734                 ufs_thread_start(&ufsvfsp->vfs_reclaim, ufs_thread_reclaim,
1735                     vfsp);
1736                 /* coordinate with global hlock thread */
1737                 ufsvfsp->vfs_validfs = UT_MOUNTED;
1738                 /* check for trans errors during umount */
1739                 ufs_trans_onerror();
1740 
1741                 /*
1742                  * if we have a separate /usr it will never unmount
1743                  * when halting. To avoid re-reading all the cylinder
1744                  * group summary info on mounting after reboot, the
1745                  * logging of summary info is re-enabled and the
1746                  * superblock is written out.
1747                  */
1748                 mountpoint = vfs_getmntpoint(vfsp);
1749                 if ((fs->fs_si == FS_SI_OK) &&
1750                     (strcmp("/usr", refstr_value(mountpoint)) == 0)) {
1751                         ufsvfsp->vfs_nolog_si = 0;
1752                         UFS_BWRITE2(NULL, ufsvfsp->vfs_bufp);
1753                 }
1754                 refstr_rele(mountpoint);
1755         }
1756 
1757         return (error);
1758 }
1759 
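     /*
      * Return a held vnode for the root of this file system.
      */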
1760 static int
1761 ufs_root(struct vfs *vfsp, struct vnode **vpp)
1762 {
1763         struct ufsvfs *ufsvfsp;
1764         struct vnode *vp;
1765 
1766         if (!vfsp)
1767                 return (EIO);
1768 
1769         ufsvfsp = (struct ufsvfs *)vfsp->vfs_data;
1770         if (!ufsvfsp || !ufsvfsp->vfs_root)
1771                 return (EIO);   /* forced unmount */
1772 
1773         vp = ufsvfsp->vfs_root;
1774         VN_HOLD(vp);
1775         *vpp = vp;
1776         return (0);
1777 }
1778 
1779 /*
1780  * Get file system statistics.
1781  */
1782 static int
1783 ufs_statvfs(struct vfs *vfsp, struct statvfs64 *sp)
1784 {
1785         struct fs *fsp;
1786         struct ufsvfs *ufsvfsp;
1787         int blk, i;
1788         long max_avail, used;
1789         dev32_t d32;
1790 
1791         if (vfsp->vfs_flag & VFS_UNMOUNTED)
1792                 return (EIO);
1793 
1794         ufsvfsp = (struct ufsvfs *)vfsp->vfs_data;
1795         fsp = ufsvfsp->vfs_fs;
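             /*
              * Sanity-check the superblock magic number and version before
              * trusting any of its fields.
              */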
1796         if ((fsp->fs_magic != FS_MAGIC) && (fsp->fs_magic != MTB_UFS_MAGIC))
1797                 return (EINVAL);
1798         if (fsp->fs_magic == FS_MAGIC &&
1799             (fsp->fs_version != UFS_EFISTYLE4NONEFI_VERSION_2 &&
1800             fsp->fs_version != UFS_VERSION_MIN))
1801                 return (EINVAL);
1802         if (fsp->fs_magic == MTB_UFS_MAGIC &&
1803             (fsp->fs_version > MTB_UFS_VERSION_1 ||
1804             fsp->fs_version < MTB_UFS_VERSION_MIN))
1805                 return (EINVAL);
1806 
1807         /*
1808          * get the basic numbers
1809          */
1810         (void) bzero(sp, sizeof (*sp));
1811 
1812         sp->f_bsize = fsp->fs_bsize;
1813         sp->f_frsize = fsp->fs_fsize;
1814         sp->f_blocks = (fsblkcnt64_t)fsp->fs_dsize;
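             /*
              * Free space is accounted in fragments: whole free blocks are
              * converted to fragments and the loose free fragments added in.
              */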
1815         sp->f_bfree = (fsblkcnt64_t)fsp->fs_cstotal.cs_nbfree * fsp->fs_frag +
1816             fsp->fs_cstotal.cs_nffree;
1817 
1818         sp->f_files = (fsfilcnt64_t)fsp->fs_ncg * fsp->fs_ipg;
1819         sp->f_ffree = (fsfilcnt64_t)fsp->fs_cstotal.cs_nifree;
1820 
1821         /*
1822          * Adjust the numbers based on things waiting to be deleted.
1823          * This modifies f_bfree and f_ffree.  Afterwards, everything we
1824          * come up with will be self-consistent.  By definition, this
1825          * is a point-in-time snapshot, so the fact that the delete
1826          * thread's probably already invalidated the results is not a
1827          * problem.  Note that if the delete thread is ever extended to
1828          * non-logging ufs, this adjustment must always be made.
1829          */
1830         if (TRANS_ISTRANS(ufsvfsp))
1831                 ufs_delete_adjust_stats(ufsvfsp, sp);
1832 
1833         /*
1834          * avail = MAX(max_avail - used, 0)
1835          */
1836         max_avail = fsp->fs_dsize - ufsvfsp->vfs_minfrags;
1837 
1838         used = (fsp->fs_dsize - sp->f_bfree);
1839 
1840         if (max_avail > used)
1841                 sp->f_bavail = (fsblkcnt64_t)max_avail - used;
1842         else
1843                 sp->f_bavail = (fsblkcnt64_t)0;
1844 
1845         sp->f_favail = sp->f_ffree;
1846         (void) cmpldev(&d32, vfsp->vfs_dev);
1847         sp->f_fsid = d32;
1848         (void) strcpy(sp->f_basetype, vfssw[vfsp->vfs_fstype].vsw_name);
1849         sp->f_flag = vf_to_stf(vfsp->vfs_flag);
1850 
1851         /* keep coordinated with ufs_l_pathconf() */
1852         sp->f_namemax = MAXNAMLEN;
1853 
1854         if (fsp->fs_cpc == 0) {
1855                 bzero(sp->f_fstr, 14);
1856                 return (0);
1857         }
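             /*
              * Legacy: fill f_fstr from the rotational position table; this
              * appears to be a holdover from the historical statfs interface
              * and is only meaningful for old file system layouts.
              */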
1858         blk = fsp->fs_spc * fsp->fs_cpc / NSPF(fsp);
1859         for (i = 0; i < blk; i += fsp->fs_frag) /* CSTYLED */
1860                 /* void */;
1861         i -= fsp->fs_frag;
1862         blk = i / fsp->fs_frag;
1863         bcopy(&(fs_rotbl(fsp)[blk]), sp->f_fstr, 14);
1864         return (0);
1865 }
1866 
1867 /*
1868  * Flush any pending I/O to file system vfsp.
1869  * If vfsp is NULL, ufs_update() is called to flush *all* ufs
1870  * file systems.  If vfsp is non-NULL, only sync this ufs (in
1871  * preparation for a umount).
1872  */
1873 /*ARGSUSED*/
1874 static int
1875 ufs_sync(struct vfs *vfsp, short flag, struct cred *cr)
1876 {
1877         struct ufsvfs *ufsvfsp;
1878         struct fs *fs;
1879         int cheap = flag & SYNC_ATTR;
1880         int error;
1881 
1882         /*
1883          * SYNC_CLOSE means we're rebooting.  Toss everything
1884          * on the idle queue so we don't have to slog through
1885          * a bunch of uninteresting inodes over and over again.
1886          */
1887         if (flag & SYNC_CLOSE)
1888                 ufs_idle_drain(NULL);
1889 
1890         if (vfsp == NULL) {
1891                 ufs_update(flag);
1892                 return (0);
1893         }
1894 
1895         /* Flush a single ufs */
1896         if (!vfs_matchops(vfsp, ufs_vfsops) || vfs_lock(vfsp) != 0)
1897                 return (0);
1898 
1899         ufsvfsp = (struct ufsvfs *)vfsp->vfs_data;
1900         if (!ufsvfsp) {
1901                 vfs_unlock(vfsp);
                     return (EIO);
             }
1902         fs = ufsvfsp->vfs_fs;
1903         mutex_enter(&ufsvfsp->vfs_lock);
1904 
1905         if (ufsvfsp->vfs_dio &&
1906             fs->fs_ronly == 0 &&
1907             fs->fs_clean != FSBAD &&
1908             fs->fs_clean != FSLOG) {
1909                 /* turn off fast-io on unmount, so no fsck needed (4029401) */
1910                 ufsvfsp->vfs_dio = 0;
1911                 fs->fs_clean = FSACTIVE;
1912                 fs->fs_fmod = 1;
1913         }
1914 
1915         /* Write back modified superblock */
1916         if (fs->fs_fmod == 0) {
1917                 mutex_exit(&ufsvfsp->vfs_lock);
1918         } else {
1919                 if (fs->fs_ronly != 0) {
1920                         mutex_exit(&ufsvfsp->vfs_lock);
1921                         vfs_unlock(vfsp);
1922                         return (ufs_fault(ufsvfsp->vfs_root,
1923                             "fs = %s update: ro fs mod\n", fs->fs_fsmnt));
1924                 }
1925                 fs->fs_fmod = 0;
1926                 mutex_exit(&ufsvfsp->vfs_lock);
1927 
1928                 TRANS_SBUPDATE(ufsvfsp, vfsp, TOP_SBUPDATE_UPDATE);
1929         }
1930         vfs_unlock(vfsp);
1931 
1932         /*
1933          * Avoid racing with ufs_update() and ufs_unmount().
1934          *
1935          */
1936         mutex_enter(&ufs_scan_lock);
1937 
1938         (void) ufs_scan_inodes(1, ufs_sync_inode,
1939             (void *)(uintptr_t)cheap, ufsvfsp);
1940 
1941         mutex_exit(&ufs_scan_lock);
1942 
1943         bflush((dev_t)vfsp->vfs_dev);
1944 
1945         /*
1946          * commit any outstanding async transactions
1947          */
1948         curthread->t_flag |= T_DONTBLOCK;
1949         TRANS_BEGIN_SYNC(ufsvfsp, TOP_COMMIT_UPDATE, TOP_COMMIT_SIZE, error);
1950         if (!error) {
1951                 TRANS_END_SYNC(ufsvfsp, error, TOP_COMMIT_UPDATE,
1952                     TOP_COMMIT_SIZE);
1953         }
1954         curthread->t_flag &= ~T_DONTBLOCK;
1955 
1956         return (0);
1957 }
1958 
1959 
1960 void
1961 sbupdate(struct vfs *vfsp)
1962 {
1963         struct ufsvfs *ufsvfsp = (struct ufsvfs *)vfsp->vfs_data;
1964         struct fs *fs = ufsvfsp->vfs_fs;
1965         struct buf *bp;
1966         int blks;
1967         caddr_t space;
1968         int i;
1969         size_t size;
1970 
1971         /*
1972          * for ulockfs processing, limit the superblock writes
1973          */
1974         if ((ufsvfsp->vfs_ulockfs.ul_sbowner) &&
1975             (curthread != ufsvfsp->vfs_ulockfs.ul_sbowner)) {
1976                 /* process later */
1977                 fs->fs_fmod = 1;
1978                 return;
1979         }
1980         ULOCKFS_SET_MOD((&ufsvfsp->vfs_ulockfs));
1981 
1982         if (TRANS_ISTRANS(ufsvfsp)) {
1983                 mutex_enter(&ufsvfsp->vfs_lock);
1984                 ufs_sbwrite(ufsvfsp);
1985                 mutex_exit(&ufsvfsp->vfs_lock);
1986                 return;
1987         }
1988 
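             /*
              * For non-logging file systems, write the in-core cylinder
              * group summary area out a file system block at a time, then
              * write the superblock itself.
              */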
1989         blks = howmany(fs->fs_cssize, fs->fs_fsize);
1990         space = (caddr_t)fs->fs_u.fs_csp;
1991         for (i = 0; i < blks; i += fs->fs_frag) {
1992                 size = fs->fs_bsize;
1993                 if (i + fs->fs_frag > blks)
1994                         size = (blks - i) * fs->fs_fsize;
1995                 bp = UFS_GETBLK(ufsvfsp, ufsvfsp->vfs_dev,
1996                     (daddr_t)(fsbtodb(fs, fs->fs_csaddr + i)),
1997                     fs->fs_bsize);
1998                 bcopy(space, bp->b_un.b_addr, size);
1999                 space += size;
2000                 bp->b_bcount = size;
2001                 UFS_BRWRITE(ufsvfsp, bp);
2002         }
2003         mutex_enter(&ufsvfsp->vfs_lock);
2004         ufs_sbwrite(ufsvfsp);
2005         mutex_exit(&ufsvfsp->vfs_lock);
2006 }
2007 
2008 int ufs_vget_idle_count = 2;    /* Number of inodes to idle each time */
2009 static int
2010 ufs_vget(struct vfs *vfsp, struct vnode **vpp, struct fid *fidp)
2011 {
2012         int error = 0;
2013         struct ufid *ufid;
2014         struct inode *ip;
2015         struct ufsvfs *ufsvfsp = (struct ufsvfs *)vfsp->vfs_data;
2016         struct ulockfs *ulp;
2017 
2018         /*
2019          * Check for unmounted filesystem.
2020          */
2021         if (vfsp->vfs_flag & VFS_UNMOUNTED) {
2022                 error = EIO;
2023                 goto errout;
2024         }
2025 
2026         /*
2027          * Keep the idle queue from getting too long by
2028          * idling an inode before attempting to allocate another.
2029          * This operation must be performed before entering
2030          * lockfs or a transaction.
2031          */
2032         if (ufs_idle_q.uq_ne > ufs_idle_q.uq_hiwat)
2033                 if ((curthread->t_flag & T_DONTBLOCK) == 0) {
2034                         ins.in_vidles.value.ul += ufs_vget_idle_count;
2035                         ufs_idle_some(ufs_vget_idle_count);
2036                 }
2037 
2038         ufid = (struct ufid *)fidp;
2039 
2040         if (error = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_VGET_MASK))
2041                 goto errout;
2042 
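             /*
              * Hold vfs_dqrwlock as a reader across ufs_iget(), presumably
              * so that quota state cannot change underneath the lookup.
              */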
2043         rw_enter(&ufsvfsp->vfs_dqrwlock, RW_READER);
2044 
2045         error = ufs_iget(vfsp, ufid->ufid_ino, &ip, CRED());
2046 
2047         rw_exit(&ufsvfsp->vfs_dqrwlock);
2048 
2049         ufs_lockfs_end(ulp);
2050 
2051         if (error)
2052                 goto errout;
2053 
2054         /*
2055          * If the inode has been deleted, freed, or put in a transient state
2056          * since the last VFS_VGET() request for it, release it and don't
2057          * return it to the caller (presumably NFS), as it's no longer valid.
2058          */
2059         if (ip->i_gen != ufid->ufid_gen || ip->i_mode == 0 ||
2060             (ip->i_nlink <= 0)) {
2061                 VN_RELE(ITOV(ip));
2062                 error = EINVAL;
2063                 goto errout;
2064         }
2065 
2066         *vpp = ITOV(ip);
2067         return (0);
2068 
2069 errout:
2070         *vpp = NULL;
2071         return (error);
2072 }
2073 
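     /*
      * File system type initialization: register the ufs vfs and vnode
      * operation templates and initialize the inode subsystem.
      */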
2074 static int
2075 ufsinit(int fstype, char *name)
2076 {
2077         static const fs_operation_def_t ufs_vfsops_template[] = {
2078                 VFSNAME_MOUNT,          { .vfs_mount = ufs_mount },
2079                 VFSNAME_UNMOUNT,        { .vfs_unmount = ufs_unmount },
2080                 VFSNAME_ROOT,           { .vfs_root = ufs_root },
2081                 VFSNAME_STATVFS,        { .vfs_statvfs = ufs_statvfs },
2082                 VFSNAME_SYNC,           { .vfs_sync = ufs_sync },
2083                 VFSNAME_VGET,           { .vfs_vget = ufs_vget },
2084                 VFSNAME_MOUNTROOT,      { .vfs_mountroot = ufs_mountroot },
2085                 NULL,                   NULL
2086         };
2087         int error;
2088 
2089         ufsfstype = fstype;
2090 
2091         error = vfs_setfsops(fstype, ufs_vfsops_template, &ufs_vfsops);
2092         if (error != 0) {
2093                 cmn_err(CE_WARN, "ufsinit: bad vfs ops template");
2094                 return (error);
2095         }
2096 
2097         error = vn_make_ops(name, ufs_vnodeops_template, &ufs_vnodeops);
2098         if (error != 0) {
2099                 (void) vfs_freevfsops_by_type(fstype);
2100                 cmn_err(CE_WARN, "ufsinit: bad vnode ops template");
2101                 return (error);
2102         }
2103 
2104         ufs_iinit();
2105         return (0);
2106 }