OS-5148 ftruncate at offset should emit proper events
Reviewed by: Bryan Cantrill <bryan@joyent.com>
Reviewed by: Jerry Jelinek <jerry.jelinek@joyent.com>
OS-5291 lxbrand inotify02 LTP regression
Reviewed by: Patrick Mooney <patrick.mooney@joyent.com>
OS-3294 add support for inotify
Reviewed by: Jerry Jelinek <jerry.jelinek@joyent.com>
Reviewed by: Robert Mustacchi <rm@joyent.com>
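
A note on the behavioral change under review (OS-5148): tmp_setattr()
previously emitted a vnevent only when a file was truncated to zero
length.  With this change, a successful AT_SIZE setattr emits
vnevent_truncate() for a zero size and vnevent_resize() otherwise, so an
ftruncate() at a non-zero offset becomes visible to file-event consumers
such as the inotify support from OS-3294.  The sketch below is
illustrative only and is not part of the webrev; it assumes a Linux or
lx-branded-zone environment where resizes surface as IN_MODIFY, and the
watched path name is made up.

/*
 * Illustrative sketch (hypothetical path): watch a file with inotify,
 * then ftruncate() it to a non-zero size and wait for the event.
 */
#include <sys/inotify.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	const char *path = "/tmp/watched";	/* hypothetical */
	struct inotify_event ev;
	int fd, ifd;

	if ((fd = open(path, O_CREAT | O_RDWR, 0644)) < 0 ||
	    (ifd = inotify_init()) < 0 ||
	    inotify_add_watch(ifd, path, IN_MODIFY) < 0) {
		perror("setup");
		return (1);
	}

	/* Resize to a non-zero length, not a truncate-to-zero. */
	if (ftruncate(fd, 8192) != 0) {
		perror("ftruncate");
		return (1);
	}

	/*
	 * A watch on a regular file carries no name, so a single
	 * struct inotify_event is a large enough read buffer.
	 */
	if (read(ifd, &ev, sizeof (ev)) >= (ssize_t)sizeof (ev) &&
	    (ev.mask & IN_MODIFY) != 0)
		(void) printf("got IN_MODIFY for the resize\n");

	return (0);
}
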
--- old/usr/src/uts/common/fs/tmpfs/tmp_vnops.c
+++ new/usr/src/uts/common/fs/tmpfs/tmp_vnops.c
   1    1  /*
   2    2   * CDDL HEADER START
   3    3   *
   4    4   * The contents of this file are subject to the terms of the
   5    5   * Common Development and Distribution License (the "License").
   6    6   * You may not use this file except in compliance with the License.
   7    7   *
   8    8   * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9    9   * or http://www.opensolaris.org/os/licensing.
  10   10   * See the License for the specific language governing permissions
  11   11   * and limitations under the License.
  12   12   *
  13   13   * When distributing Covered Code, include this CDDL HEADER in each
  14   14   * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15   15   * If applicable, add the following below this CDDL HEADER, with the
  16   16   * fields enclosed by brackets "[]" replaced with your own identifying
  17   17   * information: Portions Copyright [yyyy] [name of copyright owner]
  18   18   *
  19   19   * CDDL HEADER END
  20   20   */
  21   21  
  22   22  /*
  23   23   * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
  24   24   * Use is subject to license terms.
  25   25   */
  26   26  
  27   27  /*
  28      - * Copyright (c) 2015, Joyent, Inc. All rights reserved.
       28 + * Copyright 2016, Joyent, Inc.
  29   29   * Copyright 2015 Nexenta Systems, Inc.  All rights reserved.
  30   30   * Copyright 2016 RackTop Systems.
  31   31   */
  32   32  
  33   33  #include <sys/types.h>
  34   34  #include <sys/param.h>
  35   35  #include <sys/t_lock.h>
  36   36  #include <sys/systm.h>
  37   37  #include <sys/sysmacros.h>
  38   38  #include <sys/user.h>
  39   39  #include <sys/time.h>
  40   40  #include <sys/vfs.h>
  41   41  #include <sys/vfs_opreg.h>
  42   42  #include <sys/vnode.h>
  43   43  #include <sys/file.h>
  44   44  #include <sys/fcntl.h>
  45   45  #include <sys/flock.h>
  46   46  #include <sys/kmem.h>
  47   47  #include <sys/uio.h>
  48   48  #include <sys/errno.h>
  49   49  #include <sys/stat.h>
  50   50  #include <sys/cred.h>
  51   51  #include <sys/dirent.h>
  52   52  #include <sys/pathname.h>
  53   53  #include <sys/vmsystm.h>
  54   54  #include <sys/fs/tmp.h>
  55   55  #include <sys/fs/tmpnode.h>
  56   56  #include <sys/mman.h>
  57   57  #include <vm/hat.h>
  58   58  #include <vm/seg_vn.h>
  59   59  #include <vm/seg_map.h>
  60   60  #include <vm/seg.h>
  61   61  #include <vm/anon.h>
  62   62  #include <vm/as.h>
  63   63  #include <vm/page.h>
  64   64  #include <vm/pvn.h>
  65   65  #include <sys/cmn_err.h>
  66   66  #include <sys/debug.h>
  67   67  #include <sys/swap.h>
  68   68  #include <sys/buf.h>
  69   69  #include <sys/vm.h>
  70   70  #include <sys/vtrace.h>
  71   71  #include <sys/policy.h>
  72   72  #include <fs/fs_subr.h>
  73   73  
  74   74  static int      tmp_getapage(struct vnode *, u_offset_t, size_t, uint_t *,
  75   75          page_t **, size_t, struct seg *, caddr_t, enum seg_rw, struct cred *);
  76   76  static int      tmp_putapage(struct vnode *, page_t *, u_offset_t *, size_t *,
  77   77          int, struct cred *);
  78   78  
  79   79  /* ARGSUSED1 */
  80   80  static int
  81   81  tmp_open(struct vnode **vpp, int flag, struct cred *cred, caller_context_t *ct)
  82   82  {
  83   83          /*
  84   84           * swapon to a tmpfs file is not supported so access
  85   85           * is denied on open if VISSWAP is set.
  86   86           */
  87   87          if ((*vpp)->v_flag & VISSWAP)
  88   88                  return (EINVAL);
  89   89          return (0);
  90   90  }
  91   91  
  92   92  /* ARGSUSED1 */
  93   93  static int
  94   94  tmp_close(
  95   95          struct vnode *vp,
  96   96          int flag,
  97   97          int count,
  98   98          offset_t offset,
  99   99          struct cred *cred,
 100  100          caller_context_t *ct)
 101  101  {
 102  102          cleanlocks(vp, ttoproc(curthread)->p_pid, 0);
 103  103          cleanshares(vp, ttoproc(curthread)->p_pid);
 104  104          return (0);
 105  105  }
 106  106  
 107  107  /*
 108  108   * wrtmp does the real work of write requests for tmpfs.
 109  109   */
 110  110  static int
 111  111  wrtmp(
 112  112          struct tmount *tm,
 113  113          struct tmpnode *tp,
 114  114          struct uio *uio,
 115  115          struct cred *cr,
 116  116          struct caller_context *ct)
 117  117  {
 118  118          pgcnt_t pageoffset;     /* offset in pages */
 119  119          ulong_t segmap_offset;  /* pagesize byte offset into segmap */
 120  120          caddr_t base;           /* base of segmap */
 121  121          ssize_t bytes;          /* bytes to uiomove */
 122  122          pfn_t pagenumber;       /* offset in pages into tmp file */
 123  123          struct vnode *vp;
 124  124          int error = 0;
 125  125          int     pagecreate;     /* == 1 if we allocated a page */
 126  126          int     newpage;
 127  127          rlim64_t limit = uio->uio_llimit;
 128  128          long oresid = uio->uio_resid;
 129  129          timestruc_t now;
 130  130  
 131  131          long tn_size_changed = 0;
 132  132          long old_tn_size;
 133  133          long new_tn_size;
 134  134  
 135  135          vp = TNTOV(tp);
 136  136          ASSERT(vp->v_type == VREG);
 137  137  
 138  138          TRACE_1(TR_FAC_TMPFS, TR_TMPFS_RWTMP_START,
 139  139              "tmp_wrtmp_start:vp %p", vp);
 140  140  
 141  141          ASSERT(RW_WRITE_HELD(&tp->tn_contents));
 142  142          ASSERT(RW_WRITE_HELD(&tp->tn_rwlock));
 143  143  
 144  144          if (MANDLOCK(vp, tp->tn_mode)) {
 145  145                  rw_exit(&tp->tn_contents);
 146  146                  /*
 147  147                   * tmp_getattr ends up being called by chklock
 148  148                   */
 149  149                  error = chklock(vp, FWRITE, uio->uio_loffset, uio->uio_resid,
 150  150                      uio->uio_fmode, ct);
 151  151                  rw_enter(&tp->tn_contents, RW_WRITER);
 152  152                  if (error != 0) {
 153  153                          TRACE_2(TR_FAC_TMPFS, TR_TMPFS_RWTMP_END,
 154  154                              "tmp_wrtmp_end:vp %p error %d", vp, error);
 155  155                          return (error);
 156  156                  }
 157  157          }
 158  158  
 159  159          if (uio->uio_loffset < 0)
 160  160                  return (EINVAL);
 161  161  
 162  162          if (limit == RLIM64_INFINITY || limit > MAXOFFSET_T)
 163  163                  limit = MAXOFFSET_T;
 164  164  
 165  165          if (uio->uio_loffset >= limit) {
 166  166                  proc_t *p = ttoproc(curthread);
 167  167  
 168  168                  mutex_enter(&p->p_lock);
 169  169                  (void) rctl_action(rctlproc_legacy[RLIMIT_FSIZE], p->p_rctls,
 170  170                      p, RCA_UNSAFE_SIGINFO);
 171  171                  mutex_exit(&p->p_lock);
 172  172                  return (EFBIG);
 173  173          }
 174  174  
 175  175          if (uio->uio_loffset >= MAXOFF_T) {
 176  176                  TRACE_2(TR_FAC_TMPFS, TR_TMPFS_RWTMP_END,
 177  177                      "tmp_wrtmp_end:vp %p error %d", vp, EINVAL);
 178  178                  return (EFBIG);
 179  179          }
 180  180  
 181  181          if (uio->uio_resid == 0) {
 182  182                  TRACE_2(TR_FAC_TMPFS, TR_TMPFS_RWTMP_END,
 183  183                      "tmp_wrtmp_end:vp %p error %d", vp, 0);
 184  184                  return (0);
 185  185          }
 186  186  
 187  187          if (limit > MAXOFF_T)
 188  188                  limit = MAXOFF_T;
 189  189  
 190  190          do {
 191  191                  long    offset;
 192  192                  long    delta;
 193  193  
 194  194                  offset = (long)uio->uio_offset;
 195  195                  pageoffset = offset & PAGEOFFSET;
 196  196                  /*
 197  197                   * A maximum of PAGESIZE bytes of data is transferred
 198  198                   * each pass through this loop
 199  199                   */
 200  200                  bytes = MIN(PAGESIZE - pageoffset, uio->uio_resid);
 201  201  
 202  202                  if (offset + bytes >= limit) {
 203  203                          if (offset >= limit) {
 204  204                                  error = EFBIG;
 205  205                                  goto out;
 206  206                          }
 207  207                          bytes = limit - offset;
 208  208                  }
 209  209                  pagenumber = btop(offset);
 210  210  
 211  211                  /*
 212  212                   * delta is the amount of anonymous memory
 213  213                   * to reserve for the file.
 214  214                   * We always reserve in pagesize increments so
 215  215                   * unless we're extending the file into a new page,
 216  216                   * we don't need to call tmp_resv.
 217  217                   */
 218  218                  delta = offset + bytes -
 219  219                      P2ROUNDUP_TYPED(tp->tn_size, PAGESIZE, u_offset_t);
 220  220                  if (delta > 0) {
 221  221                          pagecreate = 1;
 222  222                          if (tmp_resv(tm, tp, delta, pagecreate)) {
 223  223                                  /*
 224  224                                   * Log file system full in the zone that owns
 225  225                                   * the tmpfs mount, as well as in the global
 226  226                                   * zone if necessary.
 227  227                                   */
 228  228                                  zcmn_err(tm->tm_vfsp->vfs_zone->zone_id,
 229  229                                      CE_WARN, "%s: File system full, "
 230  230                                      "swap space limit exceeded",
 231  231                                      tm->tm_mntpath);
 232  232  
 233  233                                  if (tm->tm_vfsp->vfs_zone->zone_id !=
 234  234                                      GLOBAL_ZONEID) {
 235  235  
 236  236                                          vfs_t *vfs = tm->tm_vfsp;
 237  237  
 238  238                                          zcmn_err(GLOBAL_ZONEID,
 239  239                                              CE_WARN, "%s: File system full, "
 240  240                                              "swap space limit exceeded",
 241  241                                              vfs->vfs_vnodecovered->v_path);
 242  242                                  }
 243  243                                  error = ENOSPC;
 244  244                                  break;
 245  245                          }
 246  246                          tmpnode_growmap(tp, (ulong_t)offset + bytes);
 247  247                  }
 248  248                  /* grow the file to the new length */
 249  249                  if (offset + bytes > tp->tn_size) {
 250  250                          tn_size_changed = 1;
 251  251                          old_tn_size = tp->tn_size;
 252  252                          /*
 253  253                           * Postpone updating tp->tn_size until uiomove() is
 254  254                           * done.
 255  255                           */
 256  256                          new_tn_size = offset + bytes;
 257  257                  }
 258  258                  if (bytes == PAGESIZE) {
 259  259                          /*
 260  260                           * Writing whole page so reading from disk
 261  261                           * is a waste
 262  262                           */
 263  263                          pagecreate = 1;
 264  264                  } else {
 265  265                          pagecreate = 0;
 266  266                  }
 267  267                  /*
 268  268                   * If writing past EOF or filling in a hole
 269  269                   * we need to allocate an anon slot.
 270  270                   */
 271  271                  if (anon_get_ptr(tp->tn_anon, pagenumber) == NULL) {
 272  272                          (void) anon_set_ptr(tp->tn_anon, pagenumber,
 273  273                              anon_alloc(vp, ptob(pagenumber)), ANON_SLEEP);
 274  274                          pagecreate = 1;
 275  275                          tp->tn_nblocks++;
 276  276                  }
 277  277  
 278  278                  /*
 279  279                   * We have to drop the contents lock to allow the VM
 280  280                   * system to reacquire it in tmp_getpage()
 281  281                   */
 282  282                  rw_exit(&tp->tn_contents);
 283  283  
 284  284                  /*
 285  285                   * Touch the page and fault it in if it is not in core
 286  286                   * before segmap_getmapflt or vpm_data_copy can lock it.
 287  287                   * This is to avoid the deadlock if the buffer is mapped
 288  288                   * to the same file through mmap which we want to write.
 289  289                   */
 290  290                  uio_prefaultpages((long)bytes, uio);
 291  291  
 292  292                  newpage = 0;
 293  293                  if (vpm_enable) {
 294  294                          /*
 295  295                           * Copy data. If new pages are created, part of
  296  296                           * the page that is not written will be initialized
 297  297                           * with zeros.
 298  298                           */
 299  299                          error = vpm_data_copy(vp, offset, bytes, uio,
 300  300                              !pagecreate, &newpage, 1, S_WRITE);
 301  301                  } else {
 302  302                          /* Get offset within the segmap mapping */
 303  303                          segmap_offset = (offset & PAGEMASK) & MAXBOFFSET;
 304  304                          base = segmap_getmapflt(segkmap, vp,
 305  305                              (offset &  MAXBMASK), PAGESIZE, !pagecreate,
 306  306                              S_WRITE);
 307  307                  }
 308  308  
 309  309  
 310  310                  if (!vpm_enable && pagecreate) {
 311  311                          /*
 312  312                           * segmap_pagecreate() returns 1 if it calls
 313  313                           * page_create_va() to allocate any pages.
 314  314                           */
 315  315                          newpage = segmap_pagecreate(segkmap,
 316  316                              base + segmap_offset, (size_t)PAGESIZE, 0);
 317  317                          /*
 318  318                           * Clear from the beginning of the page to the starting
 319  319                           * offset of the data.
 320  320                           */
 321  321                          if (pageoffset != 0)
 322  322                                  (void) kzero(base + segmap_offset,
 323  323                                      (size_t)pageoffset);
 324  324                  }
 325  325  
 326  326                  if (!vpm_enable) {
 327  327                          error = uiomove(base + segmap_offset + pageoffset,
 328  328                              (long)bytes, UIO_WRITE, uio);
 329  329                  }
 330  330  
 331  331                  if (!vpm_enable && pagecreate &&
 332  332                      uio->uio_offset < P2ROUNDUP(offset + bytes, PAGESIZE)) {
 333  333                          long    zoffset; /* zero from offset into page */
 334  334                          /*
 335  335                           * We created pages w/o initializing them completely,
 336  336                           * thus we need to zero the part that wasn't set up.
 337  337                           * This happens on most EOF write cases and if
 338  338                           * we had some sort of error during the uiomove.
 339  339                           */
 340  340                          long nmoved;
 341  341  
 342  342                          nmoved = uio->uio_offset - offset;
 343  343                          ASSERT((nmoved + pageoffset) <= PAGESIZE);
 344  344  
 345  345                          /*
 346  346                           * Zero from the end of data in the page to the
 347  347                           * end of the page.
 348  348                           */
 349  349                          if ((zoffset = pageoffset + nmoved) < PAGESIZE)
 350  350                                  (void) kzero(base + segmap_offset + zoffset,
 351  351                                      (size_t)PAGESIZE - zoffset);
 352  352                  }
 353  353  
 354  354                  /*
 355  355                   * Unlock the pages which have been allocated by
 356  356                   * page_create_va() in segmap_pagecreate()
 357  357                   */
 358  358                  if (!vpm_enable && newpage) {
 359  359                          segmap_pageunlock(segkmap, base + segmap_offset,
 360  360                              (size_t)PAGESIZE, S_WRITE);
 361  361                  }
 362  362  
 363  363                  if (error) {
 364  364                          /*
 365  365                           * If we failed on a write, we must
 366  366                           * be sure to invalidate any pages that may have
 367  367                           * been allocated.
 368  368                           */
 369  369                          if (vpm_enable) {
 370  370                                  (void) vpm_sync_pages(vp, offset, PAGESIZE,
 371  371                                      SM_INVAL);
 372  372                          } else {
 373  373                                  (void) segmap_release(segkmap, base, SM_INVAL);
 374  374                          }
 375  375                  } else {
 376  376                          if (vpm_enable) {
 377  377                                  error = vpm_sync_pages(vp, offset, PAGESIZE,
 378  378                                      0);
 379  379                          } else {
 380  380                                  error = segmap_release(segkmap, base, 0);
 381  381                          }
 382  382                  }
 383  383  
 384  384                  /*
 385  385                   * Re-acquire contents lock.
 386  386                   */
 387  387                  rw_enter(&tp->tn_contents, RW_WRITER);
 388  388  
 389  389                  /*
 390  390                   * Update tn_size.
 391  391                   */
 392  392                  if (tn_size_changed)
 393  393                          tp->tn_size = new_tn_size;
 394  394  
 395  395                  /*
 396  396                   * If the uiomove failed, fix up tn_size.
 397  397                   */
 398  398                  if (error) {
 399  399                          if (tn_size_changed) {
 400  400                                  /*
 401  401                                   * The uiomove failed, and we
  402  402                                   * allocated blocks, so get rid
 403  403                                   * of them.
 404  404                                   */
 405  405                                  (void) tmpnode_trunc(tm, tp,
 406  406                                      (ulong_t)old_tn_size);
 407  407                          }
 408  408                  } else {
 409  409                          /*
 410  410                           * XXX - Can this be out of the loop?
 411  411                           */
 412  412                          if ((tp->tn_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) &&
 413  413                              (tp->tn_mode & (S_ISUID | S_ISGID)) &&
 414  414                              secpolicy_vnode_setid_retain(cr,
 415  415                              (tp->tn_mode & S_ISUID) != 0 && tp->tn_uid == 0)) {
 416  416                                  /*
 417  417                                   * Clear Set-UID & Set-GID bits on
 418  418                                   * successful write if not privileged
 419  419                                   * and at least one of the execute bits
 420  420                                   * is set.  If we always clear Set-GID,
 421  421                                   * mandatory file and record locking is
  422  422                                   * unusable.
 423  423                                   */
 424  424                                  tp->tn_mode &= ~(S_ISUID | S_ISGID);
 425  425                          }
 426  426                          gethrestime(&now);
 427  427                          tp->tn_mtime = now;
 428  428                          tp->tn_ctime = now;
 429  429                  }
 430  430          } while (error == 0 && uio->uio_resid > 0 && bytes != 0);
 431  431  
 432  432  out:
 433  433          /*
 434  434           * If we've already done a partial-write, terminate
 435  435           * the write but return no error.
 436  436           */
 437  437          if (oresid != uio->uio_resid)
 438  438                  error = 0;
 439  439          TRACE_2(TR_FAC_TMPFS, TR_TMPFS_RWTMP_END,
 440  440              "tmp_wrtmp_end:vp %p error %d", vp, error);
 441  441          return (error);
 442  442  }
 443  443  
 444  444  /*
 445  445   * rdtmp does the real work of read requests for tmpfs.
 446  446   */
 447  447  static int
 448  448  rdtmp(
 449  449          struct tmount *tm,
 450  450          struct tmpnode *tp,
 451  451          struct uio *uio,
 452  452          struct caller_context *ct)
 453  453  {
 454  454          ulong_t pageoffset;     /* offset in tmpfs file (uio_offset) */
 455  455          ulong_t segmap_offset;  /* pagesize byte offset into segmap */
 456  456          caddr_t base;           /* base of segmap */
 457  457          ssize_t bytes;          /* bytes to uiomove */
 458  458          struct vnode *vp;
 459  459          int error;
 460  460          long oresid = uio->uio_resid;
 461  461  
 462  462  #if defined(lint)
 463  463          tm = tm;
 464  464  #endif
 465  465          vp = TNTOV(tp);
 466  466  
 467  467          TRACE_1(TR_FAC_TMPFS, TR_TMPFS_RWTMP_START, "tmp_rdtmp_start:vp %p",
 468  468              vp);
 469  469  
 470  470          ASSERT(RW_LOCK_HELD(&tp->tn_contents));
 471  471  
 472  472          if (MANDLOCK(vp, tp->tn_mode)) {
 473  473                  rw_exit(&tp->tn_contents);
 474  474                  /*
 475  475                   * tmp_getattr ends up being called by chklock
 476  476                   */
 477  477                  error = chklock(vp, FREAD, uio->uio_loffset, uio->uio_resid,
 478  478                      uio->uio_fmode, ct);
 479  479                  rw_enter(&tp->tn_contents, RW_READER);
 480  480                  if (error != 0) {
 481  481                          TRACE_2(TR_FAC_TMPFS, TR_TMPFS_RWTMP_END,
 482  482                              "tmp_rdtmp_end:vp %p error %d", vp, error);
 483  483                          return (error);
 484  484                  }
 485  485          }
 486  486          ASSERT(tp->tn_type == VREG);
 487  487  
 488  488          if (uio->uio_loffset >= MAXOFF_T) {
 489  489                  TRACE_2(TR_FAC_TMPFS, TR_TMPFS_RWTMP_END,
 490  490                      "tmp_rdtmp_end:vp %p error %d", vp, EINVAL);
 491  491                  return (0);
 492  492          }
 493  493          if (uio->uio_loffset < 0)
 494  494                  return (EINVAL);
 495  495          if (uio->uio_resid == 0) {
 496  496                  TRACE_2(TR_FAC_TMPFS, TR_TMPFS_RWTMP_END,
 497  497                      "tmp_rdtmp_end:vp %p error %d", vp, 0);
 498  498                  return (0);
 499  499          }
 500  500  
 501  501          vp = TNTOV(tp);
 502  502  
 503  503          do {
 504  504                  long diff;
 505  505                  long offset;
 506  506  
 507  507                  offset = uio->uio_offset;
 508  508                  pageoffset = offset & PAGEOFFSET;
 509  509                  bytes = MIN(PAGESIZE - pageoffset, uio->uio_resid);
 510  510  
 511  511                  diff = tp->tn_size - offset;
 512  512  
 513  513                  if (diff <= 0) {
 514  514                          error = 0;
 515  515                          goto out;
 516  516                  }
 517  517                  if (diff < bytes)
 518  518                          bytes = diff;
 519  519  
 520  520                  /*
 521  521                   * We have to drop the contents lock to allow the VM system
 522  522                   * to reacquire it in tmp_getpage() should the uiomove cause a
 523  523                   * pagefault.
 524  524                   */
 525  525                  rw_exit(&tp->tn_contents);
 526  526  
 527  527                  if (vpm_enable) {
 528  528                          /*
 529  529                           * Copy data.
 530  530                           */
 531  531                          error = vpm_data_copy(vp, offset, bytes, uio, 1, NULL,
 532  532                              0, S_READ);
 533  533                  } else {
 534  534                          segmap_offset = (offset & PAGEMASK) & MAXBOFFSET;
 535  535                          base = segmap_getmapflt(segkmap, vp, offset & MAXBMASK,
 536  536                              bytes, 1, S_READ);
 537  537  
 538  538                          error = uiomove(base + segmap_offset + pageoffset,
 539  539                              (long)bytes, UIO_READ, uio);
 540  540                  }
 541  541  
 542  542                  if (error) {
 543  543                          if (vpm_enable) {
 544  544                                  (void) vpm_sync_pages(vp, offset, PAGESIZE, 0);
 545  545                          } else {
 546  546                                  (void) segmap_release(segkmap, base, 0);
 547  547                          }
 548  548                  } else {
 549  549                          if (vpm_enable) {
 550  550                                  error = vpm_sync_pages(vp, offset, PAGESIZE,
 551  551                                      0);
 552  552                          } else {
 553  553                                  error = segmap_release(segkmap, base, 0);
 554  554                          }
 555  555                  }
 556  556  
 557  557                  /*
 558  558                   * Re-acquire contents lock.
 559  559                   */
 560  560                  rw_enter(&tp->tn_contents, RW_READER);
 561  561  
 562  562          } while (error == 0 && uio->uio_resid > 0);
 563  563  
 564  564  out:
 565  565          gethrestime(&tp->tn_atime);
 566  566  
 567  567          /*
 568  568           * If we've already done a partial read, terminate
 569  569           * the read but return no error.
 570  570           */
 571  571          if (oresid != uio->uio_resid)
 572  572                  error = 0;
 573  573  
 574  574          TRACE_2(TR_FAC_TMPFS, TR_TMPFS_RWTMP_END,
 575  575              "tmp_rdtmp_end:vp %x error %d", vp, error);
 576  576          return (error);
 577  577  }
 578  578  
 579  579  /* ARGSUSED2 */
 580  580  static int
 581  581  tmp_read(struct vnode *vp, struct uio *uiop, int ioflag, cred_t *cred,
 582  582      struct caller_context *ct)
 583  583  {
 584  584          struct tmpnode *tp = (struct tmpnode *)VTOTN(vp);
 585  585          struct tmount *tm = (struct tmount *)VTOTM(vp);
 586  586          int error;
 587  587  
 588  588          /*
 589  589           * We don't currently support reading non-regular files
 590  590           */
 591  591          if (vp->v_type == VDIR)
 592  592                  return (EISDIR);
 593  593          if (vp->v_type != VREG)
 594  594                  return (EINVAL);
 595  595          /*
 596  596           * tmp_rwlock should have already been called from layers above
 597  597           */
 598  598          ASSERT(RW_READ_HELD(&tp->tn_rwlock));
 599  599  
 600  600          rw_enter(&tp->tn_contents, RW_READER);
 601  601  
 602  602          error = rdtmp(tm, tp, uiop, ct);
 603  603  
 604  604          rw_exit(&tp->tn_contents);
 605  605  
 606  606          return (error);
 607  607  }
 608  608  
 609  609  static int
 610  610  tmp_write(struct vnode *vp, struct uio *uiop, int ioflag, struct cred *cred,
 611  611      struct caller_context *ct)
 612  612  {
 613  613          struct tmpnode *tp = (struct tmpnode *)VTOTN(vp);
 614  614          struct tmount *tm = (struct tmount *)VTOTM(vp);
 615  615          int error;
 616  616  
 617  617          /*
 618  618           * We don't currently support writing to non-regular files
 619  619           */
 620  620          if (vp->v_type != VREG)
 621  621                  return (EINVAL);        /* XXX EISDIR? */
 622  622  
 623  623          /*
 624  624           * tmp_rwlock should have already been called from layers above
 625  625           */
 626  626          ASSERT(RW_WRITE_HELD(&tp->tn_rwlock));
 627  627  
 628  628          rw_enter(&tp->tn_contents, RW_WRITER);
 629  629  
 630  630          if (ioflag & FAPPEND) {
 631  631                  /*
 632  632                   * In append mode start at end of file.
 633  633                   */
 634  634                  uiop->uio_loffset = tp->tn_size;
 635  635          }
 636  636  
 637  637          error = wrtmp(tm, tp, uiop, cred, ct);
 638  638  
 639  639          rw_exit(&tp->tn_contents);
 640  640  
 641  641          return (error);
 642  642  }
 643  643  
 644  644  /* ARGSUSED */
 645  645  static int
 646  646  tmp_ioctl(
 647  647          struct vnode *vp,
 648  648          int com,
 649  649          intptr_t data,
 650  650          int flag,
 651  651          struct cred *cred,
 652  652          int *rvalp,
 653  653          caller_context_t *ct)
 654  654  {
 655  655          return (ENOTTY);
 656  656  }
 657  657  
 658  658  /* ARGSUSED2 */
 659  659  static int
 660  660  tmp_getattr(
 661  661          struct vnode *vp,
 662  662          struct vattr *vap,
 663  663          int flags,
 664  664          struct cred *cred,
 665  665          caller_context_t *ct)
 666  666  {
 667  667          struct tmpnode *tp = (struct tmpnode *)VTOTN(vp);
 668  668          struct vnode *mvp;
 669  669          struct vattr va;
 670  670          int attrs = 1;
 671  671  
 672  672          /*
 673  673           * A special case to handle the root tnode on a diskless nfs
 674  674           * client who may have had its uid and gid inherited
 675  675           * from an nfs vnode with nobody ownership.  Likely the
 676  676           * root filesystem. After nfs is fully functional the uid/gid
  677  677           * may be mappable, so ask again.
 678  678           * vfsp can't get unmounted because we hold vp.
 679  679           */
 680  680          if (vp->v_flag & VROOT &&
 681  681              (mvp = vp->v_vfsp->vfs_vnodecovered) != NULL) {
 682  682                  mutex_enter(&tp->tn_tlock);
 683  683                  if (tp->tn_uid == UID_NOBODY || tp->tn_gid == GID_NOBODY) {
 684  684                          mutex_exit(&tp->tn_tlock);
 685  685                          bzero(&va, sizeof (struct vattr));
 686  686                          va.va_mask = AT_UID|AT_GID;
 687  687                          attrs = VOP_GETATTR(mvp, &va, 0, cred, ct);
 688  688                  } else {
 689  689                          mutex_exit(&tp->tn_tlock);
 690  690                  }
 691  691          }
 692  692          mutex_enter(&tp->tn_tlock);
 693  693          if (attrs == 0) {
 694  694                  tp->tn_uid = va.va_uid;
 695  695                  tp->tn_gid = va.va_gid;
 696  696          }
 697  697          vap->va_type = vp->v_type;
 698  698          vap->va_mode = tp->tn_mode & MODEMASK;
 699  699          vap->va_uid = tp->tn_uid;
 700  700          vap->va_gid = tp->tn_gid;
 701  701          vap->va_fsid = tp->tn_fsid;
 702  702          vap->va_nodeid = (ino64_t)tp->tn_nodeid;
 703  703          vap->va_nlink = tp->tn_nlink;
 704  704          vap->va_size = (u_offset_t)tp->tn_size;
 705  705          vap->va_atime = tp->tn_atime;
 706  706          vap->va_mtime = tp->tn_mtime;
 707  707          vap->va_ctime = tp->tn_ctime;
 708  708          vap->va_blksize = PAGESIZE;
 709  709          vap->va_rdev = tp->tn_rdev;
 710  710          vap->va_seq = tp->tn_seq;
 711  711  
 712  712          /*
 713  713           * XXX Holes are not taken into account.  We could take the time to
 714  714           * run through the anon array looking for allocated slots...
 715  715           */
 716  716          vap->va_nblocks = (fsblkcnt64_t)btodb(ptob(btopr(vap->va_size)));
 717  717          mutex_exit(&tp->tn_tlock);
 718  718          return (0);
 719  719  }
 720  720  
 721  721  /*ARGSUSED4*/
 722  722  static int
 723  723  tmp_setattr(
 724  724          struct vnode *vp,
 725  725          struct vattr *vap,
 726  726          int flags,
 727  727          struct cred *cred,
 728  728          caller_context_t *ct)
 729  729  {
 730  730          struct tmount *tm = (struct tmount *)VTOTM(vp);
 731  731          struct tmpnode *tp = (struct tmpnode *)VTOTN(vp);
 732  732          int error = 0;
 733  733          struct vattr *get;
 734  734          long mask;
 735  735  
 736  736          /*
 737  737           * Cannot set these attributes
 738  738           */
 739  739          if ((vap->va_mask & AT_NOSET) || (vap->va_mask & AT_XVATTR))
 740  740                  return (EINVAL);
 741  741  
 742  742          mutex_enter(&tp->tn_tlock);
 743  743  
 744  744          get = &tp->tn_attr;
 745  745          /*
 746  746           * Change file access modes. Must be owner or have sufficient
 747  747           * privileges.
 748  748           */
 749  749          error = secpolicy_vnode_setattr(cred, vp, vap, get, flags, tmp_taccess,
 750  750              tp);
 751  751  
 752  752          if (error)
 753  753                  goto out;
 754  754  
 755  755          mask = vap->va_mask;
 756  756  
 757  757          if (mask & AT_MODE) {
 758  758                  get->va_mode &= S_IFMT;
 759  759                  get->va_mode |= vap->va_mode & ~S_IFMT;
 760  760          }
 761  761  
 762  762          if (mask & AT_UID)
 763  763                  get->va_uid = vap->va_uid;
 764  764          if (mask & AT_GID)
 765  765                  get->va_gid = vap->va_gid;
 766  766          if (mask & AT_ATIME)
 767  767                  get->va_atime = vap->va_atime;
 768  768          if (mask & AT_MTIME)
 769  769                  get->va_mtime = vap->va_mtime;
 770  770  
 771  771          if (mask & (AT_UID | AT_GID | AT_MODE | AT_MTIME))
 772  772                  gethrestime(&tp->tn_ctime);
 773  773  
 774  774          if (mask & AT_SIZE) {
 775  775                  ASSERT(vp->v_type != VDIR);
 776  776  
 777  777                  /* Don't support large files. */
 778  778                  if (vap->va_size > MAXOFF_T) {
 779  779                          error = EFBIG;
 780  780                          goto out;
 781  781                  }
 782  782                  mutex_exit(&tp->tn_tlock);
 783  783  
 784  784                  rw_enter(&tp->tn_rwlock, RW_WRITER);
 785  785                  rw_enter(&tp->tn_contents, RW_WRITER);
 786  786                  error = tmpnode_trunc(tm, tp, (ulong_t)vap->va_size);
 787  787                  rw_exit(&tp->tn_contents);
 788  788                  rw_exit(&tp->tn_rwlock);
 789  789  
 790      -                if (error == 0 && vap->va_size == 0)
 791      -                        vnevent_truncate(vp, ct);
      790 +                if (error == 0) {
      791 +                        if (vap->va_size == 0) {
      792 +                                vnevent_truncate(vp, ct);
      793 +                        } else {
      794 +                                vnevent_resize(vp, ct);
      795 +                        }
      796 +                }
 792  797  
 793  798                  goto out1;
 794  799          }
 795  800  out:
 796  801          mutex_exit(&tp->tn_tlock);
 797  802  out1:
 798  803          return (error);
 799  804  }
 800  805  
 801  806  /* ARGSUSED2 */
 802  807  static int
 803  808  tmp_access(
 804  809          struct vnode *vp,
 805  810          int mode,
 806  811          int flags,
 807  812          struct cred *cred,
 808  813          caller_context_t *ct)
 809  814  {
 810  815          struct tmpnode *tp = (struct tmpnode *)VTOTN(vp);
 811  816          int error;
 812  817  
 813  818          mutex_enter(&tp->tn_tlock);
 814  819          error = tmp_taccess(tp, mode, cred);
 815  820          mutex_exit(&tp->tn_tlock);
 816  821          return (error);
 817  822  }
 818  823  
 819  824  /* ARGSUSED3 */
 820  825  static int
 821  826  tmp_lookup(
 822  827          struct vnode *dvp,
 823  828          char *nm,
 824  829          struct vnode **vpp,
 825  830          struct pathname *pnp,
 826  831          int flags,
 827  832          struct vnode *rdir,
 828  833          struct cred *cred,
 829  834          caller_context_t *ct,
 830  835          int *direntflags,
 831  836          pathname_t *realpnp)
 832  837  {
 833  838          struct tmpnode *tp = (struct tmpnode *)VTOTN(dvp);
 834  839          struct tmpnode *ntp = NULL;
 835  840          int error;
 836  841  
 837  842  
 838  843          /* allow cd into @ dir */
 839  844          if (flags & LOOKUP_XATTR) {
 840  845                  struct tmpnode *xdp;
 841  846                  struct tmount *tm;
 842  847  
 843  848                  /*
 844  849                   * don't allow attributes if not mounted XATTR support
 845  850                   */
 846  851                  if (!(dvp->v_vfsp->vfs_flag & VFS_XATTR))
 847  852                          return (EINVAL);
 848  853  
 849  854                  if (tp->tn_flags & ISXATTR)
 850  855                          /* No attributes on attributes */
 851  856                          return (EINVAL);
 852  857  
 853  858                  rw_enter(&tp->tn_rwlock, RW_WRITER);
 854  859                  if (tp->tn_xattrdp == NULL) {
 855  860                          if (!(flags & CREATE_XATTR_DIR)) {
 856  861                                  rw_exit(&tp->tn_rwlock);
 857  862                                  return (ENOENT);
 858  863                          }
 859  864  
 860  865                          /*
 861  866                           * No attribute directory exists for this
 862  867                           * node - create the attr dir as a side effect
 863  868                           * of this lookup.
 864  869                           */
 865  870  
 866  871                          /*
 867  872                           * Make sure we have adequate permission...
 868  873                           */
 869  874  
 870  875                          if ((error = tmp_taccess(tp, VWRITE, cred)) != 0) {
 871  876                                  rw_exit(&tp->tn_rwlock);
 872  877                                  return (error);
 873  878                          }
 874  879  
 875  880                          xdp = tmp_memalloc(sizeof (struct tmpnode),
 876  881                              TMP_MUSTHAVE);
 877  882                          tm = VTOTM(dvp);
 878  883                          tmpnode_init(tm, xdp, &tp->tn_attr, NULL);
 879  884                          /*
 880  885                           * Fix-up fields unique to attribute directories.
 881  886                           */
 882  887                          xdp->tn_flags = ISXATTR;
 883  888                          xdp->tn_type = VDIR;
 884  889                          if (tp->tn_type == VDIR) {
 885  890                                  xdp->tn_mode = tp->tn_attr.va_mode;
 886  891                          } else {
 887  892                                  xdp->tn_mode = 0700;
 888  893                                  if (tp->tn_attr.va_mode & 0040)
 889  894                                          xdp->tn_mode |= 0750;
 890  895                                  if (tp->tn_attr.va_mode & 0004)
 891  896                                          xdp->tn_mode |= 0705;
 892  897                          }
 893  898                          xdp->tn_vnode->v_type = VDIR;
 894  899                          xdp->tn_vnode->v_flag |= V_XATTRDIR;
 895  900                          tdirinit(tp, xdp);
 896  901                          tp->tn_xattrdp = xdp;
 897  902                  } else {
 898  903                          VN_HOLD(tp->tn_xattrdp->tn_vnode);
 899  904                  }
 900  905                  *vpp = TNTOV(tp->tn_xattrdp);
 901  906                  rw_exit(&tp->tn_rwlock);
 902  907                  return (0);
 903  908          }
 904  909  
 905  910          /*
 906  911           * Null component name is a synonym for directory being searched.
 907  912           */
 908  913          if (*nm == '\0') {
 909  914                  VN_HOLD(dvp);
 910  915                  *vpp = dvp;
 911  916                  return (0);
 912  917          }
 913  918          ASSERT(tp);
 914  919  
 915  920          error = tdirlookup(tp, nm, &ntp, cred);
 916  921  
 917  922          if (error == 0) {
 918  923                  ASSERT(ntp);
 919  924                  *vpp = TNTOV(ntp);
 920  925                  /*
 921  926                   * If vnode is a device return special vnode instead
 922  927                   */
 923  928                  if (IS_DEVVP(*vpp)) {
 924  929                          struct vnode *newvp;
 925  930  
 926  931                          newvp = specvp(*vpp, (*vpp)->v_rdev, (*vpp)->v_type,
 927  932                              cred);
 928  933                          VN_RELE(*vpp);
 929  934                          *vpp = newvp;
 930  935                  }
 931  936          }
 932  937          TRACE_4(TR_FAC_TMPFS, TR_TMPFS_LOOKUP,
 933  938              "tmpfs lookup:vp %p name %s vpp %p error %d",
 934  939              dvp, nm, vpp, error);
 935  940          return (error);
 936  941  }
 937  942  
 938  943  /*ARGSUSED7*/
 939  944  static int
 940  945  tmp_create(
 941  946          struct vnode *dvp,
 942  947          char *nm,
 943  948          struct vattr *vap,
 944  949          enum vcexcl exclusive,
 945  950          int mode,
 946  951          struct vnode **vpp,
 947  952          struct cred *cred,
 948  953          int flag,
 949  954          caller_context_t *ct,
 950  955          vsecattr_t *vsecp)
 951  956  {
 952  957          struct tmpnode *parent;
 953  958          struct tmount *tm;
 954  959          struct tmpnode *self;
 955  960          int error;
 956  961          struct tmpnode *oldtp;
 957  962  
 958  963  again:
 959  964          parent = (struct tmpnode *)VTOTN(dvp);
 960  965          tm = (struct tmount *)VTOTM(dvp);
 961  966          self = NULL;
 962  967          error = 0;
 963  968          oldtp = NULL;
 964  969  
 965  970          /* device files not allowed in ext. attr dirs */
 966  971          if ((parent->tn_flags & ISXATTR) &&
 967  972              (vap->va_type == VBLK || vap->va_type == VCHR ||
 968  973              vap->va_type == VFIFO || vap->va_type == VDOOR ||
 969  974              vap->va_type == VSOCK || vap->va_type == VPORT))
 970  975                          return (EINVAL);
 971  976  
 972  977          if (vap->va_type == VREG && (vap->va_mode & VSVTX)) {
 973  978                  /* Must be privileged to set sticky bit */
 974  979                  if (secpolicy_vnode_stky_modify(cred))
 975  980                          vap->va_mode &= ~VSVTX;
 976  981          } else if (vap->va_type == VNON) {
 977  982                  return (EINVAL);
 978  983          }
 979  984  
 980  985          /*
 981  986           * Null component name is a synonym for directory being searched.
 982  987           */
 983  988          if (*nm == '\0') {
 984  989                  VN_HOLD(dvp);
 985  990                  oldtp = parent;
 986  991          } else {
 987  992                  error = tdirlookup(parent, nm, &oldtp, cred);
 988  993          }
 989  994  
 990  995          if (error == 0) {       /* name found */
 991  996                  boolean_t trunc = B_FALSE;
 992  997  
 993  998                  ASSERT(oldtp);
 994  999  
 995 1000                  rw_enter(&oldtp->tn_rwlock, RW_WRITER);
 996 1001  
 997 1002                  /*
 998 1003                   * if create/read-only an existing
 999 1004                   * directory, allow it
1000 1005                   */
1001 1006                  if (exclusive == EXCL)
1002 1007                          error = EEXIST;
1003 1008                  else if ((oldtp->tn_type == VDIR) && (mode & VWRITE))
1004 1009                          error = EISDIR;
1005 1010                  else {
1006 1011                          error = tmp_taccess(oldtp, mode, cred);
1007 1012                  }
1008 1013  
1009 1014                  if (error) {
1010 1015                          rw_exit(&oldtp->tn_rwlock);
1011 1016                          tmpnode_rele(oldtp);
1012 1017                          return (error);
1013 1018                  }
1014 1019                  *vpp = TNTOV(oldtp);
1015 1020                  if ((*vpp)->v_type == VREG && (vap->va_mask & AT_SIZE) &&
1016 1021                      vap->va_size == 0) {
1017 1022                          rw_enter(&oldtp->tn_contents, RW_WRITER);
1018 1023                          (void) tmpnode_trunc(tm, oldtp, 0);
1019 1024                          rw_exit(&oldtp->tn_contents);
1020 1025                          trunc = B_TRUE;
1021 1026                  }
1022 1027                  rw_exit(&oldtp->tn_rwlock);
1023 1028                  if (IS_DEVVP(*vpp)) {
1024 1029                          struct vnode *newvp;
1025 1030  
1026 1031                          newvp = specvp(*vpp, (*vpp)->v_rdev, (*vpp)->v_type,
1027 1032                              cred);
1028 1033                          VN_RELE(*vpp);
1029 1034                          if (newvp == NULL) {
1030 1035                                  return (ENOSYS);
1031 1036                          }
1032 1037                          *vpp = newvp;
1033 1038                  }
1034 1039  
1035 1040                  if (trunc)
1036 1041                          vnevent_create(*vpp, ct);
1037 1042  
1038 1043                  return (0);
1039 1044          }
1040 1045  
1041 1046          if (error != ENOENT)
1042 1047                  return (error);
1043 1048  
1044 1049          rw_enter(&parent->tn_rwlock, RW_WRITER);
1045 1050          error = tdirenter(tm, parent, nm, DE_CREATE,
1046 1051              (struct tmpnode *)NULL, (struct tmpnode *)NULL,
1047 1052              vap, &self, cred, ct);
1048 1053          rw_exit(&parent->tn_rwlock);
1049 1054  
1050 1055          if (error) {
1051 1056                  if (self)
1052 1057                          tmpnode_rele(self);
1053 1058  
1054 1059                  if (error == EEXIST) {
1055 1060                          /*
1056 1061                           * This means that the file was created sometime
1057 1062                           * after we checked and did not find it and when
1058 1063                           * we went to create it.
1059 1064                           * Since creat() is supposed to truncate a file
1060 1065                           * that already exists, go back to the beginning
1061 1066                           * of the function. This time we will find it
1062 1067                           * and go down the tmp_trunc() path
1063 1068                           */
1064 1069                          goto again;
1065 1070                  }
1066 1071                  return (error);
1067 1072          }
1068 1073  
1069 1074          *vpp = TNTOV(self);
1070 1075  
1071 1076          if (!error && IS_DEVVP(*vpp)) {
1072 1077                  struct vnode *newvp;
1073 1078  
1074 1079                  newvp = specvp(*vpp, (*vpp)->v_rdev, (*vpp)->v_type, cred);
1075 1080                  VN_RELE(*vpp);
1076 1081                  if (newvp == NULL)
1077 1082                          return (ENOSYS);
1078 1083                  *vpp = newvp;
1079 1084          }
1080 1085          TRACE_3(TR_FAC_TMPFS, TR_TMPFS_CREATE,
1081 1086              "tmpfs create:dvp %p nm %s vpp %p", dvp, nm, vpp);
1082 1087          return (0);
1083 1088  }
1084 1089  
1085 1090  /* ARGSUSED3 */
1086 1091  static int
1087 1092  tmp_remove(
1088 1093          struct vnode *dvp,
1089 1094          char *nm,
1090 1095          struct cred *cred,
1091 1096          caller_context_t *ct,
1092 1097          int flags)
1093 1098  {
1094 1099          struct tmpnode *parent = (struct tmpnode *)VTOTN(dvp);
1095 1100          int error;
1096 1101          struct tmpnode *tp = NULL;
1097 1102  
1098 1103          error = tdirlookup(parent, nm, &tp, cred);
1099 1104          if (error)
1100 1105                  return (error);
1101 1106  
1102 1107          ASSERT(tp);
1103 1108          rw_enter(&parent->tn_rwlock, RW_WRITER);
1104 1109          rw_enter(&tp->tn_rwlock, RW_WRITER);
1105 1110  
1106 1111          if (tp->tn_type != VDIR ||
1107 1112              (error = secpolicy_fs_linkdir(cred, dvp->v_vfsp)) == 0)
1108 1113                  error = tdirdelete(parent, tp, nm, tp->tn_type == VDIR ?
1109 1114                      DR_RMDIR : DR_REMOVE, cred);
1110 1115  
1111 1116          rw_exit(&tp->tn_rwlock);
1112 1117          rw_exit(&parent->tn_rwlock);
1113 1118          vnevent_remove(TNTOV(tp), dvp, nm, ct);
1114 1119          tmpnode_rele(tp);
1115 1120  
1116 1121          TRACE_3(TR_FAC_TMPFS, TR_TMPFS_REMOVE,
1117 1122              "tmpfs remove:dvp %p nm %s error %d", dvp, nm, error);
1118 1123          return (error);
1119 1124  }
1120 1125  
1121 1126  /* ARGSUSED4 */
1122 1127  static int
1123 1128  tmp_link(
1124 1129          struct vnode *dvp,
1125 1130          struct vnode *srcvp,
1126 1131          char *tnm,
1127 1132          struct cred *cred,
1128 1133          caller_context_t *ct,
1129 1134          int flags)
1130 1135  {
1131 1136          struct tmpnode *parent;
1132 1137          struct tmpnode *from;
1133 1138          struct tmount *tm = (struct tmount *)VTOTM(dvp);
1134 1139          int error;
1135 1140          struct tmpnode *found = NULL;
1136 1141          struct vnode *realvp;
1137 1142  
1138 1143          if (VOP_REALVP(srcvp, &realvp, ct) == 0)
1139 1144                  srcvp = realvp;
1140 1145  
1141 1146          parent = (struct tmpnode *)VTOTN(dvp);
1142 1147          from = (struct tmpnode *)VTOTN(srcvp);
1143 1148  
1144 1149          if ((srcvp->v_type == VDIR &&
1145 1150              secpolicy_fs_linkdir(cred, dvp->v_vfsp)) ||
1146 1151              (from->tn_uid != crgetuid(cred) && secpolicy_basic_link(cred)))
1147 1152                  return (EPERM);
1148 1153  
1149 1154          /*
1150 1155           * Make sure link for extended attributes is valid
1151 1156           * We only support hard linking of xattr's in xattrdir to an xattrdir
1152 1157           */
1153 1158          if ((from->tn_flags & ISXATTR) != (parent->tn_flags & ISXATTR))
1154 1159                  return (EINVAL);
1155 1160  
1156 1161          error = tdirlookup(parent, tnm, &found, cred);
1157 1162          if (error == 0) {
1158 1163                  ASSERT(found);
1159 1164                  tmpnode_rele(found);
1160 1165                  return (EEXIST);
1161 1166          }
1162 1167  
1163 1168          if (error != ENOENT)
1164 1169                  return (error);
1165 1170  
1166 1171          rw_enter(&parent->tn_rwlock, RW_WRITER);
1167 1172          error = tdirenter(tm, parent, tnm, DE_LINK, (struct tmpnode *)NULL,
1168 1173              from, NULL, (struct tmpnode **)NULL, cred, ct);
1169 1174          rw_exit(&parent->tn_rwlock);
1170 1175          if (error == 0) {
1171 1176                  vnevent_link(srcvp, ct);
1172 1177          }
1173 1178          return (error);
1174 1179  }
1175 1180  
1176 1181  /* ARGSUSED5 */
1177 1182  static int
1178 1183  tmp_rename(
1179 1184          struct vnode *odvp,     /* source parent vnode */
1180 1185          char *onm,              /* source name */
1181 1186          struct vnode *ndvp,     /* destination parent vnode */
1182 1187          char *nnm,              /* destination name */
1183 1188          struct cred *cred,
1184 1189          caller_context_t *ct,
1185 1190          int flags)
1186 1191  {
1187 1192          struct tmpnode *fromparent;
1188 1193          struct tmpnode *toparent;
1189 1194          struct tmpnode *fromtp = NULL;  /* source tmpnode */
1190 1195          struct tmpnode *totp;           /* target tmpnode */
1191 1196          struct tmount *tm = (struct tmount *)VTOTM(odvp);
1192 1197          int error;
1193 1198          int samedir = 0;        /* set if odvp == ndvp */
1194 1199          struct vnode *realvp;
1195 1200  
1196 1201          if (VOP_REALVP(ndvp, &realvp, ct) == 0)
1197 1202                  ndvp = realvp;
1198 1203  
1199 1204          fromparent = (struct tmpnode *)VTOTN(odvp);
1200 1205          toparent = (struct tmpnode *)VTOTN(ndvp);
1201 1206  
1202 1207          if ((fromparent->tn_flags & ISXATTR) != (toparent->tn_flags & ISXATTR))
1203 1208                  return (EINVAL);
1204 1209  
1205 1210          mutex_enter(&tm->tm_renamelck);
1206 1211  
1207 1212          /*
1208 1213           * Look up tmpnode of file we're supposed to rename.
1209 1214           */
1210 1215          error = tdirlookup(fromparent, onm, &fromtp, cred);
1211 1216          if (error) {
1212 1217                  mutex_exit(&tm->tm_renamelck);
1213 1218                  return (error);
1214 1219          }
1215 1220  
1216 1221          /*
1217 1222           * Make sure we can delete the old (source) entry.  This
1218 1223           * requires write permission on the containing directory.  If
1219 1224           * that directory is "sticky" it requires further checks.
1220 1225           */
1221 1226          if (((error = tmp_taccess(fromparent, VWRITE, cred)) != 0) ||
1222 1227              (error = tmp_sticky_remove_access(fromparent, fromtp, cred)) != 0)
1223 1228                  goto done;
1224 1229  
1225 1230          /*
1226 1231           * Check for renaming to or from '.' or '..' or that
1227 1232           * fromtp == fromparent
1228 1233           */
1229 1234          if ((onm[0] == '.' &&
1230 1235              (onm[1] == '\0' || (onm[1] == '.' && onm[2] == '\0'))) ||
1231 1236              (nnm[0] == '.' &&
1232 1237              (nnm[1] == '\0' || (nnm[1] == '.' && nnm[2] == '\0'))) ||
1233 1238              (fromparent == fromtp)) {
1234 1239                  error = EINVAL;
1235 1240                  goto done;
1236 1241          }
1237 1242  
1238 1243          samedir = (fromparent == toparent);
1239 1244          /*
1240 1245           * Make sure we can search and rename into the new
1241 1246           * (destination) directory.
1242 1247           */
1243 1248          if (!samedir) {
1244 1249                  error = tmp_taccess(toparent, VEXEC|VWRITE, cred);
1245 1250                  if (error)
1246 1251                          goto done;
1247 1252          }
1248 1253  
1249 1254          if (tdirlookup(toparent, nnm, &totp, cred) == 0) {
1250 1255                  vnevent_pre_rename_dest(TNTOV(totp), ndvp, nnm, ct);
1251 1256                  tmpnode_rele(totp);
1252 1257          }
1253 1258  
1254 1259          /* Notify the target dir. if not the same as the source dir. */
1255 1260          if (ndvp != odvp) {
1256 1261                  vnevent_pre_rename_dest_dir(ndvp, TNTOV(fromtp), nnm, ct);
1257 1262          }
1258 1263  
1259 1264          vnevent_pre_rename_src(TNTOV(fromtp), odvp, onm, ct);
1260 1265  
1261 1266          /*
1262 1267           * Link source to new target
1263 1268           */
1264 1269          rw_enter(&toparent->tn_rwlock, RW_WRITER);
1265 1270          error = tdirenter(tm, toparent, nnm, DE_RENAME,
1266 1271              fromparent, fromtp, (struct vattr *)NULL,
1267 1272              (struct tmpnode **)NULL, cred, ct);
1268 1273          rw_exit(&toparent->tn_rwlock);
1269 1274  
1270 1275          if (error) {
1271 1276                  /*
1272 1277                   * ESAME isn't really an error; it indicates that the
1273 1278                   * operation should not be done because the source and target
1274 1279                   * are the same file, but that no error should be reported.
1275 1280                   */
1276 1281                  if (error == ESAME)
1277 1282                          error = 0;
1278 1283                  goto done;
1279 1284          }
1280 1285  
1281 1286          /*
1282 1287           * Unlink from source.
1283 1288           */
1284 1289          rw_enter(&fromparent->tn_rwlock, RW_WRITER);
1285 1290          rw_enter(&fromtp->tn_rwlock, RW_WRITER);
1286 1291  
1287 1292          error = tdirdelete(fromparent, fromtp, onm, DR_RENAME, cred);
1288 1293  
1289 1294          /*
1290 1295           * The following handles the case where our source tmpnode was
1291 1296           * removed before we got to it.
1292 1297           *
1293 1298           * XXX We should also clean up properly in the case where tdirdelete
1294 1299           * fails for some other reason.  Currently this case shouldn't happen.
1295 1300           * (see 1184991).
1296 1301           */
1297 1302          if (error == ENOENT)
1298 1303                  error = 0;
1299 1304  
1300 1305          rw_exit(&fromtp->tn_rwlock);
1301 1306          rw_exit(&fromparent->tn_rwlock);
1302 1307  
1303 1308          if (error == 0) {
1304 1309                  vnevent_rename_src(TNTOV(fromtp), odvp, onm, ct);
1305 1310                  /*
1306 1311                   * vnevent_rename_dest is called in tdirenter().
1307      -                 * Notify the target dir if not same as source dir.
1308 1312                   */
1309      -                if (ndvp != odvp)
1310      -                        vnevent_rename_dest_dir(ndvp, ct);
     1313 +                vnevent_rename_dest_dir(ndvp, TNTOV(fromtp), nnm, ct);
1311 1314          }
1312 1315  
1313 1316  done:
1314 1317          tmpnode_rele(fromtp);
1315 1318          mutex_exit(&tm->tm_renamelck);
1316 1319  
1317 1320          TRACE_5(TR_FAC_TMPFS, TR_TMPFS_RENAME,
1318 1321              "tmpfs rename:ovp %p onm %s nvp %p nnm %s error %d", odvp, onm,
1319 1322              ndvp, nnm, error);
1320 1323          return (error);
1321 1324  }
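
The hunks above reorder the rename events: the pre-rename hooks (vnevent_pre_rename_src, vnevent_pre_rename_dest, vnevent_pre_rename_dest_dir) fire before any directory is modified, and the post-rename notifications fire only once the rename has succeeded, so an inotify watcher sees a coherent IN_MOVED_FROM/IN_MOVED_TO pair. A minimal userland sketch of that view, assuming the Linux-compatible inotify interface added by OS-3294 is reachable via <sys/inotify.h>; the paths are hypothetical and error handling is abbreviated:

    #include <sys/inotify.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int
    main(void)
    {
            char buf[4096];
            ssize_t n;
            int fd = inotify_init();

            if (fd < 0 || inotify_add_watch(fd, "/tmp",
                IN_MOVED_FROM | IN_MOVED_TO) < 0)
                    return (1);

            /* hypothetical files; create the source, then rename it */
            (void) close(open("/tmp/old", O_CREAT | O_WRONLY, 0644));
            (void) rename("/tmp/old", "/tmp/new");

            if ((n = read(fd, buf, sizeof (buf))) > 0) {
                    struct inotify_event *ev = (struct inotify_event *)buf;

                    while ((char *)ev < buf + n) {
                            /* the shared cookie ties FROM and TO together */
                            printf("mask 0x%x cookie %u name %s\n",
                                (unsigned)ev->mask, (unsigned)ev->cookie,
                                ev->len ? ev->name : "");
                            ev = (struct inotify_event *)((char *)ev +
                                sizeof (*ev) + ev->len);
                    }
            }
            return (0);
    }
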
1322 1325  
1323 1326  /* ARGSUSED5 */
1324 1327  static int
1325 1328  tmp_mkdir(
1326 1329          struct vnode *dvp,
1327 1330          char *nm,
1328 1331          struct vattr *va,
1329 1332          struct vnode **vpp,
1330 1333          struct cred *cred,
1331 1334          caller_context_t *ct,
1332 1335          int flags,
1333 1336          vsecattr_t *vsecp)
1334 1337  {
1335 1338          struct tmpnode *parent = (struct tmpnode *)VTOTN(dvp);
1336 1339          struct tmpnode *self = NULL;
1337 1340          struct tmount *tm = (struct tmount *)VTOTM(dvp);
1338 1341          int error;
1339 1342  
1340 1343          /* no new dirs allowed in xattr dirs */
1341 1344          if (parent->tn_flags & ISXATTR)
1342 1345                  return (EINVAL);
1343 1346  
1344 1347          /*
1345 1348           * Might be dangling directory.  Catch it here,
1346 1349           * because a ENOENT return from tdirlookup() is
1347 1350           * an "o.k. return".
1348 1351           */
1349 1352          if (parent->tn_nlink == 0)
1350 1353                  return (ENOENT);
1351 1354  
1352 1355          error = tdirlookup(parent, nm, &self, cred);
1353 1356          if (error == 0) {
1354 1357                  ASSERT(self);
1355 1358                  tmpnode_rele(self);
1356 1359                  return (EEXIST);
1357 1360          }
1358 1361          if (error != ENOENT)
1359 1362                  return (error);
1360 1363  
1361 1364          rw_enter(&parent->tn_rwlock, RW_WRITER);
1362 1365          error = tdirenter(tm, parent, nm, DE_MKDIR, (struct tmpnode *)NULL,
1363 1366              (struct tmpnode *)NULL, va, &self, cred, ct);
1364 1367          if (error) {
1365 1368                  rw_exit(&parent->tn_rwlock);
1366 1369                  if (self)
1367 1370                          tmpnode_rele(self);
1368 1371                  return (error);
1369 1372          }
1370 1373          rw_exit(&parent->tn_rwlock);
1371 1374          *vpp = TNTOV(self);
1372 1375          return (0);
1373 1376  }
1374 1377  
1375 1378  /* ARGSUSED4 */
1376 1379  static int
1377 1380  tmp_rmdir(
1378 1381          struct vnode *dvp,
1379 1382          char *nm,
1380 1383          struct vnode *cdir,
1381 1384          struct cred *cred,
1382 1385          caller_context_t *ct,
1383 1386          int flags)
1384 1387  {
1385 1388          struct tmpnode *parent = (struct tmpnode *)VTOTN(dvp);
1386 1389          struct tmpnode *self = NULL;
1387 1390          struct vnode *vp;
1388 1391          int error = 0;
1389 1392  
1390 1393          /*
1391 1394           * Return error when removing . and ..
1392 1395           */
1393 1396          if (strcmp(nm, ".") == 0)
1394 1397                  return (EINVAL);
1395 1398          if (strcmp(nm, "..") == 0)
1396 1399                  return (EEXIST); /* Should be ENOTEMPTY */
1397 1400          error = tdirlookup(parent, nm, &self, cred);
1398 1401          if (error)
1399 1402                  return (error);
1400 1403  
1401 1404          rw_enter(&parent->tn_rwlock, RW_WRITER);
1402 1405          rw_enter(&self->tn_rwlock, RW_WRITER);
1403 1406  
1404 1407          vp = TNTOV(self);
1405 1408          if (vp == dvp || vp == cdir) {
1406 1409                  error = EINVAL;
1407 1410                  goto done1;
1408 1411          }
1409 1412          if (self->tn_type != VDIR) {
1410 1413                  error = ENOTDIR;
1411 1414                  goto done1;
1412 1415          }
1413 1416  
1414 1417          mutex_enter(&self->tn_tlock);
1415 1418          if (self->tn_nlink > 2) {
1416 1419                  mutex_exit(&self->tn_tlock);
1417 1420                  error = EEXIST;
1418 1421                  goto done1;
1419 1422          }
1420 1423          mutex_exit(&self->tn_tlock);
1421 1424  
1422 1425          if (vn_vfswlock(vp)) {
1423 1426                  error = EBUSY;
1424 1427                  goto done1;
1425 1428          }
1426 1429          if (vn_mountedvfs(vp) != NULL) {
1427 1430                  error = EBUSY;
1428 1431                  goto done;
1429 1432          }
1430 1433  
1431 1434          /*
1432 1435           * Check for an empty directory
1433 1436           * i.e. only includes entries for "." and ".."
1434 1437           */
1435 1438          if (self->tn_dirents > 2) {
1436 1439                  error = EEXIST;         /* SIGH should be ENOTEMPTY */
1437 1440                  /*
1438 1441                   * Update atime because checking tn_dirents is logically
1439 1442                   * equivalent to reading the directory
1440 1443                   */
1441 1444                  gethrestime(&self->tn_atime);
1442 1445                  goto done;
1443 1446          }
1444 1447  
1445 1448          error = tdirdelete(parent, self, nm, DR_RMDIR, cred);
1446 1449  done:
1447 1450          vn_vfsunlock(vp);
1448 1451  done1:
1449 1452          rw_exit(&self->tn_rwlock);
1450 1453          rw_exit(&parent->tn_rwlock);
1451 1454          vnevent_rmdir(TNTOV(self), dvp, nm, ct);
1452 1455          tmpnode_rele(self);
1453 1456  
1454 1457          return (error);
1455 1458  }
1456 1459  
1457 1460  /* ARGSUSED2 */
1458 1461  static int
1459 1462  tmp_readdir(
1460 1463          struct vnode *vp,
1461 1464          struct uio *uiop,
1462 1465          struct cred *cred,
1463 1466          int *eofp,
1464 1467          caller_context_t *ct,
1465 1468          int flags)
1466 1469  {
1467 1470          struct tmpnode *tp = (struct tmpnode *)VTOTN(vp);
1468 1471          struct tdirent *tdp;
1469 1472          int error = 0;
1470 1473          size_t namelen;
1471 1474          struct dirent64 *dp;
1472 1475          ulong_t offset;
1473 1476          ulong_t total_bytes_wanted;
1474 1477          long outcount = 0;
1475 1478          long bufsize;
1476 1479          int reclen;
1477 1480          caddr_t outbuf;
1478 1481  
1479 1482          if (uiop->uio_loffset >= MAXOFF_T) {
1480 1483                  if (eofp)
1481 1484                          *eofp = 1;
1482 1485                  return (0);
1483 1486          }
1484 1487          /*
1485 1488           * assuming the system call has already called tmp_rwlock
1486 1489           */
1487 1490          ASSERT(RW_READ_HELD(&tp->tn_rwlock));
1488 1491  
1489 1492          if (uiop->uio_iovcnt != 1)
1490 1493                  return (EINVAL);
1491 1494  
1492 1495          if (vp->v_type != VDIR)
1493 1496                  return (ENOTDIR);
1494 1497  
1495 1498          /*
1496 1499           * There's a window here where someone could have removed
1497 1500           * all the entries in the directory after we put a hold on the
1498 1501           * vnode but before we grabbed the rwlock.  Just return.
1499 1502           */
1500 1503          if (tp->tn_dir == NULL) {
1501 1504                  if (tp->tn_nlink) {
1502 1505                          panic("empty directory 0x%p", (void *)tp);
1503 1506                          /*NOTREACHED*/
1504 1507                  }
1505 1508                  return (0);
1506 1509          }
1507 1510  
1508 1511          /*
1509 1512           * Get space for multiple directory entries
1510 1513           */
1511 1514          total_bytes_wanted = uiop->uio_iov->iov_len;
1512 1515          bufsize = total_bytes_wanted + sizeof (struct dirent64);
1513 1516          outbuf = kmem_alloc(bufsize, KM_SLEEP);
1514 1517  
1515 1518          dp = (struct dirent64 *)outbuf;
1516 1519  
1517 1520  
1518 1521          offset = 0;
1519 1522          tdp = tp->tn_dir;
1520 1523          while (tdp) {
1521 1524                  namelen = strlen(tdp->td_name); /* no +1 needed */
1522 1525                  offset = tdp->td_offset;
1523 1526                  if (offset >= uiop->uio_offset) {
1524 1527                          reclen = (int)DIRENT64_RECLEN(namelen);
1525 1528                          if (outcount + reclen > total_bytes_wanted) {
1526 1529                                  if (!outcount)
1527 1530                                          /*
1528 1531                                           * Buffer too small for any entries.
1529 1532                                           */
1530 1533                                          error = EINVAL;
1531 1534                                  break;
1532 1535                          }
1533 1536                          ASSERT(tdp->td_tmpnode != NULL);
1534 1537  
1535 1538                          /* use strncpy(9f) to zero out uninitialized bytes */
1536 1539  
1537 1540                          (void) strncpy(dp->d_name, tdp->td_name,
1538 1541                              DIRENT64_NAMELEN(reclen));
1539 1542                          dp->d_reclen = (ushort_t)reclen;
1540 1543                          dp->d_ino = (ino64_t)tdp->td_tmpnode->tn_nodeid;
1541 1544                          dp->d_off = (offset_t)tdp->td_offset + 1;
1542 1545                          dp = (struct dirent64 *)
1543 1546                              ((uintptr_t)dp + dp->d_reclen);
1544 1547                          outcount += reclen;
1545 1548                          ASSERT(outcount <= bufsize);
1546 1549                  }
1547 1550                  tdp = tdp->td_next;
1548 1551          }
1549 1552  
1550 1553          if (!error)
1551 1554                  error = uiomove(outbuf, outcount, UIO_READ, uiop);
1552 1555  
1553 1556          if (!error) {
1554 1557                  /* If we reached the end of the list our offset */
1555 1558                  /* should now be just past the end. */
1556 1559                  if (!tdp) {
1557 1560                          offset += 1;
1558 1561                          if (eofp)
1559 1562                                  *eofp = 1;
1560 1563                  } else if (eofp)
1561 1564                          *eofp = 0;
1562 1565                  uiop->uio_offset = offset;
1563 1566          }
1564 1567          gethrestime(&tp->tn_atime);
1565 1568          kmem_free(outbuf, bufsize);
1566 1569          return (error);
1567 1570  }
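
tmp_readdir packs variable-length dirent64 records: d_reclen carries the DIRENT64_RECLEN()-rounded size of each record, and d_off points one past the entry so the next call resumes behind it. A sketch of the consuming side, under the assumption that the illumos getdents(2) interface is available and the program is compiled 64-bit (so struct dirent matches the 64-bit record layout); the directory path is arbitrary:

    #include <sys/types.h>
    #include <sys/dirent.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int
    main(void)
    {
            char buf[8192];
            int fd = open("/tmp", O_RDONLY);
            int n;

            while (fd >= 0 && (n = getdents(fd, (struct dirent *)buf,
                sizeof (buf))) > 0) {
                    struct dirent *dp = (struct dirent *)buf;

                    while ((char *)dp < buf + n) {
                            printf("%lld\t%s\n", (long long)dp->d_ino,
                                dp->d_name);
                            /* d_reclen is the distance to the next record */
                            dp = (struct dirent *)((char *)dp + dp->d_reclen);
                    }
            }
            if (fd >= 0)
                    (void) close(fd);
            return (0);
    }
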
1568 1571  
1569 1572  /* ARGSUSED5 */
1570 1573  static int
1571 1574  tmp_symlink(
1572 1575          struct vnode *dvp,
1573 1576          char *lnm,
1574 1577          struct vattr *tva,
1575 1578          char *tnm,
1576 1579          struct cred *cred,
1577 1580          caller_context_t *ct,
1578 1581          int flags)
1579 1582  {
1580 1583          struct tmpnode *parent = (struct tmpnode *)VTOTN(dvp);
1581 1584          struct tmpnode *self = (struct tmpnode *)NULL;
1582 1585          struct tmount *tm = (struct tmount *)VTOTM(dvp);
1583 1586          char *cp = NULL;
1584 1587          int error;
1585 1588          size_t len;
1586 1589  
1587 1590          /* no symlinks allowed to files in xattr dirs */
1588 1591          if (parent->tn_flags & ISXATTR)
1589 1592                  return (EINVAL);
1590 1593  
1591 1594          error = tdirlookup(parent, lnm, &self, cred);
1592 1595          if (error == 0) {
1593 1596                  /*
1594 1597                   * The entry already exists
1595 1598                   */
1596 1599                  tmpnode_rele(self);
1597 1600                  return (EEXIST);        /* was 0 */
1598 1601          }
1599 1602  
1600 1603          if (error != ENOENT) {
1601 1604                  if (self != NULL)
1602 1605                          tmpnode_rele(self);
1603 1606                  return (error);
1604 1607          }
1605 1608  
1606 1609          rw_enter(&parent->tn_rwlock, RW_WRITER);
1607 1610          error = tdirenter(tm, parent, lnm, DE_CREATE, (struct tmpnode *)NULL,
1608 1611              (struct tmpnode *)NULL, tva, &self, cred, ct);
1609 1612          rw_exit(&parent->tn_rwlock);
1610 1613  
1611 1614          if (error) {
1612 1615                  if (self)
1613 1616                          tmpnode_rele(self);
1614 1617                  return (error);
1615 1618          }
1616 1619          len = strlen(tnm) + 1;
1617 1620          cp = tmp_memalloc(len, 0);
1618 1621          if (cp == NULL) {
1619 1622                  tmpnode_rele(self);
1620 1623                  return (ENOSPC);
1621 1624          }
1622 1625          (void) strcpy(cp, tnm);
1623 1626  
1624 1627          self->tn_symlink = cp;
1625 1628          self->tn_size = len - 1;
1626 1629          tmpnode_rele(self);
1627 1630          return (error);
1628 1631  }
1629 1632  
1630 1633  /* ARGSUSED2 */
1631 1634  static int
1632 1635  tmp_readlink(
1633 1636          struct vnode *vp,
1634 1637          struct uio *uiop,
1635 1638          struct cred *cred,
1636 1639          caller_context_t *ct)
1637 1640  {
1638 1641          struct tmpnode *tp = (struct tmpnode *)VTOTN(vp);
1639 1642          int error = 0;
1640 1643  
1641 1644          if (vp->v_type != VLNK)
1642 1645                  return (EINVAL);
1643 1646  
1644 1647          rw_enter(&tp->tn_rwlock, RW_READER);
1645 1648          rw_enter(&tp->tn_contents, RW_READER);
1646 1649          error = uiomove(tp->tn_symlink, tp->tn_size, UIO_READ, uiop);
1647 1650          gethrestime(&tp->tn_atime);
1648 1651          rw_exit(&tp->tn_contents);
1649 1652          rw_exit(&tp->tn_rwlock);
1650 1653          return (error);
1651 1654  }
1652 1655  
1653 1656  /* ARGSUSED */
1654 1657  static int
1655 1658  tmp_fsync(
1656 1659          struct vnode *vp,
1657 1660          int syncflag,
1658 1661          struct cred *cred,
1659 1662          caller_context_t *ct)
1660 1663  {
1661 1664          return (0);
1662 1665  }
1663 1666  
1664 1667  /* ARGSUSED */
1665 1668  static void
1666 1669  tmp_inactive(struct vnode *vp, struct cred *cred, caller_context_t *ct)
1667 1670  {
1668 1671          struct tmpnode *tp = (struct tmpnode *)VTOTN(vp);
1669 1672          struct tmount *tm = (struct tmount *)VFSTOTM(vp->v_vfsp);
1670 1673  
1671 1674          rw_enter(&tp->tn_rwlock, RW_WRITER);
1672 1675  top:
1673 1676          mutex_enter(&tp->tn_tlock);
1674 1677          mutex_enter(&vp->v_lock);
1675 1678          ASSERT(vp->v_count >= 1);
1676 1679  
1677 1680          /*
1678 1681           * If we don't have the last hold or the link count is non-zero,
1679 1682           * there's little to do -- just drop our hold.
1680 1683           */
1681 1684          if (vp->v_count > 1 || tp->tn_nlink != 0) {
1682 1685                  vp->v_count--;
1683 1686                  mutex_exit(&vp->v_lock);
1684 1687                  mutex_exit(&tp->tn_tlock);
1685 1688                  rw_exit(&tp->tn_rwlock);
1686 1689                  return;
1687 1690          }
1688 1691  
1689 1692          /*
1690 1693           * We have the last hold *and* the link count is zero, so this
1691 1694           * tmpnode is dead from the filesystem's viewpoint.  However,
1692 1695           * if the tmpnode has any pages associated with it (i.e. if it's
1693 1696           * a normal file with non-zero size), the tmpnode can still be
1694 1697           * discovered by pageout or fsflush via the page vnode pointers.
1695 1698           * In this case we must drop all our locks, truncate the tmpnode,
1696 1699           * and try the whole dance again.
1697 1700           */
1698 1701          if (tp->tn_size != 0) {
1699 1702                  if (tp->tn_type == VREG) {
1700 1703                          mutex_exit(&vp->v_lock);
1701 1704                          mutex_exit(&tp->tn_tlock);
1702 1705                          rw_enter(&tp->tn_contents, RW_WRITER);
1703 1706                          (void) tmpnode_trunc(tm, tp, 0);
1704 1707                          rw_exit(&tp->tn_contents);
1705 1708                          ASSERT(tp->tn_size == 0);
1706 1709                          ASSERT(tp->tn_nblocks == 0);
1707 1710                          goto top;
1708 1711                  }
1709 1712                  if (tp->tn_type == VLNK)
1710 1713                          tmp_memfree(tp->tn_symlink, tp->tn_size + 1);
1711 1714          }
1712 1715  
1713 1716          /*
1714 1717           * Remove normal file/dir's xattr dir and xattrs.
1715 1718           */
1716 1719          if (tp->tn_xattrdp) {
1717 1720                  struct tmpnode *xtp = tp->tn_xattrdp;
1718 1721  
1719 1722                  ASSERT(xtp->tn_flags & ISXATTR);
1720 1723                  tmpnode_hold(xtp);
1721 1724                  rw_enter(&xtp->tn_rwlock, RW_WRITER);
1722 1725                  tdirtrunc(xtp);
1723 1726                  DECR_COUNT(&xtp->tn_nlink, &xtp->tn_tlock);
1724 1727                  tp->tn_xattrdp = NULL;
1725 1728                  rw_exit(&xtp->tn_rwlock);
1726 1729                  tmpnode_rele(xtp);
1727 1730          }
1728 1731  
1729 1732          mutex_exit(&vp->v_lock);
1730 1733          mutex_exit(&tp->tn_tlock);
1731 1734          /* Here's our chance to send invalid event while we're between locks */
1732 1735          vn_invalid(TNTOV(tp));
1733 1736          mutex_enter(&tm->tm_contents);
1734 1737          if (tp->tn_forw == NULL)
1735 1738                  tm->tm_rootnode->tn_back = tp->tn_back;
1736 1739          else
1737 1740                  tp->tn_forw->tn_back = tp->tn_back;
1738 1741          tp->tn_back->tn_forw = tp->tn_forw;
1739 1742          mutex_exit(&tm->tm_contents);
1740 1743          rw_exit(&tp->tn_rwlock);
1741 1744          rw_destroy(&tp->tn_rwlock);
1742 1745          mutex_destroy(&tp->tn_tlock);
1743 1746          vn_free(TNTOV(tp));
1744 1747          tmp_memfree(tp, sizeof (struct tmpnode));
1745 1748  }
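
The two conditions tested above (last hold and zero link count) are the kernel side of the familiar unlink-while-open pattern: removing the last name drops tn_nlink to zero, but the tmpnode is torn down only when the final hold goes away. A userland sketch of that lifetime; the file name is hypothetical:

    #include <fcntl.h>
    #include <unistd.h>

    int
    main(void)
    {
            int fd = open("/tmp/scratch", O_CREAT | O_RDWR, 0644);

            if (fd < 0)
                    return (1);
            (void) unlink("/tmp/scratch"); /* tn_nlink drops to 0 */
            (void) write(fd, "x", 1);      /* tmpnode still alive via fd */
            (void) close(fd);              /* last hold: tmp_inactive frees */
            return (0);
    }
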
1746 1749  
1747 1750  /* ARGSUSED2 */
1748 1751  static int
1749 1752  tmp_fid(struct vnode *vp, struct fid *fidp, caller_context_t *ct)
1750 1753  {
1751 1754          struct tmpnode *tp = (struct tmpnode *)VTOTN(vp);
1752 1755          struct tfid *tfid;
1753 1756  
1754 1757          if (fidp->fid_len < (sizeof (struct tfid) - sizeof (ushort_t))) {
1755 1758                  fidp->fid_len = sizeof (struct tfid) - sizeof (ushort_t);
1756 1759                  return (ENOSPC);
1757 1760          }
1758 1761  
1759 1762          tfid = (struct tfid *)fidp;
1760 1763          bzero(tfid, sizeof (struct tfid));
1761 1764          tfid->tfid_len = (int)sizeof (struct tfid) - sizeof (ushort_t);
1762 1765  
1763 1766          tfid->tfid_ino = tp->tn_nodeid;
1764 1767          tfid->tfid_gen = tp->tn_gen;
1765 1768  
1766 1769          return (0);
1767 1770  }
1768 1771  
1769 1772  
1770 1773  /*
1771 1774   * Return all the pages from [off..off+len] in given file
1772 1775   */
1773 1776  /* ARGSUSED */
1774 1777  static int
1775 1778  tmp_getpage(
1776 1779          struct vnode *vp,
1777 1780          offset_t off,
1778 1781          size_t len,
1779 1782          uint_t *protp,
1780 1783          page_t *pl[],
1781 1784          size_t plsz,
1782 1785          struct seg *seg,
1783 1786          caddr_t addr,
1784 1787          enum seg_rw rw,
1785 1788          struct cred *cr,
1786 1789          caller_context_t *ct)
1787 1790  {
1788 1791          int err = 0;
1789 1792          struct tmpnode *tp = VTOTN(vp);
1790 1793          anoff_t toff = (anoff_t)off;
1791 1794          size_t tlen = len;
1792 1795          u_offset_t tmpoff;
1793 1796          timestruc_t now;
1794 1797  
1795 1798          rw_enter(&tp->tn_contents, RW_READER);
1796 1799  
1797 1800          if (off + len  > tp->tn_size + PAGEOFFSET) {
1798 1801                  err = EFAULT;
1799 1802                  goto out;
1800 1803          }
1801 1804          /*
1802 1805           * Look for holes (no anon slot) in faulting range. If there are
1803 1806           * holes we have to switch to a write lock and fill them in. Swap
1804 1807           * space for holes was already reserved when the file was grown.
1805 1808           */
1806 1809          tmpoff = toff;
1807 1810          if (non_anon(tp->tn_anon, btop(off), &tmpoff, &tlen)) {
1808 1811                  if (!rw_tryupgrade(&tp->tn_contents)) {
1809 1812                          rw_exit(&tp->tn_contents);
1810 1813                          rw_enter(&tp->tn_contents, RW_WRITER);
1811 1814                          /* Size may have changed when lock was dropped */
1812 1815                          if (off + len  > tp->tn_size + PAGEOFFSET) {
1813 1816                                  err = EFAULT;
1814 1817                                  goto out;
1815 1818                          }
1816 1819                  }
1817 1820                  for (toff = (anoff_t)off; toff < (anoff_t)off + len;
1818 1821                      toff += PAGESIZE) {
1819 1822                          if (anon_get_ptr(tp->tn_anon, btop(toff)) == NULL) {
1820 1823                                  /* XXX - may allocate mem w. write lock held */
1821 1824                                  (void) anon_set_ptr(tp->tn_anon, btop(toff),
1822 1825                                      anon_alloc(vp, toff), ANON_SLEEP);
1823 1826                                  tp->tn_nblocks++;
1824 1827                          }
1825 1828                  }
1826 1829                  rw_downgrade(&tp->tn_contents);
1827 1830          }
1828 1831  
1829 1832  
1830 1833          err = pvn_getpages(tmp_getapage, vp, (u_offset_t)off, len, protp,
1831 1834              pl, plsz, seg, addr, rw, cr);
1832 1835  
1833 1836          gethrestime(&now);
1834 1837          tp->tn_atime = now;
1835 1838          if (rw == S_WRITE)
1836 1839                  tp->tn_mtime = now;
1837 1840  
1838 1841  out:
1839 1842          rw_exit(&tp->tn_contents);
1840 1843          return (err);
1841 1844  }
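
The hole handling above has a simple userland counterpart: growing a tmpfs file with ftruncate reserves swap but leaves the anon slots empty, so the first store into such a page (here through a shared mapping) drives tmp_getpage down the write-locked path that allocates the slot. A sketch with a hypothetical path:

    #include <sys/mman.h>
    #include <fcntl.h>
    #include <unistd.h>

    int
    main(void)
    {
            char *p;
            int fd = open("/tmp/sparse", O_CREAT | O_RDWR, 0644);

            if (fd < 0 || ftruncate(fd, 8192) != 0)
                    return (1);
            p = mmap(NULL, 8192, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
            if (p == MAP_FAILED)
                    return (1);
            p[0] = 'x';     /* first store faults and fills the hole */
            (void) munmap(p, 8192);
            (void) close(fd);
            return (0);
    }
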
1842 1845  
1843 1846  /*
1844 1847   * Called from pvn_getpages to get a particular page.
1845 1848   */
1846 1849  /*ARGSUSED*/
1847 1850  static int
1848 1851  tmp_getapage(
1849 1852          struct vnode *vp,
1850 1853          u_offset_t off,
1851 1854          size_t len,
1852 1855          uint_t *protp,
1853 1856          page_t *pl[],
1854 1857          size_t plsz,
1855 1858          struct seg *seg,
1856 1859          caddr_t addr,
1857 1860          enum seg_rw rw,
1858 1861          struct cred *cr)
1859 1862  {
1860 1863          struct page *pp;
1861 1864          int flags;
1862 1865          int err = 0;
1863 1866          struct vnode *pvp;
1864 1867          u_offset_t poff;
1865 1868  
1866 1869          if (protp != NULL)
1867 1870                  *protp = PROT_ALL;
1868 1871  again:
1869 1872          if (pp = page_lookup(vp, off, rw == S_CREATE ? SE_EXCL : SE_SHARED)) {
1870 1873                  if (pl) {
1871 1874                          pl[0] = pp;
1872 1875                          pl[1] = NULL;
1873 1876                  } else {
1874 1877                          page_unlock(pp);
1875 1878                  }
1876 1879          } else {
1877 1880                  pp = page_create_va(vp, off, PAGESIZE,
1878 1881                      PG_WAIT | PG_EXCL, seg, addr);
1879 1882                  /*
1880 1883                   * Someone raced in and created the page after we did the
1881 1884                   * lookup but before we did the create, so go back and
1882 1885                   * try to look it up again.
1883 1886                   */
1884 1887                  if (pp == NULL)
1885 1888                          goto again;
1886 1889                  /*
1887 1890                   * Fill page from backing store, if any. If none, then
1888 1891                   * either this is a newly filled hole or page must have
1889 1892                   * been unmodified and freed so just zero it out.
1890 1893                   */
1891 1894                  err = swap_getphysname(vp, off, &pvp, &poff);
1892 1895                  if (err) {
1893 1896                          panic("tmp_getapage: no anon slot vp %p "
1894 1897                              "off %llx pp %p\n", (void *)vp, off, (void *)pp);
1895 1898                  }
1896 1899                  if (pvp) {
1897 1900                          flags = (pl == NULL ? B_ASYNC|B_READ : B_READ);
1898 1901                          err = VOP_PAGEIO(pvp, pp, (u_offset_t)poff, PAGESIZE,
1899 1902                              flags, cr, NULL);
1900 1903                          if (flags & B_ASYNC)
1901 1904                                  pp = NULL;
1902 1905                  } else if (rw != S_CREATE) {
1903 1906                          pagezero(pp, 0, PAGESIZE);
1904 1907                  }
1905 1908                  if (err && pp)
1906 1909                          pvn_read_done(pp, B_ERROR);
1907 1910                  if (err == 0) {
1908 1911                          if (pl)
1909 1912                                  pvn_plist_init(pp, pl, plsz, off, PAGESIZE, rw);
1910 1913                          else
1911 1914                                  pvn_io_done(pp);
1912 1915                  }
1913 1916          }
1914 1917          return (err);
1915 1918  }
1916 1919  
1917 1920  
1918 1921  /*
1919 1922   * Flags are composed of {B_INVAL, B_DIRTY, B_FREE, B_DONTNEED}.
1920 1923   * If len == 0, do from off to EOF.
1921 1924   */
1922 1925  static int tmp_nopage = 0;      /* Don't do tmp_putpage's if set */
1923 1926  
1924 1927  /* ARGSUSED */
1925 1928  int
1926 1929  tmp_putpage(
1927 1930          register struct vnode *vp,
1928 1931          offset_t off,
1929 1932          size_t len,
1930 1933          int flags,
1931 1934          struct cred *cr,
1932 1935          caller_context_t *ct)
1933 1936  {
1934 1937          register page_t *pp;
1935 1938          u_offset_t io_off;
1936 1939          size_t io_len = 0;
1937 1940          int err = 0;
1938 1941          struct tmpnode *tp = VTOTN(vp);
1939 1942          int dolock;
1940 1943  
1941 1944          if (tmp_nopage)
1942 1945                  return (0);
1943 1946  
1944 1947          ASSERT(vp->v_count != 0);
1945 1948  
1946 1949          if (vp->v_flag & VNOMAP)
1947 1950                  return (ENOSYS);
1948 1951  
1949 1952          /*
1950 1953           * This being tmpfs, we don't ever do i/o unless we really
1951 1954           * have to (when we're low on memory and pageout calls us
1952 1955           * with B_ASYNC | B_FREE or the user explicitly asks for it with
1953 1956           * B_DONTNEED).
1954 1957           * XXX to approximately track the mod time like ufs we should
1955 1958           * update the times here. The problem is, once someone does a
1956 1959           * store we never clear the mod bit and do i/o, thus fsflush
1957 1960           * will keep calling us every 30 seconds to do the i/o and we'll
1958 1961           * continually update the mod time. At least we update the mod
1959 1962           * time on the first store because this results in a call to getpage.
1960 1963           */
1961 1964          if (flags != (B_ASYNC | B_FREE) && (flags & B_INVAL) == 0 &&
1962 1965              (flags & B_DONTNEED) == 0)
1963 1966                  return (0);
1964 1967          /*
1965 1968           * If this thread owns the lock, i.e., this thread grabbed it
1966 1969           * as writer somewhere above, then we don't need to grab the
1967 1970           * lock as reader in this routine.
1968 1971           */
1969 1972          dolock = (rw_owner(&tp->tn_contents) != curthread);
1970 1973  
1971 1974          /*
1972 1975           * If this is pageout don't block on the lock as you could deadlock
1973 1976           * when freemem == 0 (another thread has the read lock and is blocked
1974 1977           * creating a page, and a third thread is waiting to get the writers
1975 1978           * lock - waiting writers priority blocks us from getting the read
1976 1979           * lock). Of course, if the only freeable pages are on this tmpnode
1977 1980           * we're hosed anyways. A better solution might be a new lock type.
1978 1981           * Note: ufs has the same problem.
1979 1982           */
1980 1983          if (curproc == proc_pageout) {
1981 1984                  if (!rw_tryenter(&tp->tn_contents, RW_READER))
1982 1985                          return (ENOMEM);
1983 1986          } else if (dolock)
1984 1987                  rw_enter(&tp->tn_contents, RW_READER);
1985 1988  
1986 1989          if (!vn_has_cached_data(vp))
1987 1990                  goto out;
1988 1991  
1989 1992          if (len == 0) {
1990 1993                  if (curproc == proc_pageout) {
1991 1994                          panic("tmp: pageout can't block");
1992 1995                          /*NOTREACHED*/
1993 1996                  }
1994 1997  
1995 1998                  /* Search the entire vp list for pages >= off. */
1996 1999                  err = pvn_vplist_dirty(vp, (u_offset_t)off, tmp_putapage,
1997 2000                      flags, cr);
1998 2001          } else {
1999 2002                  u_offset_t eoff;
2000 2003  
2001 2004                  /*
2002 2005                   * Loop over all offsets in the range [off...off + len]
2003 2006                   * looking for pages to deal with.
2004 2007                   */
2005 2008                  eoff = MIN(off + len, tp->tn_size);
2006 2009                  for (io_off = off; io_off < eoff; io_off += io_len) {
2007 2010                          /*
2008 2011                           * If we are not invalidating, synchronously
2009 2012                           * freeing or writing pages use the routine
2010 2013                           * page_lookup_nowait() to prevent reclaiming
2011 2014                           * them from the free list.
2012 2015                           */
2013 2016                          if ((flags & B_INVAL) || ((flags & B_ASYNC) == 0)) {
2014 2017                                  pp = page_lookup(vp, io_off,
2015 2018                                      (flags & (B_INVAL | B_FREE)) ?
2016 2019                                      SE_EXCL : SE_SHARED);
2017 2020                          } else {
2018 2021                                  pp = page_lookup_nowait(vp, io_off,
2019 2022                                      (flags & B_FREE) ? SE_EXCL : SE_SHARED);
2020 2023                          }
2021 2024  
2022 2025                          if (pp == NULL || pvn_getdirty(pp, flags) == 0)
2023 2026                                  io_len = PAGESIZE;
2024 2027                          else {
2025 2028                                  err = tmp_putapage(vp, pp, &io_off, &io_len,
2026 2029                                      flags, cr);
2027 2030                                  if (err != 0)
2028 2031                                          break;
2029 2032                          }
2030 2033                  }
2031 2034          }
2032 2035          /* If invalidating, verify all pages on vnode list are gone. */
2033 2036          if (err == 0 && off == 0 && len == 0 &&
2034 2037              (flags & B_INVAL) && vn_has_cached_data(vp)) {
2035 2038                  panic("tmp_putpage: B_INVAL, pages not gone");
2036 2039                  /*NOTREACHED*/
2037 2040          }
2038 2041  out:
2039 2042          if ((curproc == proc_pageout) || dolock)
2040 2043                  rw_exit(&tp->tn_contents);
2041 2044          /*
2042 2045           * The only reason putapage will return SE_NOSWAP is when we
2043 2046           * ask for a page to be written to physical backing store and
2044 2047           * there is none. Ignore this because we might be dealing
2045 2048           * with a swap page which has no backing store on disk. In
2046 2049           * any other case we won't see this error here.
2047 2050           */
2048 2051          if (err == SE_NOSWAP)
2049 2052                  err = 0;
2050 2053          return (err);
2051 2054  }
2052 2055  
2053 2056  long tmp_putpagecnt, tmp_pagespushed;
2054 2057  
2055 2058  /*
2056 2059   * Write out a single page.
2057 2060   * For tmpfs this means choose a physical swap slot and write the page
2058 2061   * out using VOP_PAGEIO. For performance, we attempt to kluster; i.e.,
2059 2062   * we try to find a bunch of other dirty pages adjacent in the file
2060 2063   * and a bunch of contiguous swap slots, and then write all the pages
2061 2064   * out in a single i/o.
2062 2065   */
2063 2066  /*ARGSUSED*/
2064 2067  static int
2065 2068  tmp_putapage(
2066 2069          struct vnode *vp,
2067 2070          page_t *pp,
2068 2071          u_offset_t *offp,
2069 2072          size_t *lenp,
2070 2073          int flags,
2071 2074          struct cred *cr)
2072 2075  {
2073 2076          int err;
2074 2077          ulong_t klstart, kllen;
2075 2078          page_t *pplist, *npplist;
2076 2079          extern int klustsize;
2077 2080          long tmp_klustsize;
2078 2081          struct tmpnode *tp;
2079 2082          size_t pp_off, pp_len;
2080 2083          u_offset_t io_off;
2081 2084          size_t io_len;
2082 2085          struct vnode *pvp;
2083 2086          u_offset_t pstart;
2084 2087          u_offset_t offset;
2085 2088          u_offset_t tmpoff;
2086 2089  
2087 2090          ASSERT(PAGE_LOCKED(pp));
2088 2091  
2089 2092          /* Kluster in tmp_klustsize chunks */
2090 2093          tp = VTOTN(vp);
2091 2094          tmp_klustsize = klustsize;
2092 2095          offset = pp->p_offset;
2093 2096          klstart = (offset / tmp_klustsize) * tmp_klustsize;
2094 2097          kllen = MIN(tmp_klustsize, tp->tn_size - klstart);
2095 2098  
2096 2099          /* Get a kluster of pages */
2097 2100          pplist =
2098 2101              pvn_write_kluster(vp, pp, &tmpoff, &pp_len, klstart, kllen, flags);
2099 2102  
2100 2103          pp_off = (size_t)tmpoff;
2101 2104  
2102 2105          /*
2103 2106           * Get a cluster of physical offsets for the pages; the amount we
2104 2107           * get may be some subrange of what we ask for (io_off, io_len).
2105 2108           */
2106 2109          io_off = pp_off;
2107 2110          io_len = pp_len;
2108 2111          err = swap_newphysname(vp, offset, &io_off, &io_len, &pvp, &pstart);
2109 2112          ASSERT(err != SE_NOANON); /* anon slot must have been filled */
2110 2113          if (err) {
2111 2114                  pvn_write_done(pplist, B_ERROR | B_WRITE | flags);
2112 2115                  /*
2113 2116                   * If this routine is called as a result of segvn_sync
2114 2117                   * operation and we have no physical swap then we can get an
2115 2118                   * error here. In such case we would return SE_NOSWAP as error.
2116 2119                   * At this point, we expect only SE_NOSWAP.
2117 2120                   */
2118 2121                  ASSERT(err == SE_NOSWAP);
2119 2122                  if (flags & B_INVAL)
2120 2123                          err = ENOMEM;
2121 2124                  goto out;
2122 2125          }
2123 2126          ASSERT(pp_off <= io_off && io_off + io_len <= pp_off + pp_len);
2124 2127          ASSERT(io_off <= offset && offset < io_off + io_len);
2125 2128  
2126 2129          /* Toss pages at front/rear that we couldn't get physical backing for */
2127 2130          if (io_off != pp_off) {
2128 2131                  npplist = NULL;
2129 2132                  page_list_break(&pplist, &npplist, btop(io_off - pp_off));
2130 2133                  ASSERT(pplist->p_offset == pp_off);
2131 2134                  ASSERT(pplist->p_prev->p_offset == io_off - PAGESIZE);
2132 2135                  pvn_write_done(pplist, B_ERROR | B_WRITE | flags);
2133 2136                  pplist = npplist;
2134 2137          }
2135 2138          if (io_off + io_len < pp_off + pp_len) {
2136 2139                  npplist = NULL;
2137 2140                  page_list_break(&pplist, &npplist, btop(io_len));
2138 2141                  ASSERT(npplist->p_offset == io_off + io_len);
2139 2142                  ASSERT(npplist->p_prev->p_offset == pp_off + pp_len - PAGESIZE);
2140 2143                  pvn_write_done(npplist, B_ERROR | B_WRITE | flags);
2141 2144          }
2142 2145  
2143 2146          ASSERT(pplist->p_offset == io_off);
2144 2147          ASSERT(pplist->p_prev->p_offset == io_off + io_len - PAGESIZE);
2145 2148          ASSERT(btopr(io_len) <= btopr(kllen));
2146 2149  
2147 2150          /* Do i/o on the remaining kluster */
2148 2151          err = VOP_PAGEIO(pvp, pplist, (u_offset_t)pstart, io_len,
2149 2152              B_WRITE | flags, cr, NULL);
2150 2153  
2151 2154          if ((flags & B_ASYNC) == 0) {
2152 2155                  pvn_write_done(pplist, ((err) ? B_ERROR : 0) | B_WRITE | flags);
2153 2156          }
2154 2157  out:
2155 2158          if (!err) {
2156 2159                  if (offp)
2157 2160                          *offp = io_off;
2158 2161                  if (lenp)
2159 2162                          *lenp = io_len;
2160 2163                  tmp_putpagecnt++;
2161 2164                  tmp_pagespushed += btop(io_len);
2162 2165          }
2163 2166          if (err && err != ENOMEM && err != SE_NOSWAP)
2164 2167                  cmn_err(CE_WARN, "tmp_putapage: err %d\n", err);
2165 2168          return (err);
2166 2169  }
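
The klustering arithmetic near the top of tmp_putapage rounds the pushed page down to a tmp_klustsize boundary and clamps the kluster length at end-of-file. A standalone sketch of the same computation with hypothetical sizes:

    #include <stdio.h>

    #define MIN(a, b)       ((a) < (b) ? (a) : (b))

    int
    main(void)
    {
            unsigned long long klustsize = 0x100000; /* 1M kluster */
            unsigned long long offset = 0x253000;    /* page being pushed */
            unsigned long long tn_size = 0x280000;   /* file size */
            unsigned long long klstart, kllen;

            klstart = (offset / klustsize) * klustsize; /* 0x200000 */
            kllen = MIN(klustsize, tn_size - klstart);  /* 0x80000: EOF clamp */
            (void) printf("klstart=0x%llx kllen=0x%llx\n", klstart, kllen);
            return (0);
    }
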
2167 2170  
2168 2171  /* ARGSUSED */
2169 2172  static int
2170 2173  tmp_map(
2171 2174          struct vnode *vp,
2172 2175          offset_t off,
2173 2176          struct as *as,
2174 2177          caddr_t *addrp,
2175 2178          size_t len,
2176 2179          uchar_t prot,
2177 2180          uchar_t maxprot,
2178 2181          uint_t flags,
2179 2182          struct cred *cred,
2180 2183          caller_context_t *ct)
2181 2184  {
2182 2185          struct segvn_crargs vn_a;
2183 2186          struct tmpnode *tp = (struct tmpnode *)VTOTN(vp);
2184 2187          int error;
2185 2188  
2186 2189  #ifdef _ILP32
2187 2190          if (len > MAXOFF_T)
2188 2191                  return (ENOMEM);
2189 2192  #endif
2190 2193  
2191 2194          if (vp->v_flag & VNOMAP)
2192 2195                  return (ENOSYS);
2193 2196  
2194 2197          if (off < 0 || (offset_t)(off + len) < 0 ||
2195 2198              off > MAXOFF_T || (off + len) > MAXOFF_T)
2196 2199                  return (ENXIO);
2197 2200  
2198 2201          if (vp->v_type != VREG)
2199 2202                  return (ENODEV);
2200 2203  
2201 2204          /*
2202 2205           * Don't allow mapping to locked file
2203 2206           */
2204 2207          if (vn_has_mandatory_locks(vp, tp->tn_mode)) {
2205 2208                  return (EAGAIN);
2206 2209          }
2207 2210  
2208 2211          as_rangelock(as);
2209 2212          error = choose_addr(as, addrp, len, off, ADDR_VACALIGN, flags);
2210 2213          if (error != 0) {
2211 2214                  as_rangeunlock(as);
2212 2215                  return (error);
2213 2216          }
2214 2217  
2215 2218          vn_a.vp = vp;
2216 2219          vn_a.offset = (u_offset_t)off;
2217 2220          vn_a.type = flags & MAP_TYPE;
2218 2221          vn_a.prot = prot;
2219 2222          vn_a.maxprot = maxprot;
2220 2223          vn_a.flags = flags & ~MAP_TYPE;
2221 2224          vn_a.cred = cred;
2222 2225          vn_a.amp = NULL;
2223 2226          vn_a.szc = 0;
2224 2227          vn_a.lgrp_mem_policy_flags = 0;
2225 2228  
2226 2229          error = as_map(as, *addrp, len, segvn_create, &vn_a);
2227 2230          as_rangeunlock(as);
2228 2231          return (error);
2229 2232  }
2230 2233  
2231 2234  /*
2232 2235   * tmp_addmap and tmp_delmap can't be called since the vp
2233 2236   * maintained in the segvn mapping is NULL.
2234 2237   */
2235 2238  /* ARGSUSED */
2236 2239  static int
2237 2240  tmp_addmap(
2238 2241          struct vnode *vp,
2239 2242          offset_t off,
2240 2243          struct as *as,
2241 2244          caddr_t addr,
2242 2245          size_t len,
2243 2246          uchar_t prot,
2244 2247          uchar_t maxprot,
2245 2248          uint_t flags,
2246 2249          struct cred *cred,
2247 2250          caller_context_t *ct)
2248 2251  {
2249 2252          return (0);
2250 2253  }
2251 2254  
2252 2255  /* ARGSUSED */
2253 2256  static int
2254 2257  tmp_delmap(
2255 2258          struct vnode *vp,
2256 2259          offset_t off,
2257 2260          struct as *as,
2258 2261          caddr_t addr,
2259 2262          size_t len,
2260 2263          uint_t prot,
2261 2264          uint_t maxprot,
2262 2265          uint_t flags,
2263 2266          struct cred *cred,
2264 2267          caller_context_t *ct)
2265 2268  {
2266 2269          return (0);
2267 2270  }
2268 2271  
2269 2272  static int
2270 2273  tmp_freesp(struct vnode *vp, struct flock64 *lp, int flag)
2271 2274  {
2272 2275          register int i;
2273 2276          register struct tmpnode *tp = VTOTN(vp);
2274 2277          int error;
2275 2278  
2276 2279          ASSERT(vp->v_type == VREG);
2277 2280          ASSERT(lp->l_start >= 0);
2278 2281  
2279 2282          if (lp->l_len != 0)
2280 2283                  return (EINVAL);
2281 2284  
2282 2285          rw_enter(&tp->tn_rwlock, RW_WRITER);
2283 2286          if (tp->tn_size == lp->l_start) {
2284 2287                  rw_exit(&tp->tn_rwlock);
2285 2288                  return (0);
2286 2289          }
2287 2290  
2288 2291          /*
2289 2292           * Check for any mandatory locks on the range
2290 2293           */
2291 2294          if (MANDLOCK(vp, tp->tn_mode)) {
2292 2295                  long save_start;
2293 2296  
2294 2297                  save_start = lp->l_start;
2295 2298  
2296 2299                  if (tp->tn_size < lp->l_start) {
2297 2300                          /*
2298 2301                           * "Truncate up" case: need to make sure there
2299 2302                           * is no lock beyond current end-of-file. To
2300 2303                           * do so, we need to set l_start to the size
2301 2304                           * of the file temporarily.
2302 2305                           */
2303 2306                          lp->l_start = tp->tn_size;
2304 2307                  }
2305 2308                  lp->l_type = F_WRLCK;
2306 2309                  lp->l_sysid = 0;
2307 2310                  lp->l_pid = ttoproc(curthread)->p_pid;
2308 2311                  i = (flag & (FNDELAY|FNONBLOCK)) ? 0 : SLPFLCK;
2309 2312                  if ((i = reclock(vp, lp, i, 0, lp->l_start, NULL)) != 0 ||
2310 2313                      lp->l_type != F_UNLCK) {
2311 2314                          rw_exit(&tp->tn_rwlock);
2312 2315                          return (i ? i : EAGAIN);
2313 2316                  }
2314 2317  
2315 2318                  lp->l_start = save_start;
2316 2319          }
2317 2320          VFSTOTM(vp->v_vfsp);
2318 2321  
2319 2322          rw_enter(&tp->tn_contents, RW_WRITER);
2320 2323          error = tmpnode_trunc((struct tmount *)VFSTOTM(vp->v_vfsp),
2321 2324              tp, (ulong_t)lp->l_start);
2322 2325          rw_exit(&tp->tn_contents);
2323 2326          rw_exit(&tp->tn_rwlock);
2324 2327          return (error);
2325 2328  }
2326 2329  
2327 2330  /* ARGSUSED */
2328 2331  static int
2329 2332  tmp_space(
2330 2333          struct vnode *vp,
2331 2334          int cmd,
2332 2335          struct flock64 *bfp,
2333 2336          int flag,
2334 2337          offset_t offset,
2335 2338          cred_t *cred,
2336 2339          caller_context_t *ct)
2337 2340  {
2338 2341          int error;
2339 2342  
2340 2343          if (cmd != F_FREESP)
2341 2344                  return (EINVAL);
2342 2345          if ((error = convoff(vp, bfp, 0, (offset_t)offset)) == 0) {
2343 2346                  if ((bfp->l_start > MAXOFF_T) || (bfp->l_len > MAXOFF_T))
2344 2347                          return (EFBIG);
2345 2348                  error = tmp_freesp(vp, bfp, flag);
2346 2349  
2347      -                if (error == 0 && bfp->l_start == 0)
2348      -                        vnevent_truncate(vp, ct);
     2350 +                if (error == 0) {
     2351 +                        if (bfp->l_start == 0) {
     2352 +                                vnevent_truncate(vp, ct);
     2353 +                        } else {
     2354 +                                vnevent_resize(vp, ct);
     2355 +                        }
     2356 +                }
2349 2357          }
2350 2358          return (error);
2351 2359  }
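
This hunk is the heart of OS-5148: a truncation to offset 0 and a truncation at a nonzero offset both arrive here as F_FREESP, but they now raise distinct vnode events (truncate vs. resize), so event consumers can tell them apart. A userland sketch of the two cases via ftruncate(2), which reaches this code through VOP_SPACE; the path is hypothetical:

    #include <fcntl.h>
    #include <unistd.h>

    int
    main(void)
    {
            int fd = open("/tmp/f", O_CREAT | O_RDWR, 0644);

            if (fd < 0)
                    return (1);
            (void) ftruncate(fd, 8192); /* nonzero offset: resize event */
            (void) ftruncate(fd, 0);    /* offset 0: truncate event */
            (void) close(fd);
            return (0);
    }
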
2352 2360  
2353 2361  /* ARGSUSED */
2354 2362  static int
2355 2363  tmp_seek(
2356 2364          struct vnode *vp,
2357 2365          offset_t ooff,
2358 2366          offset_t *noffp,
2359 2367          caller_context_t *ct)
2360 2368  {
2361 2369          return ((*noffp < 0 || *noffp > MAXOFFSET_T) ? EINVAL : 0);
2362 2370  }
2363 2371  
2364 2372  /* ARGSUSED2 */
2365 2373  static int
2366 2374  tmp_rwlock(struct vnode *vp, int write_lock, caller_context_t *ctp)
2367 2375  {
2368 2376          struct tmpnode *tp = VTOTN(vp);
2369 2377  
2370 2378          if (write_lock) {
2371 2379                  rw_enter(&tp->tn_rwlock, RW_WRITER);
2372 2380          } else {
2373 2381                  rw_enter(&tp->tn_rwlock, RW_READER);
2374 2382          }
2375 2383          return (write_lock);
2376 2384  }
2377 2385  
2378 2386  /* ARGSUSED1 */
2379 2387  static void
2380 2388  tmp_rwunlock(struct vnode *vp, int write_lock, caller_context_t *ctp)
2381 2389  {
2382 2390          struct tmpnode *tp = VTOTN(vp);
2383 2391  
2384 2392          rw_exit(&tp->tn_rwlock);
2385 2393  }
2386 2394  
2387 2395  static int
2388 2396  tmp_pathconf(
2389 2397          struct vnode *vp,
2390 2398          int cmd,
2391 2399          ulong_t *valp,
2392 2400          cred_t *cr,
2393 2401          caller_context_t *ct)
2394 2402  {
2395 2403          struct tmpnode *tp = NULL;
2396 2404          int error;
2397 2405  
2398 2406          switch (cmd) {
2399 2407          case _PC_XATTR_EXISTS:
2400 2408                  if (vp->v_vfsp->vfs_flag & VFS_XATTR) {
2401 2409                          *valp = 0;      /* assume no attributes */
2402 2410                          error = 0;      /* okay to ask */
2403 2411                          tp = VTOTN(vp);
2404 2412                          rw_enter(&tp->tn_rwlock, RW_READER);
2405 2413                          if (tp->tn_xattrdp) {
2406 2414                                  rw_enter(&tp->tn_xattrdp->tn_rwlock, RW_READER);
2407 2415                                  /* do not count "." and ".." */
2408 2416                                  if (tp->tn_xattrdp->tn_dirents > 2)
2409 2417                                          *valp = 1;
2410 2418                                  rw_exit(&tp->tn_xattrdp->tn_rwlock);
2411 2419                          }
2412 2420                          rw_exit(&tp->tn_rwlock);
2413 2421                  } else {
2414 2422                          error = EINVAL;
2415 2423                  }
2416 2424                  break;
2417 2425          case _PC_SATTR_ENABLED:
2418 2426          case _PC_SATTR_EXISTS:
2419 2427                  *valp = vfs_has_feature(vp->v_vfsp, VFSFT_SYSATTR_VIEWS) &&
2420 2428                      (vp->v_type == VREG || vp->v_type == VDIR);
2421 2429                  error = 0;
2422 2430                  break;
2423 2431          case _PC_TIMESTAMP_RESOLUTION:
2424 2432                  /* nanosecond timestamp resolution */
2425 2433                  *valp = 1L;
2426 2434                  error = 0;
2427 2435                  break;
2428 2436          default:
2429 2437                  error = fs_pathconf(vp, cmd, valp, cr, ct);
2430 2438          }
2431 2439          return (error);
2432 2440  }
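
The _PC_XATTR_EXISTS arm reports 1 only when the node's hidden xattr directory contains entries beyond "." and "..". A quick userland probe with a hypothetical path:

    #include <stdio.h>
    #include <unistd.h>

    int
    main(void)
    {
            long v = pathconf("/tmp/f", _PC_XATTR_EXISTS);

            (void) printf("xattrs present: %ld\n", v);
            return (v < 0);
    }
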
2433 2441  
2434 2442  
2435 2443  struct vnodeops *tmp_vnodeops;
2436 2444  
2437 2445  const fs_operation_def_t tmp_vnodeops_template[] = {
2438 2446          VOPNAME_OPEN,           { .vop_open = tmp_open },
2439 2447          VOPNAME_CLOSE,          { .vop_close = tmp_close },
2440 2448          VOPNAME_READ,           { .vop_read = tmp_read },
2441 2449          VOPNAME_WRITE,          { .vop_write = tmp_write },
2442 2450          VOPNAME_IOCTL,          { .vop_ioctl = tmp_ioctl },
2443 2451          VOPNAME_GETATTR,        { .vop_getattr = tmp_getattr },
2444 2452          VOPNAME_SETATTR,        { .vop_setattr = tmp_setattr },
2445 2453          VOPNAME_ACCESS,         { .vop_access = tmp_access },
2446 2454          VOPNAME_LOOKUP,         { .vop_lookup = tmp_lookup },
2447 2455          VOPNAME_CREATE,         { .vop_create = tmp_create },
2448 2456          VOPNAME_REMOVE,         { .vop_remove = tmp_remove },
2449 2457          VOPNAME_LINK,           { .vop_link = tmp_link },
2450 2458          VOPNAME_RENAME,         { .vop_rename = tmp_rename },
2451 2459          VOPNAME_MKDIR,          { .vop_mkdir = tmp_mkdir },
2452 2460          VOPNAME_RMDIR,          { .vop_rmdir = tmp_rmdir },
2453 2461          VOPNAME_READDIR,        { .vop_readdir = tmp_readdir },
2454 2462          VOPNAME_SYMLINK,        { .vop_symlink = tmp_symlink },
2455 2463          VOPNAME_READLINK,       { .vop_readlink = tmp_readlink },
2456 2464          VOPNAME_FSYNC,          { .vop_fsync = tmp_fsync },
2457 2465          VOPNAME_INACTIVE,       { .vop_inactive = tmp_inactive },
2458 2466          VOPNAME_FID,            { .vop_fid = tmp_fid },
2459 2467          VOPNAME_RWLOCK,         { .vop_rwlock = tmp_rwlock },
2460 2468          VOPNAME_RWUNLOCK,       { .vop_rwunlock = tmp_rwunlock },
2461 2469          VOPNAME_SEEK,           { .vop_seek = tmp_seek },
2462 2470          VOPNAME_SPACE,          { .vop_space = tmp_space },
2463 2471          VOPNAME_GETPAGE,        { .vop_getpage = tmp_getpage },
2464 2472          VOPNAME_PUTPAGE,        { .vop_putpage = tmp_putpage },
2465 2473          VOPNAME_MAP,            { .vop_map = tmp_map },
2466 2474          VOPNAME_ADDMAP,         { .vop_addmap = tmp_addmap },
2467 2475          VOPNAME_DELMAP,         { .vop_delmap = tmp_delmap },
2468 2476          VOPNAME_PATHCONF,       { .vop_pathconf = tmp_pathconf },
2469 2477          VOPNAME_VNEVENT,        { .vop_vnevent = fs_vnevent_support },
2470 2478          NULL,                   NULL
2471 2479  };
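
A template like the one above is turned into the tmp_vnodeops pointer at module initialization by vn_make_ops(). A sketch of that registration (tmpfs actually performs it in its VFS initialization code, not in this file; the function name below is illustrative):

    /* illustrative init helper; tmpfs does this from its vfs init path */
    static int
    tmp_vnodeops_init(void)
    {
            int error;

            error = vn_make_ops("tmpfs", tmp_vnodeops_template,
                &tmp_vnodeops);
            if (error != 0)
                    cmn_err(CE_WARN, "tmpfs: bad vnode ops template");
            return (error);
    }
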