11083 support NFS server in zone
Portions contributed by: Dan Kruchinin <dan.kruchinin@nexenta.com>
Portions contributed by: Stepan Zastupov <stepan.zastupov@gmail.com>
Portions contributed by: Joyce McIntosh <joyce.mcintosh@nexenta.com>
Portions contributed by: Mike Zeller <mike@mikezeller.net>
Portions contributed by: Dan McDonald <danmcd@joyent.com>
Portions contributed by: Gordon Ross <gordon.w.ross@gmail.com>
Portions contributed by: Vitaliy Gusev <gusev.vitaliy@gmail.com>
Reviewed by: Rick McNeal <rick.mcneal@nexenta.com>
Reviewed by: Rob Gittins <rob.gittins@nexenta.com>
Reviewed by: Sanjay Nadkarni <sanjay.nadkarni@nexenta.com>
Reviewed by: Jason King <jbk@joyent.com>
Reviewed by: C Fraire <cfraire@me.com>
Change-Id: I22f289d357503f9b48a0bc2482cc4328a6d43d16

--- old/usr/src/uts/common/fs/sharefs/sharefs_vnops.c
+++ new/usr/src/uts/common/fs/sharefs/sharefs_vnops.c
[... 16 lines elided ...]
  17   17   * information: Portions Copyright [yyyy] [name of copyright owner]
  18   18   *
  19   19   * CDDL HEADER END
  20   20   */
  21   21  
  22   22  /*
  23   23   * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
  24   24   * Use is subject to license terms.
  25   25   */
  26   26  
       27 +/*
       28 + * Copyright 2018 Nexenta Systems, Inc.
       29 + */
       30 +
  27   31  #include <fs/fs_subr.h>
  28   32  
  29   33  #include <sys/errno.h>
  30   34  #include <sys/file.h>
  31   35  #include <sys/kmem.h>
  32   36  #include <sys/kobj.h>
  33   37  #include <sys/cmn_err.h>
  34   38  #include <sys/stat.h>
  35   39  #include <sys/systm.h>
  36   40  #include <sys/sysmacros.h>
[... 1 line elided ...]
  38   42  #include <sys/vfs.h>
  39   43  #include <sys/vfs_opreg.h>
  40   44  
  41   45  #include <sharefs/sharefs.h>
  42   46  
  43   47  /*
  44   48   * sharefs_snap_create: create a large character buffer with
  45   49   * the shares enumerated.
  46   50   */
  47   51  static int
  48      -sharefs_snap_create(shnode_t *sft)
       52 +sharefs_snap_create(sharetab_globals_t *sg, shnode_t *sft)
  49   53  {
  50   54          sharetab_t              *sht;
  51   55          share_t                 *sh;
  52   56          size_t                  sWritten = 0;
  53   57          int                     iCount = 0;
  54   58          char                    *buf;
  55   59  
  56      -        rw_enter(&sharefs_lock, RW_WRITER);
  57      -        rw_enter(&sharetab_lock, RW_READER);
       60 +        rw_enter(&sg->sharefs_lock, RW_WRITER);
       61 +        rw_enter(&sg->sharetab_lock, RW_READER);
  58   62  
  59   63          if (sft->sharefs_snap) {
  60   64                  /*
  61   65                   * Nothing has changed, so no need to grab a new copy!
  62   66                   */
  63      -                if (sft->sharefs_generation == sharetab_generation) {
  64      -                        rw_exit(&sharetab_lock);
  65      -                        rw_exit(&sharefs_lock);
       67 +                if (sft->sharefs_generation == sg->sharetab_generation) {
       68 +                        rw_exit(&sg->sharetab_lock);
       69 +                        rw_exit(&sg->sharefs_lock);
  66   70                          return (0);
  67   71                  }
  68   72  
  69   73                  ASSERT(sft->sharefs_size != 0);
  70   74                  kmem_free(sft->sharefs_snap, sft->sharefs_size + 1);
  71   75                  sft->sharefs_snap = NULL;
  72   76          }
  73   77  
  74      -        sft->sharefs_size = sharetab_size;
  75      -        sft->sharefs_count = sharetab_count;
       78 +        sft->sharefs_size = sg->sharetab_size;
       79 +        sft->sharefs_count = sg->sharetab_count;
  76   80  
  77   81          if (sft->sharefs_size == 0) {
  78      -                rw_exit(&sharetab_lock);
  79      -                rw_exit(&sharefs_lock);
       82 +                rw_exit(&sg->sharetab_lock);
       83 +                rw_exit(&sg->sharefs_lock);
  80   84                  return (0);
  81   85          }
  82   86  
  83   87          sft->sharefs_snap = kmem_zalloc(sft->sharefs_size + 1, KM_SLEEP);
  84   88  
  85   89          buf = sft->sharefs_snap;
  86   90  
  87   91          /*
  88   92           * Walk the Sharetab, dumping each entry.
  89   93           */
  90      -        for (sht = sharefs_sharetab; sht != NULL; sht = sht->s_next) {
       94 +        for (sht = sg->sharefs_sharetab; sht != NULL; sht = sht->s_next) {
  91   95                  int     i;
  92   96  
  93   97                  for (i = 0; i < SHARETAB_HASHES; i++) {
  94   98                          for (sh = sht->s_buckets[i].ssh_sh;
  95   99                              sh != NULL;
  96  100                              sh = sh->sh_next) {
  97  101                                  int     n;
  98  102  
  99  103                                  if ((sWritten + sh->sh_size) >
 100  104                                      sft->sharefs_size) {
[... 24 lines elided ...]
 125  129                                  sWritten += n;
 126  130                                  iCount++;
 127  131                          }
 128  132                  }
 129  133          }
 130  134  
 131  135          /*
 132  136           * We want to record the generation number and
 133  137           * mtime inside this snapshot.
 134  138           */
 135      -        gethrestime(&sharetab_snap_time);
 136      -        sft->sharefs_snap_time = sharetab_snap_time;
 137      -        sft->sharefs_generation = sharetab_generation;
      139 +        gethrestime(&sg->sharetab_snap_time);
      140 +        sft->sharefs_snap_time = sg->sharetab_snap_time;
      141 +        sft->sharefs_generation = sg->sharetab_generation;
 138  142  
 139  143          ASSERT(iCount == sft->sharefs_count);
 140  144  
 141      -        rw_exit(&sharetab_lock);
 142      -        rw_exit(&sharefs_lock);
      145 +        rw_exit(&sg->sharetab_lock);
      146 +        rw_exit(&sg->sharefs_lock);
 143  147          return (0);
 144  148  
 145  149  error_fault:
 146  150  
 147  151          kmem_free(sft->sharefs_snap, sft->sharefs_size + 1);
 148  152          sft->sharefs_size = 0;
 149  153          sft->sharefs_count = 0;
 150  154          sft->sharefs_snap = NULL;
 151      -        rw_exit(&sharetab_lock);
 152      -        rw_exit(&sharefs_lock);
      155 +        rw_exit(&sg->sharetab_lock);
      156 +        rw_exit(&sg->sharefs_lock);
 153  157  
 154  158          return (EFAULT);
 155  159  }
 156  160  
 157  161  /* ARGSUSED */
 158  162  static int
 159  163  sharefs_getattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr,
 160  164      caller_context_t *ct)
 161  165  {
 162  166          timestruc_t     now;
 163  167          shnode_t        *sft = VTOSH(vp);
      168 +        sharetab_globals_t *sg = sharetab_get_globals(vp->v_vfsp->vfs_zone);
 164  169  
 165  170          vap->va_type = VREG;
 166  171          vap->va_mode = S_IRUSR | S_IRGRP | S_IROTH;
 167  172          vap->va_nodeid = SHAREFS_INO_FILE;
 168  173          vap->va_nlink = 1;
 169  174  
 170      -        rw_enter(&sharefs_lock, RW_READER);
      175 +        rw_enter(&sg->sharefs_lock, RW_READER);
 171  176  
 172  177          /*
 173  178           * If we get asked about a snapped vnode, then
 174  179           * we must report the data in that vnode.
 175  180           *
 176  181           * Else we report what is currently in the
 177  182           * sharetab.
 178  183           */
 179  184          if (sft->sharefs_real_vp) {
 180      -                rw_enter(&sharetab_lock, RW_READER);
 181      -                vap->va_size = sharetab_size;
 182      -                vap->va_mtime = sharetab_mtime;
 183      -                rw_exit(&sharetab_lock);
      185 +                rw_enter(&sg->sharetab_lock, RW_READER);
      186 +                vap->va_size = sg->sharetab_size;
      187 +                vap->va_mtime = sg->sharetab_mtime;
      188 +                rw_exit(&sg->sharetab_lock);
 184  189          } else {
 185  190                  vap->va_size = sft->sharefs_size;
 186  191                  vap->va_mtime = sft->sharefs_snap_time;
 187  192          }
 188      -        rw_exit(&sharefs_lock);
      193 +        rw_exit(&sg->sharefs_lock);
 189  194  
 190  195          gethrestime(&now);
 191  196          vap->va_atime = vap->va_ctime = now;
 192  197  
 193  198          vap->va_uid = 0;
 194  199          vap->va_gid = 0;
 195  200          vap->va_rdev = 0;
 196  201          vap->va_blksize = DEV_BSIZE;
 197  202          vap->va_nblocks = howmany(vap->va_size, vap->va_blksize);
 198  203          vap->va_seq = 0;
[... 53 lines elided ...]
 252  257           * this data structure.
 253  258           */
 254  259          atomic_inc_32(&sft->sharefs_refs);
 255  260          sft->sharefs_real_vp = 0;
 256  261  
 257  262          /*
 258  263           * Since the sharetab could easily change on us whilst we
 259  264           * are dumping an extremely huge sharetab, we make a copy
 260  265           * of it here and use it to dump instead.
 261  266           */
 262      -        error = sharefs_snap_create(sft);
      267 +        error = sharefs_snap_create(sharetab_get_globals(vp->v_vfsp->vfs_zone),
      268 +            sft);
 263  269  
 264  270          return (error);
 265  271  }
 266  272  
 267  273  /* ARGSUSED */
 268  274  int
 269  275  sharefs_close(vnode_t *vp, int flag, int count,
 270  276      offset_t off, cred_t *cr, caller_context_t *ct)
 271  277  {
 272  278          shnode_t        *sft = VTOSH(vp);
      279 +        sharetab_globals_t *sg = sharetab_get_globals(vp->v_vfsp->vfs_zone);
 273  280  
 274  281          if (count > 1)
 275  282                  return (0);
 276  283  
 277      -        rw_enter(&sharefs_lock, RW_WRITER);
      284 +        rw_enter(&sg->sharefs_lock, RW_WRITER);
 278  285          if (vp->v_count == 1) {
 279  286                  if (sft->sharefs_snap != NULL) {
 280  287                          kmem_free(sft->sharefs_snap, sft->sharefs_size + 1);
 281  288                          sft->sharefs_size = 0;
 282  289                          sft->sharefs_snap = NULL;
 283  290                          sft->sharefs_generation = 0;
 284  291                  }
 285  292          }
 286  293          atomic_dec_32(&sft->sharefs_refs);
 287      -        rw_exit(&sharefs_lock);
      294 +        rw_exit(&sg->sharefs_lock);
 288  295  
 289  296          return (0);
 290  297  }
 291  298  
 292  299  /* ARGSUSED */
 293  300  static int
 294  301  sharefs_read(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cr,
 295      -                        caller_context_t *ct)
      302 +    caller_context_t *ct)
 296  303  {
 297  304          shnode_t        *sft = VTOSH(vp);
 298  305          off_t           off = uio->uio_offset;
 299  306          size_t          len = uio->uio_resid;
 300  307          int             error = 0;
      308 +        sharetab_globals_t *sg = sharetab_get_globals(vp->v_vfsp->vfs_zone);
 301  309  
 302      -        rw_enter(&sharefs_lock, RW_READER);
      310 +        rw_enter(&sg->sharefs_lock, RW_READER);
 303  311  
 304  312          /*
 305  313           * First check to see if we need to grab a new snapshot.
 306  314           */
 307  315          if (off == (off_t)0) {
 308      -                rw_exit(&sharefs_lock);
 309      -                error = sharefs_snap_create(sft);
      316 +                rw_exit(&sg->sharefs_lock);
      317 +                error = sharefs_snap_create(sg, sft);
 310  318                  if (error) {
 311  319                          return (EFAULT);
 312  320                  }
 313      -                rw_enter(&sharefs_lock, RW_READER);
      321 +                rw_enter(&sg->sharefs_lock, RW_READER);
 314  322          }
 315  323  
 316  324          /* LINTED */
 317  325          if (len <= 0 || off >= sft->sharefs_size) {
 318      -                rw_exit(&sharefs_lock);
      326 +                rw_exit(&sg->sharefs_lock);
 319  327                  return (error);
 320  328          }
 321  329  
 322  330          if ((size_t)(off + len) > sft->sharefs_size)
 323  331                  len = sft->sharefs_size - off;
 324  332  
 325  333          if (off < 0 || len > sft->sharefs_size) {
 326      -                rw_exit(&sharefs_lock);
      334 +                rw_exit(&sg->sharefs_lock);
 327  335                  return (EFAULT);
 328  336          }
 329  337  
 330  338          if (len != 0) {
 331  339                  error = uiomove(sft->sharefs_snap + off,
 332  340                      len, UIO_READ, uio);
 333  341          }
 334  342  
 335      -        rw_exit(&sharefs_lock);
      343 +        rw_exit(&sg->sharefs_lock);
 336  344          return (error);
 337  345  }
 338  346  
 339  347  /* ARGSUSED */
 340  348  static void
 341  349  sharefs_inactive(vnode_t *vp, cred_t *cr, caller_context_t *tx)
 342  350  {
 343  351          gfs_file_t      *fp = vp->v_data;
 344  352          shnode_t        *sft;
      353 +        sharetab_globals_t *sg = sharetab_get_globals(vp->v_vfsp->vfs_zone);
 345  354  
 346  355          sft = (shnode_t *)gfs_file_inactive(vp);
 347  356          if (sft) {
 348      -                rw_enter(&sharefs_lock, RW_WRITER);
      357 +                rw_enter(&sg->sharefs_lock, RW_WRITER);
 349  358                  if (sft->sharefs_snap != NULL) {
 350  359                          kmem_free(sft->sharefs_snap, sft->sharefs_size + 1);
 351  360                  }
 352  361  
 353  362                  kmem_free(sft, fp->gfs_size);
 354      -                rw_exit(&sharefs_lock);
      363 +                rw_exit(&sg->sharefs_lock);
 355  364          }
 356  365  }
 357  366  
 358  367  vnode_t *
 359  368  sharefs_create_root_file(vfs_t *vfsp)
 360  369  {
 361  370          vnode_t         *vp;
 362  371          shnode_t        *sft;
 363  372  
 364  373          vp = gfs_root_create_file(sizeof (shnode_t),
[... 20 lines elided ...]
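The hunk above converts sharefs's vnode operations from file-scope globals (sharefs_lock, sharetab_lock, sharetab_size, and so on) to fields of a per-zone sharetab_globals_t, looked up through sharetab_get_globals(vp->v_vfsp->vfs_zone). The lookup routine itself is defined elsewhere in this change, outside this hunk. As a reading aid, here is a minimal sketch of the usual illumos mechanism for per-zone state, zone-specific data (ZSD); the key name sharetab_zone_key and the init/fini routines below are illustrative assumptions, not code from this change:

/*
 * Illustrative sketch only -- the real sharetab_get_globals() is not
 * part of this hunk.  This shows the standard ZSD pattern for per-zone
 * state: a zone key plus create/destroy callbacks.
 */
#include <sys/zone.h>
#include <sys/kmem.h>
#include <sys/ksynch.h>

#include <sharefs/sharefs.h>

static zone_key_t sharetab_zone_key;    /* hypothetical name */

/* Zone creation callback: allocate and initialize this zone's globals. */
static void *
sharetab_zone_init(zoneid_t zoneid)
{
        sharetab_globals_t *sg;

        sg = kmem_zalloc(sizeof (*sg), KM_SLEEP);
        rw_init(&sg->sharetab_lock, NULL, RW_DEFAULT, NULL);
        rw_init(&sg->sharefs_lock, NULL, RW_DEFAULT, NULL);
        return (sg);
}

/* Zone destruction callback: tear the globals back down. */
static void
sharetab_zone_fini(zoneid_t zoneid, void *data)
{
        sharetab_globals_t *sg = data;

        rw_destroy(&sg->sharefs_lock);
        rw_destroy(&sg->sharetab_lock);
        kmem_free(sg, sizeof (*sg));
}

/* Register the key once at module init time. */
void
sharetab_zone_key_init(void)
{
        zone_key_create(&sharetab_zone_key, sharetab_zone_init,
            NULL, sharetab_zone_fini);
}

/* Resolve a zone to its private sharetab state. */
sharetab_globals_t *
sharetab_get_globals(zone_t *zone)
{
        return (zone_getspecific(sharetab_zone_key, zone));
}

Under this pattern each zone gets its own sharetab and locks at zone creation, so the vnode operations above snapshot and read only the calling zone's shares instead of a single system-wide table, in line with the "11083 support NFS server in zone" synopsis.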