Print this page
NEX-15279 support NFS server in zone
NEX-15520 online NFS shares cause zoneadm halt to hang in nfs_export_zone_fini
Portions contributed by: Dan Kruchinin dan.kruchinin@nexenta.com
Portions contributed by: Stepan Zastupov stepan.zastupov@gmail.com
Reviewed by: Joyce McIntosh <joyce.mcintosh@nexenta.com>
Reviewed by: Rob Gittins <rob.gittins@nexenta.com>
Reviewed by: Gordon Ross <gordon.ross@nexenta.com>
NEX-9275 Got "bad mutex" panic when running IO to an NFS share from clients
Reviewed by: Yuri Pankov <yuri.pankov@nexenta.com>
Reviewed by: Sanjay Nadkarni <sanjay.nadkarni@nexenta.com>
Reviewed by: Rob Gittins <rob.gittins@nexenta.com>
NEX-5667 nfssrv_stats_flags does not work for aggregated kstats
NEX-4472 nfsauth_retrieve() flood caused by NFS clients with personal identity problems
Reviewed by: Yuri Pankov <yuri.pankov@nexenta.com>
NEX-2345 nfsauth_cache_get() could spend a lot of time walking exi_cache
Reviewed by: Gordon Ross <gordon.ross@nexenta.com>
NEX-2972 bad free at checkauth+0x1a2()
Reviewed by: Jan Kryl <jan.kryl@nexenta.com>
Reviewed by: Josef 'Jeff' Sipek <josef.sipek@nexenta.com>
NEX-2949 Panic due to bad mutex, from auth_cache being previously freed
NEX-1974 Support for more than 16 groups with AUTH_SYS
Reviewed by: Gordon Ross <gordon.ross@nexenta.com>
Reviewed by: Josef 'Jeff' Sipek <josef.sipek@nexenta.com>
NEX-1128 NFS server: Generic uid and gid remapping for AUTH_SYS
Reviewed by: Jan Kryl <jan.kryl@nexenta.com>
OS-152 NFS extremely slow in nfsauth
Reviewed by: Jan Kryl <jan.kryl@nexenta.com>
Reviewed by: Ilya Usvyatsky <ilya.usvyatsky@nexenta.com>
Reviewed by: Josef Sipek <josef.sipek@nexenta.com>

Split Close
Expand all
Collapse all
          --- old/usr/src/uts/common/fs/nfs/nfs_auth.c
          +++ new/usr/src/uts/common/fs/nfs/nfs_auth.c
↓ open down ↓ 12 lines elided ↑ open up ↑
  13   13   * When distributing Covered Code, include this CDDL HEADER in each
  14   14   * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15   15   * If applicable, add the following below this CDDL HEADER, with the
  16   16   * fields enclosed by brackets "[]" replaced with your own identifying
  17   17   * information: Portions Copyright [yyyy] [name of copyright owner]
  18   18   *
  19   19   * CDDL HEADER END
  20   20   */
  21   21  
  22   22  /*
  23      - * Copyright 2016 Nexenta Systems, Inc.  All rights reserved.
  24   23   * Copyright (c) 1995, 2010, Oracle and/or its affiliates. All rights reserved.
       24 + */
       25 +
       26 +/*
       27 + * Copyright 2018 Nexenta Systems, Inc.
  25   28   * Copyright (c) 2015 by Delphix. All rights reserved.
  26   29   */
  27   30  
  28   31  #include <sys/param.h>
  29   32  #include <sys/errno.h>
  30   33  #include <sys/vfs.h>
  31   34  #include <sys/vnode.h>
  32   35  #include <sys/cred.h>
  33   36  #include <sys/cmn_err.h>
  34   37  #include <sys/systm.h>
↓ open down ↓ 11 lines elided ↑ open up ↑
  46   49  #include <rpc/clnt.h>
  47   50  
  48   51  #include <nfs/nfs.h>
  49   52  #include <nfs/export.h>
  50   53  #include <nfs/nfs_clnt.h>
  51   54  #include <nfs/auth.h>
  52   55  
  53   56  static struct kmem_cache *exi_cache_handle;
  54   57  static void exi_cache_reclaim(void *);
  55   58  static void exi_cache_trim(struct exportinfo *exi);
       59 +static void *nfsauth_zone_init(zoneid_t);
       60 +static void nfsauth_zone_shutdown(zoneid_t zoneid, void *data);
       61 +static void nfsauth_zone_fini(zoneid_t, void *);
  56   62  
  57   63  extern pri_t minclsyspri;
  58   64  
       65 +/* NFS auth cache statistics */
  59   66  volatile uint_t nfsauth_cache_hit;
  60   67  volatile uint_t nfsauth_cache_miss;
  61   68  volatile uint_t nfsauth_cache_refresh;
  62   69  volatile uint_t nfsauth_cache_reclaim;
  63   70  volatile uint_t exi_cache_auth_reclaim_failed;
  64   71  volatile uint_t exi_cache_clnt_reclaim_failed;
  65   72  
  66   73  /*
  67   74   * The lifetime of an auth cache entry:
  68   75   * ------------------------------------
↓ open down ↓ 43 lines elided ↑ open up ↑
 112  119          list_node_t             ren_node;
 113  120  } refreshq_exi_node_t;
 114  121  
 115  122  typedef struct refreshq_auth_node {
 116  123          struct auth_cache       *ran_auth;
 117  124          char                    *ran_netid;
 118  125          list_node_t             ran_node;
 119  126  } refreshq_auth_node_t;
 120  127  
 121  128  /*
 122      - * Used to manipulate things on the refreshq_queue.
 123      - * Note that the refresh thread will effectively
 124      - * pop a node off of the queue, at which point it
      129 + * Used to manipulate things on the refreshq_queue.  Note that the refresh
      130 + * thread will effectively pop a node off of the queue, at which point it
 125  131   * will no longer need to hold the mutex.
 126  132   */
 127  133  static kmutex_t refreshq_lock;
 128  134  static list_t refreshq_queue;
 129  135  static kcondvar_t refreshq_cv;
 130  136  
 131  137  /*
 132      - * If there is ever a problem with loading the
 133      - * module, then nfsauth_fini() needs to be called
 134      - * to remove state. In that event, since the
 135      - * refreshq thread has been started, they need to
 136      - * work together to get rid of state.
      138 + * If there is ever a problem with loading the module, then nfsauth_fini()
      139 + * needs to be called to remove state.  In that event, since the refreshq
      140 + * thread has been started, they need to work together to get rid of state.
 137  141   */
 138  142  typedef enum nfsauth_refreshq_thread_state {
 139  143          REFRESHQ_THREAD_RUNNING,
 140  144          REFRESHQ_THREAD_FINI_REQ,
 141      -        REFRESHQ_THREAD_HALTED
      145 +        REFRESHQ_THREAD_HALTED,
      146 +        REFRESHQ_THREAD_NEED_CREATE
 142  147  } nfsauth_refreshq_thread_state_t;
 143  148  
 144      -nfsauth_refreshq_thread_state_t
 145      -refreshq_thread_state = REFRESHQ_THREAD_HALTED;
      149 +typedef struct nfsauth_globals {
      150 +        kmutex_t        mountd_lock;
      151 +        door_handle_t   mountd_dh;
 146  152  
      153 +        /*
      154 +         * Used to manipulate things on the refreshq_queue.  Note that the
      155 +         * refresh thread will effectively pop a node off of the queue,
      156 +         * at which point it will no longer need to hold the mutex.
      157 +         */
      158 +        kmutex_t        refreshq_lock;
      159 +        list_t          refreshq_queue;
      160 +        kcondvar_t      refreshq_cv;
      161 +
      162 +        /*
      163 +         * A list_t would be overkill.  These are auth_cache entries which are
      164 +         * no longer linked to an exi.  It should be the case that all of their
      165 +         * states are NFS_AUTH_INVALID, i.e., the only way to be put on this
      166 +         * list is iff their state indicated that they had been placed on the
      167 +         * refreshq_queue.
      168 +         *
      169 +         * Note that while there is no link from the exi or back to the exi,
      170 +         * the exi can not go away until these entries are harvested.
      171 +         */
      172 +        struct auth_cache               *refreshq_dead_entries;
      173 +        nfsauth_refreshq_thread_state_t refreshq_thread_state;
      174 +
      175 +} nfsauth_globals_t;
      176 +
 147  177  static void nfsauth_free_node(struct auth_cache *);
 148      -static void nfsauth_refresh_thread(void);
      178 +static void nfsauth_refresh_thread(nfsauth_globals_t *);
 149  179  
 150  180  static int nfsauth_cache_compar(const void *, const void *);
 151  181  
 152      -/*
 153      - * mountd is a server-side only daemon. This will need to be
 154      - * revisited if the NFS server is ever made zones-aware.
 155      - */
 156      -kmutex_t        mountd_lock;
 157      -door_handle_t   mountd_dh;
      182 +static zone_key_t       nfsauth_zone_key;
 158  183  
 159  184  void
 160  185  mountd_args(uint_t did)
 161  186  {
 162      -        mutex_enter(&mountd_lock);
 163      -        if (mountd_dh != NULL)
 164      -                door_ki_rele(mountd_dh);
 165      -        mountd_dh = door_ki_lookup(did);
 166      -        mutex_exit(&mountd_lock);
      187 +        nfsauth_globals_t *nag;
      188 +
      189 +        nag = zone_getspecific(nfsauth_zone_key, curzone);
      190 +        mutex_enter(&nag->mountd_lock);
      191 +        if (nag->mountd_dh != NULL)
      192 +                door_ki_rele(nag->mountd_dh);
      193 +        nag->mountd_dh = door_ki_lookup(did);
      194 +        mutex_exit(&nag->mountd_lock);
 167  195  }
 168  196  
 169  197  void
 170  198  nfsauth_init(void)
 171  199  {
 172      -        /*
 173      -         * mountd can be restarted by smf(5). We need to make sure
 174      -         * the updated door handle will safely make it to mountd_dh
 175      -         */
 176      -        mutex_init(&mountd_lock, NULL, MUTEX_DEFAULT, NULL);
      200 +        zone_key_create(&nfsauth_zone_key, nfsauth_zone_init,
      201 +            nfsauth_zone_shutdown, nfsauth_zone_fini);
 177  202  
 178      -        mutex_init(&refreshq_lock, NULL, MUTEX_DEFAULT, NULL);
 179      -        list_create(&refreshq_queue, sizeof (refreshq_exi_node_t),
 180      -            offsetof(refreshq_exi_node_t, ren_node));
 181      -
 182      -        cv_init(&refreshq_cv, NULL, CV_DEFAULT, NULL);
 183      -
 184      -        /*
 185      -         * Allocate nfsauth cache handle
 186      -         */
 187  203          exi_cache_handle = kmem_cache_create("exi_cache_handle",
 188  204              sizeof (struct auth_cache), 0, NULL, NULL,
 189  205              exi_cache_reclaim, NULL, NULL, 0);
 190      -
 191      -        refreshq_thread_state = REFRESHQ_THREAD_RUNNING;
 192      -        (void) zthread_create(NULL, 0, nfsauth_refresh_thread,
 193      -            NULL, 0, minclsyspri);
 194  206  }
 195  207  
 196      -/*
 197      - * Finalization routine for nfsauth. It is important to call this routine
 198      - * before destroying the exported_lock.
 199      - */
 200  208  void
 201  209  nfsauth_fini(void)
 202  210  {
 203      -        refreshq_exi_node_t     *ren;
      211 +        kmem_cache_destroy(exi_cache_handle);
      212 +}
 204  213  
      214 +/*ARGSUSED*/
      215 +static void *
      216 +nfsauth_zone_init(zoneid_t zoneid)
      217 +{
      218 +        nfsauth_globals_t *nag;
      219 +
      220 +        nag = kmem_zalloc(sizeof (*nag), KM_SLEEP);
      221 +
 205  222          /*
 206      -         * Prevent the nfsauth_refresh_thread from getting new
 207      -         * work.
      223 +         * mountd can be restarted by smf(5).  We need to make sure
      224 +         * the updated door handle will safely make it to mountd_dh.
 208  225           */
 209      -        mutex_enter(&refreshq_lock);
 210      -        if (refreshq_thread_state != REFRESHQ_THREAD_HALTED) {
 211      -                refreshq_thread_state = REFRESHQ_THREAD_FINI_REQ;
 212      -                cv_broadcast(&refreshq_cv);
      226 +        mutex_init(&nag->mountd_lock, NULL, MUTEX_DEFAULT, NULL);
      227 +        mutex_init(&nag->refreshq_lock, NULL, MUTEX_DEFAULT, NULL);
      228 +        list_create(&nag->refreshq_queue, sizeof (refreshq_exi_node_t),
      229 +            offsetof(refreshq_exi_node_t, ren_node));
      230 +        cv_init(&nag->refreshq_cv, NULL, CV_DEFAULT, NULL);
      231 +        nag->refreshq_thread_state = REFRESHQ_THREAD_NEED_CREATE;
 213  232  
 214      -                /*
 215      -                 * Also, wait for nfsauth_refresh_thread() to exit.
 216      -                 */
 217      -                while (refreshq_thread_state != REFRESHQ_THREAD_HALTED) {
 218      -                        cv_wait(&refreshq_cv, &refreshq_lock);
 219      -                }
      233 +        return (nag);
      234 +}
      235 +
      236 +/*ARGSUSED*/
      237 +static void
      238 +nfsauth_zone_shutdown(zoneid_t zoneid, void *data)
      239 +{
      240 +        refreshq_exi_node_t     *ren;
      241 +        nfsauth_globals_t       *nag = data;
      242 +
      243 +        /* Prevent the nfsauth_refresh_thread from getting new work */
      244 +        mutex_enter(&nag->refreshq_lock);
      245 +        if (nag->refreshq_thread_state == REFRESHQ_THREAD_RUNNING) {
      246 +                nag->refreshq_thread_state = REFRESHQ_THREAD_FINI_REQ;
      247 +                cv_broadcast(&nag->refreshq_cv);
      248 +
      249 +                /* Wait for nfsauth_refresh_thread() to exit */
      250 +                while (nag->refreshq_thread_state != REFRESHQ_THREAD_HALTED)
      251 +                        cv_wait(&nag->refreshq_cv, &nag->refreshq_lock);
 220  252          }
 221      -        mutex_exit(&refreshq_lock);
      253 +        mutex_exit(&nag->refreshq_lock);
 222  254  
 223  255          /*
 224  256           * Walk the exi_list and in turn, walk the auth_lists and free all
 225  257           * lists.  In addition, free INVALID auth_cache entries.
 226  258           */
 227      -        while ((ren = list_remove_head(&refreshq_queue))) {
      259 +        while ((ren = list_remove_head(&nag->refreshq_queue))) {
 228  260                  refreshq_auth_node_t *ran;
 229  261  
 230  262                  while ((ran = list_remove_head(&ren->ren_authlist)) != NULL) {
 231  263                          struct auth_cache *p = ran->ran_auth;
 232  264                          if (p->auth_state == NFS_AUTH_INVALID)
 233  265                                  nfsauth_free_node(p);
 234  266                          strfree(ran->ran_netid);
 235      -                        kmem_free(ran, sizeof (refreshq_auth_node_t));
      267 +                        kmem_free(ran, sizeof (*ran));
 236  268                  }
 237  269  
 238  270                  list_destroy(&ren->ren_authlist);
 239      -                exi_rele(ren->ren_exi);
 240      -                kmem_free(ren, sizeof (refreshq_exi_node_t));
      271 +                exi_rele(&ren->ren_exi);
      272 +                kmem_free(ren, sizeof (*ren));
 241  273          }
 242      -        list_destroy(&refreshq_queue);
      274 +}
 243  275  
 244      -        cv_destroy(&refreshq_cv);
 245      -        mutex_destroy(&refreshq_lock);
      276 +/*ARGSUSED*/
      277 +static void
      278 +nfsauth_zone_fini(zoneid_t zoneid, void *data)
      279 +{
      280 +        nfsauth_globals_t *nag = data;
 246  281  
 247      -        mutex_destroy(&mountd_lock);
 248      -
 249      -        /*
 250      -         * Deallocate nfsauth cache handle
 251      -         */
 252      -        kmem_cache_destroy(exi_cache_handle);
      282 +        list_destroy(&nag->refreshq_queue);
      283 +        cv_destroy(&nag->refreshq_cv);
      284 +        mutex_destroy(&nag->refreshq_lock);
      285 +        mutex_destroy(&nag->mountd_lock);
      286 +        kmem_free(nag, sizeof (*nag));
 253  287  }
 254  288  
 255  289  /*
 256  290   * Convert the address in a netbuf to
 257  291   * a hash index for the auth_cache table.
 258  292   */
 259  293  static int
 260  294  hash(struct netbuf *a)
 261  295  {
 262  296          int i, h = 0;
↓ open down ↓ 71 lines elided ↑ open up ↑
 334  368          if ((tstamp + 60) < now) {
 335  369                  tstamp = now;
 336  370                  cmn_err(CE_WARN, msg);
 337  371          }
 338  372  }
 339  373  
 340  374  /*
 341  375   * Callup to the mountd to get access information in the kernel.
 342  376   */
 343  377  static bool_t
 344      -nfsauth_retrieve(struct exportinfo *exi, char *req_netid, int flavor,
 345      -    struct netbuf *addr, int *access, cred_t *clnt_cred, uid_t *srv_uid,
 346      -    gid_t *srv_gid, uint_t *srv_gids_cnt, gid_t **srv_gids)
      378 +nfsauth_retrieve(nfsauth_globals_t *nag, struct exportinfo *exi,
      379 +    char *req_netid, int flavor, struct netbuf *addr, int *access,
      380 +    cred_t *clnt_cred, uid_t *srv_uid, gid_t *srv_gid, uint_t *srv_gids_cnt,
      381 +    gid_t **srv_gids)
 347  382  {
 348  383          varg_t                    varg = {0};
 349  384          nfsauth_res_t             res = {0};
 350  385          XDR                       xdrs;
 351  386          size_t                    absz;
 352  387          caddr_t                   abuf;
 353  388          int                       last = 0;
 354  389          door_arg_t                da;
 355  390          door_info_t               di;
 356  391          door_handle_t             dh;
↓ open down ↓ 52 lines elided ↑ open up ↑
 409  444           * expected and doesn't pass the data to us.
 410  445           */
 411  446          da.data_ptr = (char *)abuf;
 412  447          da.data_size = absz;
 413  448          da.desc_ptr = NULL;
 414  449          da.desc_num = 0;
 415  450          da.rbuf = NULL;
 416  451          da.rsize = 1;
 417  452  
 418  453  retry:
 419      -        mutex_enter(&mountd_lock);
 420      -        dh = mountd_dh;
      454 +        mutex_enter(&nag->mountd_lock);
      455 +        dh = nag->mountd_dh;
 421  456          if (dh != NULL)
 422  457                  door_ki_hold(dh);
 423      -        mutex_exit(&mountd_lock);
      458 +        mutex_exit(&nag->mountd_lock);
 424  459  
 425  460          if (dh == NULL) {
 426  461                  /*
 427  462                   * The rendezvous point has not been established yet!
 428  463                   * This could mean that either mountd(1m) has not yet
 429  464                   * been started or that _this_ routine nuked the door
 430  465                   * handle after receiving an EINTR for a REVOKED door.
 431  466                   *
 432  467                   * Returning NFSAUTH_DROP will cause the NFS client
 433  468                   * to retransmit the request, so let's try to be more
↓ open down ↓ 49 lines elided ↑ open up ↑
 483  518                          door_ki_rele(dh);
 484  519  
 485  520                          if (di.di_attributes & DOOR_REVOKED) {
 486  521                                  /*
 487  522                                   * The server barfed and revoked
 488  523                                   * the (existing) door on us; we
 489  524                                   * want to wait to give smf(5) a
 490  525                                   * chance to restart mountd(1m)
 491  526                                   * and establish a new door handle.
 492  527                                   */
 493      -                                mutex_enter(&mountd_lock);
 494      -                                if (dh == mountd_dh) {
 495      -                                        door_ki_rele(mountd_dh);
 496      -                                        mountd_dh = NULL;
      528 +                                mutex_enter(&nag->mountd_lock);
      529 +                                if (dh == nag->mountd_dh) {
      530 +                                        door_ki_rele(nag->mountd_dh);
      531 +                                        nag->mountd_dh = NULL;
 497  532                                  }
 498      -                                mutex_exit(&mountd_lock);
      533 +                                mutex_exit(&nag->mountd_lock);
 499  534                                  delay(hz);
 500  535                                  goto retry;
 501  536                          }
 502  537                          /*
 503  538                           * If the door was _not_ revoked on us,
 504  539                           * then more than likely we took an INTR,
 505  540                           * so we need to fail the operation.
 506  541                           */
 507  542                          goto fail;
 508  543                  }
↓ open down ↓ 71 lines elided ↑ open up ↑
 580  615                          /* NOTREACHED */
 581  616          }
 582  617  
 583  618          xdr_free(xdr_nfsauth_res, (char *)&res);
 584  619          kmem_free(abuf, absz);
 585  620  
 586  621          return (TRUE);
 587  622  }
 588  623  
 589  624  static void
 590      -nfsauth_refresh_thread(void)
      625 +nfsauth_refresh_thread(nfsauth_globals_t *nag)
 591  626  {
 592  627          refreshq_exi_node_t     *ren;
 593  628          refreshq_auth_node_t    *ran;
 594  629  
 595  630          struct exportinfo       *exi;
 596  631  
 597  632          int                     access;
 598  633          bool_t                  retrieval;
 599  634  
 600  635          callb_cpr_t             cprinfo;
 601  636  
 602      -        CALLB_CPR_INIT(&cprinfo, &refreshq_lock, callb_generic_cpr,
      637 +        CALLB_CPR_INIT(&cprinfo, &nag->refreshq_lock, callb_generic_cpr,
 603  638              "nfsauth_refresh");
 604  639  
 605  640          for (;;) {
 606      -                mutex_enter(&refreshq_lock);
 607      -                if (refreshq_thread_state != REFRESHQ_THREAD_RUNNING) {
      641 +                mutex_enter(&nag->refreshq_lock);
      642 +                if (nag->refreshq_thread_state != REFRESHQ_THREAD_RUNNING) {
 608  643                          /* Keep the hold on the lock! */
 609  644                          break;
 610  645                  }
 611  646  
 612      -                ren = list_remove_head(&refreshq_queue);
      647 +                ren = list_remove_head(&nag->refreshq_queue);
 613  648                  if (ren == NULL) {
 614  649                          CALLB_CPR_SAFE_BEGIN(&cprinfo);
 615      -                        cv_wait(&refreshq_cv, &refreshq_lock);
 616      -                        CALLB_CPR_SAFE_END(&cprinfo, &refreshq_lock);
 617      -                        mutex_exit(&refreshq_lock);
      650 +                        cv_wait(&nag->refreshq_cv, &nag->refreshq_lock);
      651 +                        CALLB_CPR_SAFE_END(&cprinfo, &nag->refreshq_lock);
      652 +                        mutex_exit(&nag->refreshq_lock);
 618  653                          continue;
 619  654                  }
 620      -                mutex_exit(&refreshq_lock);
      655 +                mutex_exit(&nag->refreshq_lock);
 621  656  
 622  657                  exi = ren->ren_exi;
 623  658                  ASSERT(exi != NULL);
 624  659  
 625  660                  /*
 626  661                   * Since the ren was removed from the refreshq_queue above,
 627  662                   * this is the only thread aware about the ren existence, so we
 628  663                   * have the exclusive ownership of it and we do not need to
 629  664                   * protect it by any lock.
 630  665                   */
↓ open down ↓ 26 lines elided ↑ open up ↑
 657  692                           * close to the refreshq_thread_state check.
 658  693                           *
 659  694                           * The check for the refreshq_thread_state value here
 660  695                           * is purely advisory to allow the faster
 661  696                           * nfsauth_refresh_thread() shutdown.  In a case we
 662  697                           * will miss such advisory, nothing catastrophic
 663  698                           * happens: we will just spin longer here before the
 664  699                           * shutdown.
 665  700                           */
 666  701                          if (p->auth_state == NFS_AUTH_INVALID ||
 667      -                            refreshq_thread_state != REFRESHQ_THREAD_RUNNING) {
      702 +                            nag->refreshq_thread_state !=
      703 +                            REFRESHQ_THREAD_RUNNING) {
 668  704                                  mutex_exit(&p->auth_lock);
 669  705  
 670  706                                  if (p->auth_state == NFS_AUTH_INVALID)
 671  707                                          nfsauth_free_node(p);
 672  708  
 673  709                                  strfree(netid);
 674  710  
 675  711                                  continue;
 676  712                          }
 677  713  
↓ open down ↓ 14 lines elided ↑ open up ↑
 692  728                          /*
 693  729                           * The first caching of the access rights
 694  730                           * is done with the netid pulled out of the
 695  731                           * request from the client. All subsequent
 696  732                           * users of the cache may or may not have
 697  733                           * the same netid. It doesn't matter. So
 698  734                           * when we refresh, we simply use the netid
 699  735                           * of the request which triggered the
 700  736                           * refresh attempt.
 701  737                           */
 702      -                        retrieval = nfsauth_retrieve(exi, netid,
      738 +                        retrieval = nfsauth_retrieve(nag, exi, netid,
 703  739                              p->auth_flavor, &p->auth_clnt->authc_addr, &access,
 704  740                              p->auth_clnt_cred, &uid, &gid, &ngids, &gids);
 705  741  
 706  742                          /*
 707  743                           * This can only be set in one other place
 708  744                           * and the state has to be NFS_AUTH_FRESH.
 709  745                           */
 710  746                          strfree(netid);
 711  747  
 712  748                          mutex_enter(&p->auth_lock);
↓ open down ↓ 22 lines elided ↑ open up ↑
 735  771                                          p->auth_freshness = gethrestime_sec();
 736  772                                  }
 737  773                                  p->auth_state = NFS_AUTH_FRESH;
 738  774  
 739  775                                  cv_broadcast(&p->auth_cv);
 740  776                                  mutex_exit(&p->auth_lock);
 741  777                          }
 742  778                  }
 743  779  
 744  780                  list_destroy(&ren->ren_authlist);
 745      -                exi_rele(ren->ren_exi);
      781 +                exi_rele(&ren->ren_exi);
 746  782                  kmem_free(ren, sizeof (refreshq_exi_node_t));
 747  783          }
 748  784  
 749      -        refreshq_thread_state = REFRESHQ_THREAD_HALTED;
 750      -        cv_broadcast(&refreshq_cv);
      785 +        nag->refreshq_thread_state = REFRESHQ_THREAD_HALTED;
      786 +        cv_broadcast(&nag->refreshq_cv);
 751  787          CALLB_CPR_EXIT(&cprinfo);
      788 +        DTRACE_PROBE(nfsauth__nfsauth__refresh__thread__exit);
 752  789          zthread_exit();
 753  790  }
 754  791  
 755  792  int
 756  793  nfsauth_cache_clnt_compar(const void *v1, const void *v2)
 757  794  {
 758  795          int c;
 759  796  
 760  797          const struct auth_cache_clnt *a1 = (const struct auth_cache_clnt *)v1;
 761  798          const struct auth_cache_clnt *a2 = (const struct auth_cache_clnt *)v2;
↓ open down ↓ 51 lines elided ↑ open up ↑
 813  850  }
 814  851  
 815  852  /*
 816  853   * Get the access information from the cache or callup to the mountd
 817  854   * to get and cache the access information in the kernel.
 818  855   */
 819  856  static int
 820  857  nfsauth_cache_get(struct exportinfo *exi, struct svc_req *req, int flavor,
 821  858      cred_t *cr, uid_t *uid, gid_t *gid, uint_t *ngids, gid_t **gids)
 822  859  {
      860 +        nfsauth_globals_t       *nag;
 823  861          struct netbuf           *taddrmask;
 824  862          struct netbuf           addr;   /* temporary copy of client's address */
 825  863          const struct netbuf     *claddr;
 826  864          avl_tree_t              *tree;
 827  865          struct auth_cache       ac;     /* used as a template for avl_find() */
 828  866          struct auth_cache_clnt  *c;
 829  867          struct auth_cache_clnt  acc;    /* used as a template for avl_find() */
 830  868          struct auth_cache       *p = NULL;
 831  869          int                     access;
 832  870  
 833  871          uid_t                   tmpuid;
 834  872          gid_t                   tmpgid;
 835  873          uint_t                  tmpngids;
 836  874          gid_t                   *tmpgids;
 837  875  
 838  876          avl_index_t             where;  /* used for avl_find()/avl_insert() */
 839  877  
 840  878          ASSERT(cr != NULL);
 841  879  
      880 +        nag = zone_getspecific(nfsauth_zone_key, curzone);
      881 +
 842  882          /*
 843  883           * Now check whether this client already
 844  884           * has an entry for this flavor in the cache
 845  885           * for this export.
 846  886           * Get the caller's address, mask off the
 847  887           * parts of the address that do not identify
 848  888           * the host (port number, etc), and then hash
 849  889           * it to find the chain of cache entries.
 850  890           */
 851  891  
↓ open down ↓ 139 lines elided ↑ open up ↑
 991 1031                   */
 992 1032                  auth_state_t state = NFS_AUTH_NEW;
 993 1033  
 994 1034                  p->auth_state = NFS_AUTH_WAITING;
 995 1035                  mutex_exit(&p->auth_lock);
 996 1036                  kmem_free(addr.buf, addr.maxlen);
 997 1037                  addr = p->auth_clnt->authc_addr;
 998 1038  
 999 1039                  atomic_inc_uint(&nfsauth_cache_miss);
1000 1040  
1001      -                res = nfsauth_retrieve(exi, svc_getnetid(req->rq_xprt), flavor,
1002      -                    &addr, &access, cr, &tmpuid, &tmpgid, &tmpngids, &tmpgids);
     1041 +                res = nfsauth_retrieve(nag, exi, svc_getnetid(req->rq_xprt),
     1042 +                    flavor, &addr, &access, cr, &tmpuid, &tmpgid, &tmpngids,
     1043 +                    &tmpgids);
1003 1044  
1004 1045                  p->auth_access = access;
1005 1046                  p->auth_time = p->auth_freshness = gethrestime_sec();
1006 1047  
1007 1048                  if (res == TRUE) {
1008 1049                          if (uid != NULL)
1009 1050                                  *uid = tmpuid;
1010 1051                          if (gid != NULL)
1011 1052                                  *gid = tmpgid;
1012 1053                          if (ngids != NULL && gids != NULL) {
↓ open down ↓ 60 lines elided ↑ open up ↑
1073 1114                          DTRACE_PROBE3(nfsauth__debug__cache__stale,
1074 1115                              struct exportinfo *, exi,
1075 1116                              struct auth_cache *, p,
1076 1117                              uint_t, nacr);
1077 1118  
1078 1119                          ran = kmem_alloc(sizeof (refreshq_auth_node_t),
1079 1120                              KM_SLEEP);
1080 1121                          ran->ran_auth = p;
1081 1122                          ran->ran_netid = strdup(svc_getnetid(req->rq_xprt));
1082 1123  
1083      -                        mutex_enter(&refreshq_lock);
     1124 +                        mutex_enter(&nag->refreshq_lock);
     1125 +
     1126 +                        if (nag->refreshq_thread_state ==
     1127 +                            REFRESHQ_THREAD_NEED_CREATE) {
     1128 +                                /* Launch nfsauth refresh thread */
     1129 +                                nag->refreshq_thread_state =
     1130 +                                    REFRESHQ_THREAD_RUNNING;
     1131 +                                (void) zthread_create(NULL, 0,
     1132 +                                    nfsauth_refresh_thread, nag, 0,
     1133 +                                    minclsyspri);
     1134 +                        }
     1135 +
1084 1136                          /*
1085      -                         * We should not add a work queue
1086      -                         * item if the thread is not
1087      -                         * accepting them.
     1137 +                         * We should not add a work queue item if the thread
     1138 +                         * is not accepting them.
1088 1139                           */
1089      -                        if (refreshq_thread_state == REFRESHQ_THREAD_RUNNING) {
     1140 +                        if (nag->refreshq_thread_state ==
     1141 +                            REFRESHQ_THREAD_RUNNING) {
1090 1142                                  refreshq_exi_node_t *ren;
1091 1143  
1092 1144                                  /*
1093 1145                                   * Is there an existing exi_list?
1094 1146                                   */
1095      -                                for (ren = list_head(&refreshq_queue);
     1147 +                                for (ren = list_head(&nag->refreshq_queue);
1096 1148                                      ren != NULL;
1097      -                                    ren = list_next(&refreshq_queue, ren)) {
     1149 +                                    ren = list_next(&nag->refreshq_queue,
     1150 +                                    ren)) {
1098 1151                                          if (ren->ren_exi == exi) {
1099 1152                                                  list_insert_tail(
1100 1153                                                      &ren->ren_authlist, ran);
1101 1154                                                  break;
1102 1155                                          }
1103 1156                                  }
1104 1157  
1105 1158                                  if (ren == NULL) {
1106 1159                                          ren = kmem_alloc(
1107 1160                                              sizeof (refreshq_exi_node_t),
↓ open down ↓ 2 lines elided ↑ open up ↑
1110 1163                                          exi_hold(exi);
1111 1164                                          ren->ren_exi = exi;
1112 1165  
1113 1166                                          list_create(&ren->ren_authlist,
1114 1167                                              sizeof (refreshq_auth_node_t),
1115 1168                                              offsetof(refreshq_auth_node_t,
1116 1169                                              ran_node));
1117 1170  
1118 1171                                          list_insert_tail(&ren->ren_authlist,
1119 1172                                              ran);
1120      -                                        list_insert_tail(&refreshq_queue, ren);
     1173 +                                        list_insert_tail(&nag->refreshq_queue,
     1174 +                                            ren);
1121 1175                                  }
1122 1176  
1123      -                                cv_broadcast(&refreshq_cv);
     1177 +                                cv_broadcast(&nag->refreshq_cv);
1124 1178                          } else {
1125 1179                                  strfree(ran->ran_netid);
1126 1180                                  kmem_free(ran, sizeof (refreshq_auth_node_t));
1127 1181                          }
1128 1182  
1129      -                        mutex_exit(&refreshq_lock);
     1183 +                        mutex_exit(&nag->refreshq_lock);
1130 1184                  } else {
1131 1185                          mutex_exit(&p->auth_lock);
1132 1186                  }
1133 1187  
1134 1188                  nach = atomic_inc_uint_nv(&nfsauth_cache_hit);
1135 1189                  DTRACE_PROBE2(nfsauth__debug__cache__hit,
1136 1190                      uint_t, nach,
1137 1191                      time_t, refresh);
1138 1192  
1139 1193                  kmem_free(addr.buf, addr.maxlen);
↓ open down ↓ 5 lines elided ↑ open up ↑
1145 1199          crfree(ac.auth_clnt_cred);
1146 1200  
1147 1201          /*
1148 1202           * Retrieve the required data without caching.
1149 1203           */
1150 1204  
1151 1205          ASSERT(p == NULL);
1152 1206  
1153 1207          atomic_inc_uint(&nfsauth_cache_miss);
1154 1208  
1155      -        if (nfsauth_retrieve(exi, svc_getnetid(req->rq_xprt), flavor, &addr,
1156      -            &access, cr, &tmpuid, &tmpgid, &tmpngids, &tmpgids)) {
     1209 +        if (nfsauth_retrieve(nag, exi, svc_getnetid(req->rq_xprt), flavor,
     1210 +            &addr, &access, cr, &tmpuid, &tmpgid, &tmpngids, &tmpgids)) {
1157 1211                  if (uid != NULL)
1158 1212                          *uid = tmpuid;
1159 1213                  if (gid != NULL)
1160 1214                          *gid = tmpgid;
1161 1215                  if (ngids != NULL && gids != NULL) {
1162 1216                          *ngids = tmpngids;
1163 1217                          *gids = tmpgids;
1164 1218                  } else {
1165 1219                          kmem_free(tmpgids, tmpngids * sizeof (gid_t));
1166 1220                  }
↓ open down ↓ 237 lines elided ↑ open up ↑
1404 1458   * memory is low. Free unused cache entries.
1405 1459   * If that's not enough, the VM system will
1406 1460   * call again for some more.
1407 1461   */
/*ARGSUSED*/
void
exi_cache_reclaim(void *cdrarg)
{
	int i;
	struct exportinfo *exi;
	/*
	 * Export state for the current zone; presumably each zone has its
	 * own export table now that the NFS server runs in zones — confirm
	 * against nfs_get_export()'s definition.
	 */
	nfs_export_t *ne = nfs_get_export();

	/*
	 * Reader access suffices: we only traverse the hash table and trim
	 * per-exportinfo caches, we never add/remove exptable entries here.
	 */
	rw_enter(&ne->exported_lock, RW_READER);

	/* Walk every fid-hash chain and trim each export's auth cache. */
	for (i = 0; i < EXPTABLESIZE; i++) {
		for (exi = ne->exptable[i]; exi; exi = exi->fid_hash.next)
			exi_cache_trim(exi);
	}

	rw_exit(&ne->exported_lock);

	/* Count reclaim passes for observability (lock-free stat bump). */
	atomic_inc_uint(&nfsauth_cache_reclaim);
}
1427 1481  
1428 1482  void
1429 1483  exi_cache_trim(struct exportinfo *exi)
1430 1484  {
1431 1485          struct auth_cache_clnt *c;
1432 1486          struct auth_cache_clnt *nextc;
1433 1487          struct auth_cache *p;
↓ open down ↓ 88 lines elided ↑ open up ↑
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX