Clean up merge problems with illumos#11083 (nfs-zone)
    
          --- old/usr/src/uts/common/klm/nlm_impl.h
          +++ new/usr/src/uts/common/klm/nlm_impl.h
   1    1  /*
   2    2   * Copyright (c) 2008 Isilon Inc http://www.isilon.com/
   3    3   * Authors: Doug Rabson <dfr@rabson.org>
   4    4   * Developed with Red Inc: Alfred Perlstein <alfred@freebsd.org>
   5    5   *
   6    6   * Redistribution and use in source and binary forms, with or without
   7    7   * modification, are permitted provided that the following conditions
   8    8   * are met:
   9    9   * 1. Redistributions of source code must retain the above copyright
  10   10   *    notice, this list of conditions and the following disclaimer.
  11   11   * 2. Redistributions in binary form must reproduce the above copyright
  12   12   *    notice, this list of conditions and the following disclaimer in the
  13   13   *    documentation and/or other materials provided with the distribution.
  14   14   *
  15   15   * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
  16   16   * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  17   17   * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  18   18   * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
  19   19   * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  20   20   * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  21   21   * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  22   22   * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  23   23   * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  24   24   * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  25   25   * SUCH DAMAGE.
  26   26   *
  27   27   * $FreeBSD$
  28   28   */
  29   29  
  30   30  /*
  31   31   * Copyright 2012 Nexenta Systems, Inc.  All rights reserved.
  32   32   * Copyright (c) 2012 by Delphix. All rights reserved.
  33   33   * Copyright 2016 Joyent, Inc.
  34   34   */
  35   35  
  36   36  /*
  37   37   * NFS Lock Manager (NLM) private declarations, etc.
  38   38   *
  39   39   * Source code derived from FreeBSD nlm.h
  40   40   */
  41   41  
  42   42  #ifndef _NLM_NLM_H_
  43   43  #define _NLM_NLM_H_
  44   44  
  45   45  #include <sys/cmn_err.h>
  46   46  #include <sys/queue.h>
  47   47  #include <sys/modhash.h>
  48   48  #include <sys/avl.h>
  49   49  
  50   50  #define RPC_MSGOUT(args...)     cmn_err(CE_NOTE, args)
  51   51  #define NLM_ERR(...)            cmn_err(CE_NOTE, __VA_ARGS__)
  52   52  #define NLM_WARN(...)           cmn_err(CE_WARN, __VA_ARGS__)
  53   53  
  54   54  #ifndef SEEK_SET
  55   55  #define SEEK_SET        0
  56   56  #endif
  57   57  #ifndef SEEK_CUR
  58   58  #define SEEK_CUR        1
  59   59  #endif
  60   60  #ifndef SEEK_END
  61   61  #define SEEK_END        2
  62   62  #endif
  63   63  
  64   64  /*
  65   65   * Maximum offset supported by NLM calls using the older
  66   66   * (32-bit) versions of the protocol.
  67   67   */
  68   68  #define MAX_UOFF32      0xffffffffULL
  69   69  
  70   70  struct nlm_host;
  71   71  struct vnode;
  72   72  struct exportinfo;
  73   73  struct shrlock;
  74   74  struct _kthread;
  75   75  
  76   76  /*
  77   77   * How to read the code: probably the best place to start
  78   78   * is the nlm_host structure, which is the most important
  79   79   * structure in klmmod. nlm_host is closely tied to all
  80   80   * other NLM structures.
  81   81   *
  82   82   * There are three major locks used inside NLM:
  83   83   * 1) Global read-write lock (lm_lck) that is used to
  84   84   *    protect operations with sysid allocation and
  85   85   *    management of the zone-global structures for each
  86   86   *    zone.
  87   87   * 2) Zone global lock: (nlm_globals->lock) is a mutex
  88   88   *    used to protect all operations inside a particular
  89   89   *    zone.
  90   90   * 3) Host's lock: (nlm_host->nh_lock) is a per-host mutex
  91   91   *    used to protect host's internal fields and all
  92   92   *    operations with the given host.
  93   93   *
  94   94   * Lock order _must_ obey the following scheme:
  95   95   *  lm_lck then nlm_globals->lock then nlm_host->nh_lock
  96   96   *
  97   97   * Locks:
  98   98   * (g)          locked by lm_lck
  99   99   * (z)          locked by nlm_globals->lock
 100  100   * (l)          locked by host->nh_lock
 101  101   * (c)          const until freeing
 102  102   */
 103  103  
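A minimal sketch of the rule above (the helper name is hypothetical, not part
of this header): take lm_lck first, then the zone-global lock, then the
per-host lock, and release in reverse order.

    /*
     * Illustration only: acquiring the three NLM locks in the
     * mandated order.  Taking them any other way risks deadlock
     * against threads that follow the rule.
     */
    static void
    example_ordered_locking(struct nlm_globals *g, struct nlm_host *hostp)
    {
            rw_enter(&lm_lck, RW_READER);   /* 1: global rwlock (g) */
            mutex_enter(&g->lock);          /* 2: zone-global mutex (z) */
            mutex_enter(&hostp->nh_lock);   /* 3: per-host mutex (l) */

            /* ... safely inspect (g)-, (z)- and (l)-class fields ... */

            mutex_exit(&hostp->nh_lock);
            mutex_exit(&g->lock);
            rw_exit(&lm_lck);
    }
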
 104  104  /*
 105  105   * Callback functions for nlm_do_lock() and others.
 106  106   *
 107  107   * nlm_do_lock is unusual because it needs to handle the
 108  108   * reply itself instead of letting that happen the normal way.
 109  109   * It also needs to make an RPC call _back_ to the client when a
 110  110   * blocked lock request completes.
 111  111   *
 112  112   * We pass three callback functions to nlm_do_lock:
 113  113   *    nlm_reply_cb: send a normal RPC reply
 114  114   *      nlm_res_cb: do a _res (message style) RPC (call)
 115  115   * nlm_testargs_cb: do a "granted" RPC call (after blocking)
 116  116   * Only one of the 1st and 2nd is ever used.
 117  117   * The 3rd is used only for blocking locks.
 118  118   *
 119  119   * We also use callback functions for all the _msg variants
 120  120   * of the NLM svc calls, where the reply is a reverse call.
 121  121   * The nlm_testres_cb is used by the _test_msg svc calls.
 122  122   * The nlm_res_cb type is used by the other _msg calls.
 123  123   */
 124  124  typedef bool_t (*nlm_reply_cb)(SVCXPRT *, nlm4_res *);
 125  125  typedef enum clnt_stat (*nlm_res_cb)(nlm4_res *, void *, CLIENT *);
 126  126  typedef enum clnt_stat (*nlm_testargs_cb)(nlm4_testargs *, void *, CLIENT *);
 127  127  typedef enum clnt_stat (*nlm_testres_cb)(nlm4_testres *, void *, CLIENT *);
 128  128  
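As a concrete illustration of the convention above, an NLM4_LOCK service
routine could wire the callbacks up as in the sketch below. All example_*
names are hypothetical; this is not the actual nlm_dispatch.c code.

    /* The nlm_reply_cb role: send a normal RPC reply. */
    static bool_t
    example_reply_cb(SVCXPRT *transp, nlm4_res *resp)
    {
            return (svc_sendreply(transp, (xdrproc_t)xdr_nlm4_res,
                (caddr_t)resp));
    }

    /* The nlm_testargs_cb role: the reverse NLM4_GRANTED call. */
    static enum clnt_stat
    example_granted_cb(nlm4_testargs *argp, void *resp, CLIENT *clnt)
    {
            struct timeval tmo = { 25, 0 };         /* illustrative */

            return (nlm_clnt_call(clnt, NLM4_GRANTED,
                (xdrproc_t)xdr_nlm4_testargs, (caddr_t)argp,
                (xdrproc_t)xdr_nlm4_res, (caddr_t)resp, tmo));
    }

    static void
    example_nlm4_lock_svc(nlm4_lockargs *argp, nlm4_res *resp,
        struct svc_req *sr)
    {
            nlm_do_lock(argp, resp, sr,
                example_reply_cb,       /* 1st: normal reply */
                NULL,                   /* 2nd: unused here */
                example_granted_cb);    /* 3rd: only if the lock blocks */
    }
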
 129  129  /*
 130  130   * NLM sleeping lock request.
 131  131   *
 132  132   * Sleeping lock requests are server-side-only objects
 133  133   * that are created when a client asks the server to add a new
 134  134   * sleeping lock and that lock needs to block.
 135  135   * The server keeps track of these requests in order to be
 136  136   * able to cancel them or clean them up.
 137  137   *
 138  138   * Sleeping lock requests are closely tied to a particular
 139  139   * vnode or, strictly speaking, to the NLM vhold object that
 140  140   * holds the vnode.
 141  141   *
 142  142   * struct nlm_slreq:
 143  143   *   nsr_fl: information about the file lock
 144  144   *   nsr_link: a list node used to store lock requests
 145  145   *             in the vhold object.
 146  146   */
 147  147  struct nlm_slreq {
 148  148          struct flock64          nsr_fl;
 149  149          TAILQ_ENTRY(nlm_slreq)  nsr_link;
 150  150  };
 151  151  TAILQ_HEAD(nlm_slreq_list, nlm_slreq);
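For illustration, scanning a vhold's request list with the <sys/queue.h>
macros might look like this (hypothetical function; the caller must hold the
owning host's nh_lock, since nv_slreqs is an (l)-class field):

    static struct nlm_slreq *
    example_slreq_lookup(struct nlm_vhold *nvp, const struct flock64 *fl)
    {
            struct nlm_slreq *srp;

            TAILQ_FOREACH(srp, &nvp->nv_slreqs, nsr_link) {
                    if (srp->nsr_fl.l_start == fl->l_start &&
                        srp->nsr_fl.l_len == fl->l_len &&
                        srp->nsr_fl.l_pid == fl->l_pid &&
                        srp->nsr_fl.l_type == fl->l_type)
                            return (srp);
            }

            return (NULL);
    }
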
 152  152  
 153  153  /*
 154  154   * The NLM vhold object is a wrapper around vnodes that remote
 155  155   * clients have locked (or added share reservations to)
 156  156   * on the NLM server. A vhold keeps the vnode held (by VN_HOLD())
 157  157   * while it has any locks or shares made by the parent host.
 158  158   * Vholds are used for two purposes:
 159  159   * 1) Holding the vnode (with VN_HOLD) while it has any locks;
 160  160   * 2) Keeping track of all vnodes a remote host has touched
 161  161   *    with lock/share operations on the NLM server, so that NLM
 162  162   *    knows which vnodes are potentially locked.
 163  163   *
 164  164   * Vholds are used on the server side only, where it's really
 165  165   * important to keep vnodes held while they potentially have
 166  166   * any locks/shares. In contrast, this isn't important on the client
 167  167   * side at all. By the time a vnode reaches the NLM client-side
 168  168   * code, it's already held (VN_HOLD) by the process calling the
 169  169   * lock/share function (it's referenced because the client calls
 170  170   * open() before making locks or shares).
 171  171   *
 172  172   * Each NLM host object has a collection of vholds associated
 173  173   * with the vnodes the host touched earlier by adding locks or
 174  174   * shares. This collection lets us decide whether the host is
 175  175   * still in use: while it has any vhold objects it's considered
 176  176   * in use; otherwise we're free to destroy it.
 177  177   *
 178  178   * Vholds are destroyed by the NLM garbage collector thread, which
 179  179   * periodically checks whether they have any locks or shares.
 180  180   * Checking occurs when the parent host has been untouched by
 181  181   * client or server for some period of time.
 182  182   *
 183  183   * struct nlm_vhold:
 184  184   *   nv_vp: a pointer to the vnode held by the given nlm_vhold
 185  185   *   nv_refcnt: reference counter (nonzero while the vhold is in use)
 186  186   *   nv_slreqs: sleeping lock requests that were made on the nv_vp
 187  187   *   nv_link: list node to store vholds in host's nh_vnodes_list
 188  188   */
 189  189  struct nlm_vhold {
 190  190          vnode_t                 *nv_vp;    /* (c) */
 191  191          int                     nv_refcnt; /* (l) */
 192  192          struct nlm_slreq_list   nv_slreqs; /* (l) */
 193  193          TAILQ_ENTRY(nlm_vhold)  nv_link;   /* (l) */
 194  194  };
 195  195  TAILQ_HEAD(nlm_vhold_list, nlm_vhold);
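A typical server-side caller brackets its lock/share work with the vhold
get/release pair declared later in this header. A sketch (hypothetical
function; real callers also check grace period and host state):

    static void
    example_vhold_usage(struct nlm_host *hostp, vnode_t *vp)
    {
            struct nlm_vhold *nvp;

            /* Bumps nv_refcnt; the vnode is kept held via VN_HOLD(). */
            nvp = nlm_vhold_get(hostp, vp);

            /* ... register locks or shares against nvp->nv_vp ... */

            /* Drop the reference; the GC may later free an idle vhold. */
            nlm_vhold_release(hostp, nvp);
    }
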
 196  196  
 197  197  /*
 198  198   * Client-side sleeping lock state.
 199  199   * - NLM_SL_BLOCKED: some thread is blocked on this lock
 200  200   * - NLM_SL_GRANTED: server granted us the lock
 201  201   * - NLM_SL_CANCELLED: the lock is cancelled (i.e. invalid/inactive)
 202  202   */
 203  203  typedef enum nlm_slock_state {
 204  204          NLM_SL_UNKNOWN = 0,
 205  205          NLM_SL_BLOCKED,
 206  206          NLM_SL_GRANTED,
 207  207          NLM_SL_CANCELLED
 208  208  } nlm_slock_state_t;
 209  209  
 210  210  /*
 211  211   * A client-side sleeping lock request (set by F_SETLKW),
 212  212   * stored in the nlm_slocks collection of nlm_globals.
 213  213   *
 214  214   *  struct nlm_slock
 215  215   *   nsl_state: Sleeping lock state.
 216  216   *             (see nlm_slock_state for more information)
 217  217   *   nsl_cond: Condvar that is used when the sleeping lock
 218  218   *            needs to wait for a GRANT callback
 219  219   *            or cancellation event.
 220  220   *   nsl_lock: nlm4_lock structure that is sent to the server
 221  221   *   nsl_fh: Filehandle that corresponds to nsl_vp
 222  222   *   nsl_host: The host owning this sleeping lock
 223  223   *   nsl_vp: The vnode the sleeping lock is waiting on.
 224  224   *   nsl_link: A list node for nlm_globals->nlm_slocks list.
 225  225   */
 226  226  struct nlm_slock {
 227  227          nlm_slock_state_t       nsl_state; /* (z) */
 228  228          kcondvar_t              nsl_cond;  /* (z) */
 229  229          nlm4_lock               nsl_lock;  /* (c) */
 230  230          struct netobj           nsl_fh;    /* (c) */
 231  231          struct nlm_host         *nsl_host; /* (c) */
 232  232          struct vnode            *nsl_vp;   /* (c) */
 233  233          TAILQ_ENTRY(nlm_slock)  nsl_link;  /* (z) */
 234  234  };
 235  235  TAILQ_HEAD(nlm_slock_list, nlm_slock);
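Pieced together from the slock functions declared later in this header, the
client-side blocking path looks roughly like the sketch below (hypothetical
function; error handling and retransmission omitted):

    static int
    example_blocking_lock(struct nlm_globals *g, struct nlm_host *hostp,
        struct nlm4_lock *alock, vnode_t *vp, uint_t timo_secs)
    {
            struct nlm_slock *nslp;
            int error;

            /* Enter NLM_SL_BLOCKED state before sending the request. */
            nslp = nlm_slock_register(g, hostp, alock, vp);

            /* ... send the blocking LOCK request via nlm_lock_rpc() ... */

            /* Sleep on nsl_cond until GRANTED, cancellation or timeout. */
            error = nlm_slock_wait(g, nslp, timo_secs);

            nlm_slock_unregister(g, nslp);
            return (error);
    }
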
 236  236  
 237  237  /*
 238  238   * Share reservation description. NLM tracks all active
 239  239   * share reservations made by the client side, so that
 240  240   * they can be easily recovered if the remote NLM server
 241  241   * reboots. Share reservation tracking is also useful
 242  242   * when NLM needs to determine whether a host owns any
 243  243   * resources on the system and thus can't be destroyed.
 244  244   *
 245  245   * nlm_shres:
 246  246   *   ns_shr: share reservation description
 247  247   *   ns_vp: a pointer to the vnode the share reservation is on
 248  248   *   ns_next: the next nlm_shres instance (or NULL if there is
 249  249   *            no next item).
 250  250   */
 251  251  struct nlm_shres {
 252  252          struct shrlock          *ns_shr;
 253  253          vnode_t                 *ns_vp;
 254  254          struct nlm_shres        *ns_next;
 255  255  };
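The snapshot interface declared later (nlm_get_active_shres() and
nlm_free_shrlist()) is how this ns_next-linked list is normally walked; an
illustrative sketch:

    static void
    example_walk_shres(struct nlm_host *hostp)
    {
            struct nlm_shres *list, *nsp;

            list = nlm_get_active_shres(hostp);
            for (nsp = list; nsp != NULL; nsp = nsp->ns_next) {
                    /* ... reclaim or inspect nsp->ns_shr on nsp->ns_vp ... */
            }
            nlm_free_shrlist(list);
    }
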
 256  256  
 257  257  /*
 258  258   * NLM RPC handle object.
 259  259   *
 260  260   * In the kRPC subsystem it's unsafe to use one RPC handle from
 261  261   * several threads simultaneously; it was designed so that
 262  262   * each thread has to create the RPC handle it'll use.
 263  263   * RPC handle creation can be quite an expensive operation,
 264  264   * especially with session-oriented protocols (such as TCP) that
 265  265   * need to establish a session first. The NLM RPC handle object is
 266  266   * a wrapper around a kRPC handle object that can be cached and
 267  267   * reused later. We store all created RPC handles for a given host
 268  268   * in the host's RPC handle cache, so threads making new requests
 269  269   * can simply take ready objects from the cache. That improves
 270  270   * NLM performance.
 271  271   *
 272  272   * nlm_rpc_t:
 273  273   *   nr_handle: the kRPC handle itself.
 274  274   *   nr_vers: the version of the NLM protocol the kRPC handle
 275  275   *            was created for.
 276  276   *   nr_link: a list node used to store NLM RPC handles in the
 277  277   *            host's RPC handle cache.
 278  278   */
 279  279  typedef struct nlm_rpc {
 280  280          CLIENT    *nr_handle;           /* (l) */
 281  281          rpcvers_t  nr_vers;             /* (c) */
 282  282          TAILQ_ENTRY(nlm_rpc) nr_link;   /* (l) */
 283  283  } nlm_rpc_t;
 284  284  TAILQ_HEAD(nlm_rpch_list, nlm_rpc);
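In use, the cache follows a borrow/call/return pattern built on the
nlm_host_get_rpc()/nlm_host_rele_rpc() pair declared later. A sketch
(hypothetical function):

    static enum clnt_stat
    example_null_ping(struct nlm_host *hostp, int vers)
    {
            nlm_rpc_t *rpcp;
            enum clnt_stat stat;

            /* Borrow a cached handle (or have a fresh one created). */
            if (nlm_host_get_rpc(hostp, vers, &rpcp) != 0)
                    return (RPC_SYSTEMERROR);

            stat = nlm_null_rpc(rpcp->nr_handle, rpcp->nr_vers);

            /* Return the handle so other threads can reuse it. */
            nlm_host_rele_rpc(hostp, rpcp);
            return (stat);
    }
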
 285  285  
 286  286  /*
 287  287   * Describes the state of an NLM host's RPC binding.
 288  288   * The RPC binding can be in one of three states:
 289  289   * 1) NRPCB_NEED_UPDATE:
 290  290   *    Binding is either not initialized or stale.
 291  291   * 2) NRPCB_UPDATE_INPROGRESS:
 292  292   *    While some thread updates the host's RPC binding,
 293  293   *    it sets the binding's state to NRPCB_UPDATE_INPROGRESS,
 294  294   *    which denotes that other threads must wait until
 295  295   *    the update process is finished.
 296  296   * 3) NRPCB_UPDATED:
 297  297   *    Denotes that the host's RPC binding is both initialized
 298  298   *    and fresh.
 299  299   */
 300  300  enum nlm_rpcb_state {
 301  301          NRPCB_NEED_UPDATE = 0,
 302  302          NRPCB_UPDATE_INPROGRESS,
 303  303          NRPCB_UPDATED
 304  304  };
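The waiting side of this small state machine is a classic condition-variable
loop; a sketch (the real update logic lives elsewhere in klmmod):

    static void
    example_wait_for_binding(struct nlm_host *hostp)
    {
            mutex_enter(&hostp->nh_lock);
            while (hostp->nh_rpcb_state == NRPCB_UPDATE_INPROGRESS)
                    cv_wait(&hostp->nh_rpcb_cv, &hostp->nh_lock);

            /*
             * The state is now NRPCB_UPDATED or NRPCB_NEED_UPDATE;
             * nh_rpcb_ustat says whether the last update succeeded.
             */
            mutex_exit(&hostp->nh_lock);
    }
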
 305  305  
 306  306  /*
 307  307   * NLM host flags
 308  308   */
 309  309  #define NLM_NH_MONITORED 0x01
 310  310  #define NLM_NH_RECLAIM   0x02
 311  311  #define NLM_NH_INIDLE    0x04
 312  312  #define NLM_NH_SUSPEND   0x08
 313  313  
 314  314  /*
 315  315   * The NLM host object is the most important structure in NLM.
 316  316   * It identifies a remote client, a remote server, or both.
 317  317   * The NLM host object keeps track of all vnodes the client/server
 318  318   * has locked and all sleeping locks it has. All lock/unlock
 319  319   * operations are done using the host object.
 320  320   *
 321  321   * nlm_host:
 322  322   *   nh_lock: a mutex protecting host object fields
 323  323   *   nh_refs: reference counter. Identifies how many threads
 324  324   *            are using this host object.
 325  325   *   nh_link: a list node for keeping host in zone-global list.
 326  326   *   nh_by_addr: an AVL tree node for keeping host in zone-global tree.
 327  327   *              The host can be looked up in the tree by its
 328  328   *              <netid, address> pair.
 329  329   *   nh_name: host name.
 330  330   *   nh_netid: netid string identifying the transport type the host uses.
 331  331   *   nh_knc: host's knetconfig (used by kRPC subsystem).
 332  332   *   nh_addr: host's address (either IPv4 or IPv6).
 333  333   *   nh_sysid: unique sysid associated with this host.
 334  334   *   nh_state: the host's last known state, as reported by NSM.
 335  335   *   nh_flags: ORed host flags.
 336  336   *   nh_idle_timeout: host idle timeout; when it expires, the host is freed.
 337  337   *   nh_recl_cv: condition variable used for reporting that the
 338  338   *               reclamation process is finished.
 339  339   *   nh_rpcb_cv: condition variable that is used to make sure
 340  340   *               that only one thread renews host's RPC binding.
 341  341   *   nh_rpcb_ustat: error code returned by RPC binding update operation.
 342  342   *   nh_rpcb_state: host's RPC binding state (see enum nlm_rpcb_state
 343  343   *                  for more details).
 344  344   *   nh_rpchc: host's RPC handles cache.
 345  345   *   nh_vholds_by_vp: a hash table of all vholds host owns. (used for lookup)
 346  346   *   nh_vholds_list: a linked list of all vholds host owns. (used for iteration)
 347  347   *   nh_shrlist: a list of all active share reservations on the client side.
 348  348   *   nh_reclaimer: a pointer to the reclamation thread (kthread_t),
 349  349   *                 NULL if the reclamation thread doesn't exist.
 350  350   */
 351  351  struct nlm_host {
 352  352          kmutex_t                nh_lock;                /* (c) */
 353  353          volatile uint_t         nh_refs;                /* (z) */
 354  354          TAILQ_ENTRY(nlm_host)   nh_link;                /* (z) */
 355  355          avl_node_t              nh_by_addr;             /* (z) */
 356  356          char                    *nh_name;               /* (c) */
 357  357          char                    *nh_netid;              /* (c) */
 358  358          struct knetconfig       nh_knc;                 /* (c) */
 359  359          struct netbuf           nh_addr;                /* (c) */
 360  360          sysid_t                 nh_sysid;               /* (c) */
 361  361          int32_t                 nh_state;               /* (z) */
 362  362          clock_t                 nh_idle_timeout;        /* (z) */
 363  363          uint8_t                 nh_flags;               /* (z) */
 364  364          kcondvar_t              nh_recl_cv;             /* (z) */
 365  365          kcondvar_t              nh_rpcb_cv;             /* (l) */
 366  366          enum clnt_stat          nh_rpcb_ustat;          /* (l) */
 367  367          enum nlm_rpcb_state     nh_rpcb_state;          /* (l) */
 368  368          struct nlm_rpch_list    nh_rpchc;               /* (l) */
 369  369          mod_hash_t              *nh_vholds_by_vp;       /* (l) */
 370  370          struct nlm_vhold_list   nh_vholds_list;         /* (l) */
 371  371          struct nlm_shres        *nh_shrlist;            /* (l) */
 372  372          kthread_t               *nh_reclaimer;          /* (l) */
 373  373  };
 374  374  TAILQ_HEAD(nlm_host_list, nlm_host);
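Host objects are reference counted: lookups return a referenced host that
must be paired with nlm_host_release(). A usage sketch built on the host API
declared later in this header (hypothetical function):

    static void
    example_host_lookup(struct nlm_globals *g, char *name,
        const char *netid, struct netbuf *addr)
    {
            struct nlm_host *hostp;

            /* Find an existing host or create one; nh_refs is bumped. */
            hostp = nlm_host_findcreate(g, name, netid, addr);
            if (hostp == NULL)
                    return;

            /* ... perform lock/share operations on behalf of hostp ... */

            nlm_host_release(g, hostp);
    }
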
 375  375  
 376  376  /*
 377  377   * The nlm_nsm structure describes an RPC client handle that can
 378  378   * be used to communicate with the local NSM via kRPC.
 379  379   *
 380  380   * We need to wrap the handle in an nlm_nsm structure because kRPC
 381  381   * cannot share one handle between several threads. It's assumed
 382  382   * that NLM uses only one NSM handle per zone, so all RPC operations
 383  383   * on the NSM handle are serialized by the ns_sem semaphore.
 384  384   *
 385  385   * nlm_nsm also contains a refcnt field used for reference counting.
 386  386   * It's used because there exists a possibility of simultaneous
 387  387   * execution of the NLM shutdown operation and host monitor/unmonitor
 388  388   * operations.
 389  389   *
 390  390   * struct nlm_nsm:
 391  391   *  ns_sem: a semaphore for serializing network operations to statd
 392  392   *  ns_knc: a knetconfig describing the transport used for communication
 393  393   *  ns_addr: the address of the local statd we're talking to
 394  394   *  ns_handle: an RPC handle used for talking to local statd using the status
 395  395   *      monitor protocol (SM_PROG)
 396  396   *  ns_addr_handle: an RPC handle used for talking to local statd using the
 397  397   *      address registration protocol (NSM_ADDR_PROGRAM)
 398  398   */
 399  399  struct nlm_nsm {
 400  400          ksema_t                 ns_sem;
 401  401          struct knetconfig       ns_knc;          /* (c) */
 402  402          struct netbuf           ns_addr;         /* (c) */
 403  403          CLIENT                  *ns_handle;      /* (c) */
 404  404          CLIENT                  *ns_addr_handle; /* (c) */
 405  405  };
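Serialization on ns_sem looks like the sketch below (hypothetical wrapper;
the real monitor/unmonitor calls live in klmmod): every RPC on the shared
handle is bracketed by the semaphore, since one kRPC CLIENT must not be used
by several threads at once.

    static enum clnt_stat
    example_nsm_rpc(struct nlm_nsm *nsm, rpcproc_t proc,
        xdrproc_t xargs, caddr_t argsp, xdrproc_t xres, caddr_t resp)
    {
            struct timeval tmo = { 25, 0 };         /* illustrative */
            enum clnt_stat stat;

            sema_p(&nsm->ns_sem);
            stat = nlm_clnt_call(nsm->ns_handle, proc, xargs, argsp,
                xres, resp, tmo);
            sema_v(&nsm->ns_sem);

            return (stat);
    }
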
 406  406  
 407  407  /*
 408  408   * Could use flock.h flk_nlm_status_t instead, but
 409  409   * prefer our own enum with initial zero...
 410  410   */
 411  411  typedef enum {
 412  412          NLM_ST_DOWN = 0,
 413  413          NLM_ST_STOPPING,
 414  414          NLM_ST_UP,
 415  415          NLM_ST_STARTING
 416  416  } nlm_run_status_t;
 417  417  
 418  418  /*
 419  419   * The nlm_globals structure allows NLM to be zone-aware. The
 420  420   * structure collects all "global variables" NLM has for each zone.
 421  421   *
 422  422   * struct nlm_globals:
 423  423   * lock: mutex protecting all operations inside the given zone
 424  424   * grace_threshold: grace period expiration time (in ticks)
 425  425   * lockd_pid: PID of lockd user space daemon
 426  426   * run_status: run status of klmmod inside given zone
 427  427   * nsm_state: state obtained from local statd during klmmod startup
 428  428   * nlm_gc_thread: garbage collector thread
 429  429   * nlm_gc_sched_cv: condvar that can be signalled to wake up the GC
 430  430   * nlm_gc_finish_cv: condvar that is signalled just before GC thread exits
 431  431   * nlm_nsm: an object describing RPC handle used for talking to local statd
 432  432   * nlm_hosts_tree: an AVL tree of all hosts in the given zone
 433  433   *                 (used for hosts lookup by <netid, address> pair)
 434  434   * nlm_hosts_hash: a hash table of all hosts in the given zone
 435  435   *                 (used for hosts lookup by sysid)
 436  436   * nlm_idle_hosts: a list of all hosts that are in the idle state (i.e. unused)
 437  437   * nlm_slocks: a list of all client-side sleeping locks in the zone
 438  438   * cn_idle_tmo: a value of idle timeout (in seconds) obtained from lockd
 439  439   * grace_period: a value of grace period (in seconds) obtained from lockd
 440  440   * retrans_tmo: a value of retransmission timeout (in seconds) obtained
 441  441   *              from lockd.
 442  442   * clean_lock: mutex used to serialize clear_locks calls.
 443  443   * nlm_link: a list node used for keeping all nlm_globals objects
 444  444   *           in one global linked list.
 445  445   */
 446  446  struct nlm_globals {
 447  447          kmutex_t                        lock;
 448  448          clock_t                         grace_threshold;        /* (z) */
 449  449          pid_t                           lockd_pid;              /* (z) */
 450  450          nlm_run_status_t                run_status;             /* (z) */
 451  451          int32_t                         nsm_state;              /* (z) */
 452  452          kthread_t                       *nlm_gc_thread;         /* (z) */
 453  453          kcondvar_t                      nlm_gc_sched_cv;        /* (z) */
 454  454          kcondvar_t                      nlm_gc_finish_cv;       /* (z) */
 455  455          struct nlm_nsm                  nlm_nsm;                /* (z) */
 456  456          avl_tree_t                      nlm_hosts_tree;         /* (z) */
 457  457          mod_hash_t                      *nlm_hosts_hash;        /* (z) */
 458  458          struct nlm_host_list            nlm_idle_hosts;         /* (z) */
 459  459          struct nlm_slock_list           nlm_slocks;             /* (z) */
 460  460          int                             cn_idle_tmo;            /* (z) */
 461  461          int                             grace_period;           /* (z) */
 462  462          int                             retrans_tmo;            /* (z) */
      463 +        boolean_t                       nlm_v4_only;            /* (z) */
 463  464          zoneid_t                        nlm_zoneid;             /* (c) */
 464  465          kmutex_t                        clean_lock;             /* (c) */
 465  466          TAILQ_ENTRY(nlm_globals)        nlm_link;               /* (g) */
 466  467  };
 467  468  TAILQ_HEAD(nlm_globals_list, nlm_globals);
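Per-zone state is reached through the zone key declared below (extern
zone_key_t nlm_zone_key); the usual illumos pattern is a one-line lookup.
A sketch (the real code wraps this in a helper):

    #include <sys/zone.h>

    static struct nlm_globals *
    example_zone_globals(void)
    {
            /* curzone is the zone of the current thread's process. */
            return (zone_getspecific(nlm_zone_key, curzone));
    }
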
 468  469  
 469  470  
 470  471  /*
 471  472   * This is what we pass as the "owner handle" for NLM_LOCK.
 472  473   * This lets us find the blocked lock in NLM_GRANTED.
 473  474   * It also exposes on the wire what we're using as the
 474  475   * sysid for any server, which can be very helpful for
 475  476   * problem diagnosis.  (Observability is good).
 476  477   */
 477  478  struct nlm_owner_handle {
 478  479          sysid_t oh_sysid;               /* of remote host */
 479  480  };
 480  481  
 481  482  /*
 482  482   * Number of times an NLM RPC call is retried in case of failure
 483  483   * (used in the case of connectionless transports).
 484  485   */
 485  486  #define NLM_RPC_RETRIES 5
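An illustrative retry loop over a connectionless transport (hypothetical;
see nlm_rpc_clnt.c for the real policy): the call is re-issued on timeout up
to NLM_RPC_RETRIES times.

    static enum clnt_stat
    example_rpc_retry(CLIENT *clnt, rpcvers_t vers)
    {
            enum clnt_stat stat;
            int retries = NLM_RPC_RETRIES;

            do {
                    stat = nlm_null_rpc(clnt, vers);
            } while (stat == RPC_TIMEDOUT && --retries > 0);

            return (stat);
    }
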
 486  487  
 487  488  /*
 488  489   * Klmmod global variables
 489  490   */
 490  491  extern krwlock_t lm_lck;
 491  492  extern zone_key_t nlm_zone_key;
 492  493  
 493  494  /*
 494  495   * NLM interface functions (called directly by
 495  496   * either klmmod or klmpos)
 496  497   */
 497  498  extern int nlm_frlock(struct vnode *, int, struct flock64 *, int, u_offset_t,
 498  499      struct cred *, struct netobj *, struct flk_callback *, int);
 499  500  extern int nlm_shrlock(struct vnode *, int, struct shrlock *, int,
 500  501      struct netobj *, int);
 501  502  extern int nlm_safemap(const vnode_t *);
 502  503  extern int nlm_safelock(vnode_t *, const struct flock64 *, cred_t *);
 503  504  extern int nlm_has_sleep(const vnode_t *);
 504  505  extern void nlm_register_lock_locally(struct vnode *, struct nlm_host *,
 505  506      struct flock64 *, int, u_offset_t);
 506  507  int nlm_vp_active(const vnode_t *vp);
 507  508  void nlm_sysid_free(sysid_t);
 508  509  int nlm_vp_active(const vnode_t *);
 509  510  void nlm_unexport(struct exportinfo *);
 510  511  
 511  512  /*
 512  513   * NLM startup/shutdown
 513  514   */
 514  515  int nlm_svc_starting(struct nlm_globals *, struct file *,
 515  516      const char *, struct knetconfig *);
 516  517  void nlm_svc_stopping(struct nlm_globals *);
 517  518  int nlm_svc_add_ep(struct file *, const char *, struct knetconfig *);
 518  519  
 519  520  /*
 520  521   * NLM suspend/resume
 521  522   */
 522  523  void nlm_cprsuspend(void);
 523  524  void nlm_cprresume(void);
 524  525  
 525  526  /*
 526  527   * NLM internal functions for initialization.
 527  528   */
 528  529  void nlm_init(void);
 529  530  void nlm_rpc_init(void);
 530  531  void nlm_rpc_cache_destroy(struct nlm_host *);
 531  532  void nlm_globals_register(struct nlm_globals *);
 532  533  void nlm_globals_unregister(struct nlm_globals *);
 533  534  sysid_t nlm_sysid_alloc(void);
 534  535  
 535  536  /*
 536  537   * Client reclamation/cancelation
 537  538   */
 538  539  void nlm_reclaim_client(struct nlm_globals *, struct nlm_host *);
 539  540  void nlm_client_cancel_all(struct nlm_globals *, struct nlm_host *);
 540  541  
 541  542  /* (nlm_rpc_clnt.c) */
 542  543  enum clnt_stat nlm_null_rpc(CLIENT *, rpcvers_t);
 543  544  enum clnt_stat nlm_test_rpc(nlm4_testargs *, nlm4_testres *,
 544  545      CLIENT *, rpcvers_t);
 545  546  enum clnt_stat nlm_lock_rpc(nlm4_lockargs *, nlm4_res *,
 546  547      CLIENT *, rpcvers_t);
 547  548  enum clnt_stat nlm_cancel_rpc(nlm4_cancargs *, nlm4_res *,
 548  549      CLIENT *, rpcvers_t);
 549  550  enum clnt_stat nlm_unlock_rpc(nlm4_unlockargs *, nlm4_res *,
 550  551      CLIENT *, rpcvers_t);
 551  552  enum clnt_stat nlm_share_rpc(nlm4_shareargs *, nlm4_shareres *,
 552  553      CLIENT *, rpcvers_t);
 553  554  enum clnt_stat nlm_unshare_rpc(nlm4_shareargs *, nlm4_shareres *,
 554  555      CLIENT *, rpcvers_t);
 555  556  
 556  557  
 557  558  /*
 558  559   * RPC service functions.
 559  560   * nlm_dispatch.c
 560  561   */
 561  562  void nlm_prog_3(struct svc_req *rqstp, SVCXPRT *transp);
 562  563  void nlm_prog_4(struct svc_req *rqstp, SVCXPRT *transp);
 563  564  
 564  565  /*
 565  566   * Functions for working with knetconfigs (nlm_netconfig.c)
 566  567   */
 567  568  const char *nlm_knc_to_netid(struct knetconfig *);
 568  569  int nlm_knc_from_netid(const char *, struct knetconfig *);
 569  570  
 570  571  /*
 571  572   * NLM host functions (nlm_impl.c)
 572  573   */
 573  574  struct nlm_host *nlm_host_findcreate(struct nlm_globals *, char *,
 574  575      const char *, struct netbuf *);
 575  576  struct nlm_host *nlm_host_find(struct nlm_globals *,
 576  577      const char *, struct netbuf *);
 577  578  struct nlm_host *nlm_host_find_by_sysid(struct nlm_globals *, sysid_t);
 578  579  void nlm_host_release(struct nlm_globals *, struct nlm_host *);
 579  580  
 580  581  void nlm_host_monitor(struct nlm_globals *, struct nlm_host *, int);
 581  582  void nlm_host_unmonitor(struct nlm_globals *, struct nlm_host *);
 582  583  
 583  584  void nlm_host_notify_server(struct nlm_host *, int32_t);
 584  585  void nlm_host_notify_client(struct nlm_host *, int32_t);
 585  586  
 586  587  int nlm_host_get_state(struct nlm_host *);
 587  588  
 588  589  struct nlm_vhold *nlm_vhold_get(struct nlm_host *, vnode_t *);
 589  590  void nlm_vhold_release(struct nlm_host *, struct nlm_vhold *);
 590  591  struct nlm_vhold *nlm_vhold_find_locked(struct nlm_host *, const vnode_t *);
 591  592  
 592  593  struct nlm_slock *nlm_slock_register(struct nlm_globals *,
 593  594      struct nlm_host *, struct nlm4_lock *, struct vnode *);
 594  595  void nlm_slock_unregister(struct nlm_globals *, struct nlm_slock *);
 595  596  int nlm_slock_wait(struct nlm_globals *, struct nlm_slock *, uint_t);
 596  597  int nlm_slock_grant(struct nlm_globals *,
 597  598      struct nlm_host *, struct nlm4_lock *);
 598  599  void nlm_host_cancel_slocks(struct nlm_globals *, struct nlm_host *);
 599  600  
 600  601  int nlm_slreq_register(struct nlm_host *,
 601  602      struct nlm_vhold *, struct flock64 *);
 602  603  int nlm_slreq_unregister(struct nlm_host *,
 603  604      struct nlm_vhold *, struct flock64 *);
 604  605  
 605  606  void nlm_shres_track(struct nlm_host *, vnode_t *, struct shrlock *);
 606  607  void nlm_shres_untrack(struct nlm_host *, vnode_t *, struct shrlock *);
 607  608  struct nlm_shres *nlm_get_active_shres(struct nlm_host *);
 608  609  void nlm_free_shrlist(struct nlm_shres *);
 609  610  
 610  611  int nlm_host_wait_grace(struct nlm_host *);
 611  612  int nlm_host_cmp(const void *, const void *);
 612  613  void nlm_copy_netobj(struct netobj *, struct netobj *);
 613  614  
 614  615  int nlm_host_get_rpc(struct nlm_host *, int, nlm_rpc_t **);
 615  616  void nlm_host_rele_rpc(struct nlm_host *, nlm_rpc_t *);
 616  617  
 617  618  /*
 618  619   * NLM server functions (nlm_service.c)
 619  620   */
 620  621  int nlm_vp_active(const vnode_t *vp);
 621  622  void nlm_do_notify1(nlm_sm_status *, void *, struct svc_req *);
 622  623  void nlm_do_notify2(nlm_sm_status *, void *, struct svc_req *);
 623  624  void nlm_do_test(nlm4_testargs *, nlm4_testres *,
 624  625      struct svc_req *, nlm_testres_cb);
 625  626  void nlm_do_lock(nlm4_lockargs *, nlm4_res *, struct svc_req *,
 626  627      nlm_reply_cb, nlm_res_cb, nlm_testargs_cb);
 627  628  void nlm_do_cancel(nlm4_cancargs *, nlm4_res *,
 628  629      struct svc_req *, nlm_res_cb);
 629  630  void nlm_do_unlock(nlm4_unlockargs *, nlm4_res *,
 630  631      struct svc_req *, nlm_res_cb);
 631  632  void nlm_do_granted(nlm4_testargs *, nlm4_res *,
 632  633      struct svc_req *, nlm_res_cb);
 633  634  void nlm_do_share(nlm4_shareargs *, nlm4_shareres *, struct svc_req *);
 634  635  void nlm_do_unshare(nlm4_shareargs *, nlm4_shareres *, struct svc_req *);
 635  636  void nlm_do_free_all(nlm4_notify *, void *, struct svc_req *);
 636  637  
 637  638  /*
 638  639   * NLM RPC functions
 639  640   */
 640  641  enum clnt_stat nlm_clnt_call(CLIENT *, rpcproc_t, xdrproc_t,
 641  642      caddr_t, xdrproc_t, caddr_t, struct timeval);
 642  643  bool_t nlm_caller_is_local(SVCXPRT *);
 643  644  
 644  645  #endif  /* _NLM_NLM_H_ */