NEX-3758 Support for remote stale lock detection
Reviewed by: Gordon Ross <gordon.ross@nexenta.com>
--- old/usr/src/uts/common/klm/nlm_impl.h
+++ new/usr/src/uts/common/klm/nlm_impl.h
1 1 /*
2 2 * Copyright (c) 2008 Isilon Inc http://www.isilon.com/
3 3 * Authors: Doug Rabson <dfr@rabson.org>
4 4 * Developed with Red Inc: Alfred Perlstein <alfred@freebsd.org>
5 5 *
6 6 * Redistribution and use in source and binary forms, with or without
7 7 * modification, are permitted provided that the following conditions
8 8 * are met:
9 9 * 1. Redistributions of source code must retain the above copyright
10 10 * notice, this list of conditions and the following disclaimer.
11 11 * 2. Redistributions in binary form must reproduce the above copyright
12 12 * notice, this list of conditions and the following disclaimer in the
13 13 * documentation and/or other materials provided with the distribution.
14 14 *
15 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 25 * SUCH DAMAGE.
26 26 *
27 27 * $FreeBSD$
28 28 */
29 29
30 30 /*
31 - * Copyright 2012 Nexenta Systems, Inc. All rights reserved.
31 + * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
32 32 * Copyright (c) 2012 by Delphix. All rights reserved.
33 33 */
34 34
35 35 /*
36 36 * NFS Lock Manager (NLM) private declarations, etc.
37 37 *
38 38 * Source code derived from FreeBSD nlm.h
39 39 */
40 40
41 41 #ifndef _NLM_NLM_H_
42 42 #define _NLM_NLM_H_
43 43
44 44 #include <sys/cmn_err.h>
45 45 #include <sys/queue.h>
46 46 #include <sys/modhash.h>
47 47 #include <sys/avl.h>
48 +#include <sys/socket.h>
48 49
49 50 #define RPC_MSGOUT(args...) cmn_err(CE_NOTE, args)
50 51 #define NLM_ERR(...) cmn_err(CE_NOTE, __VA_ARGS__)
51 52 #define NLM_WARN(...) cmn_err(CE_WARN, __VA_ARGS__)
52 53
53 54 #ifndef SEEK_SET
54 55 #define SEEK_SET 0
55 56 #endif
56 57 #ifndef SEEK_CUR
57 58 #define SEEK_CUR 1
58 59 #endif
59 60 #ifndef SEEK_END
60 61 #define SEEK_END 2
61 62 #endif
62 63
63 64 /*
64 65 * Maximum offset supported by NLM calls using the older
65 66 * (32-bit) versions of the protocol.
66 67 */
67 68 #define MAX_UOFF32 0xffffffffULL
68 69
69 70 struct nlm_host;
70 71 struct vnode;
71 72 struct exportinfo;
72 73 struct shrlock;
73 74 struct _kthread;
74 75
75 76 /*
76 77 * How to read the code: probably the best place to start
77 78 * is the nlm_host structure, which is the central
78 79 * structure in klmmod. nlm_host is closely tied to all
79 80 * other NLM structures.
80 81 *
81 82 * There are three major locks used inside NLM:
82 83 * 1) Global read-write lock (lm_lck) that is used to
83 84 * protect operations with sysid allocation and
84 85 * management of zone globals structures for each
85 86 * zone.
86 87 * 2) Zone global lock: (nlm_globals->lock) is a mutex
87 88 * used to protect all operations inside particular
88 89 * zone.
89 90 * 3) Host's lock: (nlm_host->nh_lock) is per-host mutex
90 91 * used to protect host's internal fields and all
91 92 * operations with the given host.
92 93 *
93 94 * Lock order _must_ obey the following scheme:
94 95 * lm_lck then nlm_globals->lock then nlm_host->nh_lock
95 96 *
96 97 * Locks:
97 98 * (g) locked by lm_lck
98 99 * (z) locked by nlm_globals->lock
99 100 * (l) locked by host->nh_lock
100 101 * (c) const until freeing
101 102 */
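A minimal sketch of the lock-ordering rule above, for illustration only (the function and its locals are hypothetical and not part of this header):

    static void
    example_lock_order(struct nlm_globals *g, struct nlm_host *hostp)
    {
            rw_enter(&lm_lck, RW_READER);   /* 1) global read-write lock */
            mutex_enter(&g->lock);          /* 2) zone-global mutex */
            mutex_enter(&hostp->nh_lock);   /* 3) per-host mutex */

            /* ... work that needs all three levels of protection ... */

            mutex_exit(&hostp->nh_lock);
            mutex_exit(&g->lock);
            rw_exit(&lm_lck);
    }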
102 103
103 104 /*
104 105 * Callback functions for nlm_do_lock() and others.
105 106 *
106 107 * Calls to nlm_do_lock are unusual, because it needs to handle
107 108 * the reply itself, instead of letting it happen the normal way.
108 109 * It also needs to make an RPC call _back_ to the client when a
109 110 * blocked lock request completes.
110 111 *
111 112 * We pass three callback functions to nlm_do_lock:
112 113 * nlm_reply_cb: send a normal RPC reply
113 114 * nlm_res_cb: do a _res (message style) RPC (call)
114 115 * nlm_testargs_cb: do a "granted" RPC call (after blocking)
115 116 * Only one of the first two is used.
116 117 * The third is used only for blocking locks.
117 118 *
118 119 * We also use callback functions for all the _msg variants
119 120 * of the NLM svc calls, where the reply is a reverse call.
120 121 * The nlm_testres_cb is used by the _test_msg svc calls.
121 122 * The nlm_res_cb type is used by the other _msg calls.
122 123 */
123 124 typedef bool_t (*nlm_reply_cb)(SVCXPRT *, nlm4_res *);
124 125 typedef enum clnt_stat (*nlm_res_cb)(nlm4_res *, void *, CLIENT *);
125 126 typedef enum clnt_stat (*nlm_testargs_cb)(nlm4_testargs *, void *, CLIENT *);
126 127 typedef enum clnt_stat (*nlm_testres_cb)(nlm4_testres *, void *, CLIENT *);
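A hedged illustration of the nlm_reply_cb shape (the callback below is hypothetical; the real ones live in nlm_dispatch.c):

    static bool_t
    example_reply_cb(SVCXPRT *transp, nlm4_res *resp)
    {
            /* Send a normal RPC reply for a non-_msg NLM call. */
            return (svc_sendreply(transp, (xdrproc_t)xdr_nlm4_res,
                (caddr_t)resp));
    }

nlm_do_lock() would then be handed example_reply_cb as its nlm_reply_cb (with the nlm_res_cb left NULL), plus a "granted" callback that is used only if the lock blocks.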
127 128
128 129 /*
129 130 * NLM sleeping lock request.
130 131 *
131 132 * Sleeping lock requests are server-side-only objects
132 133 * that are created when a client asks the server to add a new
133 134 * sleeping lock and that lock needs to block.
134 135 * The server keeps track of these requests in order to be
135 136 * able to cancel them or clean them up.
136 137 *
137 138 * Sleeping lock requests are closely tied to a particular
138 139 * vnode or, strictly speaking, to the NLM vhold object that
139 140 * holds the vnode.
140 141 *
141 142 * struct nlm_slreq:
142 143 * nsr_fl: information about the file lock
143 144 * nsr_link: a list node to store lock requests
144 145 * in vhold object.
145 146 */
146 147 struct nlm_slreq {
147 148 struct flock64 nsr_fl;
148 149 TAILQ_ENTRY(nlm_slreq) nsr_link;
149 150 };
150 151 TAILQ_HEAD(nlm_slreq_list, nlm_slreq);
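A hedged sketch of how a server-side handler might use the sleeping-lock request list via the helpers declared later in this header (the wrapper and its surrounding logic are hypothetical):

    static void
    example_slreq(struct nlm_host *hostp, struct nlm_vhold *nvp,
        struct flock64 *flp)
    {
            /* Remember the blocked request so it can be cancelled later. */
            if (nlm_slreq_register(hostp, nvp, flp) != 0)
                    return; /* assumed: non-zero means already queued */

            /* ... later, on NLM_CANCEL or once the lock is granted ... */
            (void) nlm_slreq_unregister(hostp, nvp, flp);
    }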
151 152
152 153 /*
153 154 * The NLM vhold object is a sort of wrapper around vnodes that remote
154 155 * clients have locked (or added a share reservation to)
155 156 * on the NLM server. A vhold keeps the vnode held (by VN_HOLD())
156 157 * while the vnode has any locks or shares made by the parent host.
157 158 * Vholds are used for two purposes:
158 159 * 1) Hold the vnode (with VN_HOLD) while it has any locks;
159 160 * 2) Keep track of all vnodes the remote host has touched
160 161 *    with lock/share operations on the NLM server, so that NLM
161 162 *    knows which vnodes are potentially locked;
162 163 *
163 164 * Vholds are used on the server side only. On the server it's really
164 165 * important to keep vnodes held while they potentially have
165 166 * any locks/shares. In contrast, it's not important for the client
166 167 * side at all. When a particular vnode comes to the NLM client-side
167 168 * code, it's already held (VN_HOLD) by the process calling the
168 169 * lock/share function (it's referenced because the client calls open()
169 170 * before making locks or shares).
170 171 *
171 172 * Each NLM host object has a collection of vholds associated
172 173 * with the vnodes the host touched earlier by adding locks or shares.
173 174 * Having this collection allows us to decide whether the host is still
174 175 * in use. While it has any vhold objects it's considered to be
175 176 * in use. Otherwise we're free to destroy it.
176 177 *
177 178 * Vholds are destroyed by the NLM garbage collector thread, which
178 179 * periodically checks whether they have any locks or shares.
179 180 * Checking occurs when the parent host has been untouched by a client
180 181 * or server for some period of time.
181 182 *
182 183 * struct nlm_vhold:
183 184 * nv_vp: a pointer to the vnode held by this nlm_vhold
184 185 * nv_refcnt: reference counter (non-zero when the vhold is in use)
185 186 * nv_slreqs: sleeping lock requests that were made on nv_vp
186 187 * nv_link: list node to store vholds in the host's nh_vholds_list
187 188 */
188 189 struct nlm_vhold {
189 190 vnode_t *nv_vp; /* (c) */
190 191 int nv_refcnt; /* (l) */
191 192 struct nlm_slreq_list nv_slreqs; /* (l) */
192 193 TAILQ_ENTRY(nlm_vhold) nv_link; /* (l) */
193 194 };
194 195 TAILQ_HEAD(nlm_vhold_list, nlm_vhold);
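A hedged sketch of the vhold usage pattern described above, built from the accessors declared later in this header (the wrapper function is hypothetical):

    static void
    example_vhold(struct nlm_host *hostp, vnode_t *vp)
    {
            struct nlm_vhold *nvp;

            /* Find or create the vhold; this keeps vp held (VN_HOLD). */
            nvp = nlm_vhold_get(hostp, vp);

            /* ... set/test locks or shares against vp ... */

            nlm_vhold_release(hostp, nvp);  /* drop nv_refcnt */
    }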
195 196
196 197 /*
197 198 * Client side sleeping lock state.
198 199 * - NLM_SL_BLOCKED: some thread is blocked on this lock
199 200 * - NLM_SL_GRANTED: server granted us the lock
200 201 * - NLM_SL_CANCELLED: the lock is cancelled (i.e. invalid/inactive)
201 202 */
202 203 typedef enum nlm_slock_state {
203 204 NLM_SL_UNKNOWN = 0,
204 205 NLM_SL_BLOCKED,
205 206 NLM_SL_GRANTED,
206 207 NLM_SL_CANCELLED
207 208 } nlm_slock_state_t;
208 209
209 210 /*
210 211 * A client side sleeping lock request (set by F_SETLKW)
211 212 * stored in nlm_slocks collection of nlm_globals.
212 213 *
213 214 * struct nlm_slock
214 215 * nsl_state: Sleeping lock state.
215 216 * (see nlm_slock_state for more information)
216 217 * nsl_cond: Condvar that is used when sleeping lock
217 218 * needs to wait for a GRANT callback
218 219 * or cancellation event.
219 220 * nsl_lock: nlm4_lock structure that is sent to the server
220 221 * nsl_fh: Filehandle that corresponds to nsl_vp
221 222 * nsl_host: The host owning this sleeping lock
222 223 * nsl_vp: The vnode the sleeping lock is waiting on.
223 224 * nsl_link: A list node for nlm_globals->nlm_slocks list.
224 225 */
225 226 struct nlm_slock {
226 227 nlm_slock_state_t nsl_state; /* (z) */
227 228 kcondvar_t nsl_cond; /* (z) */
228 229 nlm4_lock nsl_lock; /* (c) */
229 230 struct netobj nsl_fh; /* (c) */
230 231 struct nlm_host *nsl_host; /* (c) */
231 232 struct vnode *nsl_vp; /* (c) */
232 233 TAILQ_ENTRY(nlm_slock) nsl_link; /* (z) */
233 234 };
234 235 TAILQ_HEAD(nlm_slock_list, nlm_slock);
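A hedged sketch of the client-side F_SETLKW flow implied by this structure, using the helpers declared later in this header (locals, timeout choice, and error handling are assumptions):

    static int
    example_blocking_lock(struct nlm_globals *g, struct nlm_host *hostp,
        struct nlm4_lock *alock, vnode_t *vp, uint_t timo)
    {
            struct nlm_slock *nslp;
            int error;

            /* Register before sending the blocking LOCK request. */
            nslp = nlm_slock_register(g, hostp, alock, vp);

            /* ... send the nlm4_lockargs with block == TRUE ... */

            /* Wait for the GRANT callback (or cancellation/timeout). */
            error = nlm_slock_wait(g, nslp, timo);
            nlm_slock_unregister(g, nslp);
            return (error);
    }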
235 236
236 237 /*
237 238 * Share reservation description. NLM tracks all active
238 239 * share reservations made by the client side, so that
239 240 * they can be easily recovered if the remote NLM server
240 241 * reboots. Share reservation tracking is also useful
241 242 * when NLM needs to determine whether a host owns any
242 243 * resources on the system and therefore can't be destroyed.
243 244 *
244 245 * nlm_shres:
245 246 * ns_shr: share reservation description
246 247 * ns_vp: a pointer to the vnode the share reservation is made on
247 248 * ns_next: next nlm_shres instance (or NULL if there is no
248 249 *   next item).
249 250 */
250 251 struct nlm_shres {
251 252 struct shrlock *ns_shr;
252 253 vnode_t *ns_vp;
253 254 struct nlm_shres *ns_next;
254 255 };
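A hedged sketch of walking a host's active share reservations (e.g. during reclaim after a server reboot), using helpers declared later in this header:

    static void
    example_reclaim_shares(struct nlm_host *hostp)
    {
            struct nlm_shres *head, *nsp;

            head = nlm_get_active_shres(hostp);
            for (nsp = head; nsp != NULL; nsp = nsp->ns_next) {
                    /* ... re-send nsp->ns_shr for nsp->ns_vp ... */
            }
            nlm_free_shrlist(head);
    }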
255 256
256 257 /*
257 258 * NLM RPC handle object.
258 259 *
259 260 * In the kRPC subsystem it's unsafe to use one RPC handle from
260 261 * several threads simultaneously. It was designed so that
261 262 * each thread has to create its own RPC handle.
262 263 * RPC handle creation can be a quite expensive operation, especially
263 264 * with session-oriented protocols (such as TCP) that need to
264 265 * establish a session first. The NLM RPC handle object is a
265 266 * wrapper around a kRPC handle that can be cached and reused
266 267 * later. We store all created RPC handles for a given host in the
267 268 * host's RPC handle cache, so that threads making new requests
268 269 * can simply take ready objects from the cache. That improves
269 270 * NLM performance.
270 271 *
271 272 * nlm_rpc_t:
272 273 * nr_handle: the kRPC handle itself.
273 274 * nr_vers: the version of the NLM protocol the kRPC handle was
274 275 *	created for.
275 276 * nr_link: a list node to store NLM RPC handles in the host's
276 277 *	RPC handle cache.
277 278 */
278 279 typedef struct nlm_rpc {
279 280 CLIENT *nr_handle; /* (l) */
280 281 rpcvers_t nr_vers; /* (c) */
281 282 TAILQ_ENTRY(nlm_rpc) nr_link; /* (l) */
282 283 } nlm_rpc_t;
283 284 TAILQ_HEAD(nlm_rpch_list, nlm_rpc);
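A hedged sketch of the cached-handle pattern described above (the wrapper and its error mapping are hypothetical):

    static enum clnt_stat
    example_ping(struct nlm_host *hostp, int vers)
    {
            nlm_rpc_t *rpcp;
            enum clnt_stat stat;

            /* Take a ready handle from the host's cache (or create one). */
            if (nlm_host_get_rpc(hostp, vers, &rpcp) != 0)
                    return (RPC_SYSTEMERROR);

            stat = nlm_null_rpc(rpcp->nr_handle, rpcp->nr_vers);

            /* Return the handle to the cache for reuse. */
            nlm_host_rele_rpc(hostp, rpcp);
            return (stat);
    }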
284 285
285 286 /*
286 287 * Describes the state of NLM host's RPC binding.
287 288 * RPC binding can be in one of three states:
288 289 * 1) NRPCB_NEED_UPDATE:
289 290 * Binding is either not initialized or stale.
290 291 * 2) NRPCB_UPDATE_INPROGRESS:
291 292 * When some thread updates host's RPC binding,
292 293 * it sets binding's state to NRPCB_UPDATE_INPROGRESS
293 294 * which denotes that other threads must wait until
294 295 * update process is finished.
295 296 * 3) NRPCB_UPDATED:
296 297 * Denotes that host's RPC binding is both initialized
297 298 * and fresh.
298 299 */
299 300 enum nlm_rpcb_state {
300 301 NRPCB_NEED_UPDATE = 0,
301 302 NRPCB_UPDATE_INPROGRESS,
302 303 NRPCB_UPDATED
303 304 };
304 305
305 306 /*
306 307 * NLM host flags
307 308 */
308 309 #define NLM_NH_MONITORED 0x01
309 310 #define NLM_NH_RECLAIM 0x02
310 311 #define NLM_NH_INIDLE 0x04
311 312 #define NLM_NH_SUSPEND 0x08
312 313
313 314 /*
314 315 * The NLM host object is the central structure in NLM.
315 316 * It identifies a remote client, a remote server, or both.
316 317 * The NLM host object keeps track of all vnodes the client/server
317 318 * has locked and all sleeping locks it has. All lock/unlock
318 319 * operations are done using a host object.
319 320 *
320 321 * nlm_host:
321 322 * nh_lock: a mutex protecting host object fields
322 323 * nh_refs: reference counter. Identifies how many threads
323 324 *   use this host object.
324 325 * nh_link: a list node for keeping host in zone-global list.
325 326 * nh_by_addr: an AVL tree node for keeping host in zone-global tree.
326 327 * Host can be looked up in the tree by <netid, address>
327 328 * pair.
328 329 * nh_name: host name.
329 330 * nh_netid: netid string identifying the type of transport the host uses.
330 331 * nh_knc: host's knetconfig (used by kRPC subsystem).
331 332 * nh_addr: host's address (either IPv4 or IPv6).
332 333 * nh_sysid: unique sysid associated with this host.
333 334 * nh_state: last seen host's state reported by NSM.
334 335 * nh_flags: ORed host flags.
335 336 * nh_idle_timeout: host idle timeout. When it expires the host is freed.
336 337 * nh_recl_cv: condition variable used for reporting that reclamation
337 338 * process is finished.
338 339 * nh_rpcb_cv: condition variable that is used to make sure
339 340 * that only one thread renews host's RPC binding.
340 341 * nh_rpcb_ustat: error code returned by RPC binding update operation.
341 342 * nh_rpcb_state: host's RPC binding state (see enum nlm_rpcb_state
342 343 * for more details).
343 344 * nh_rpchc: host's RPC handles cache.
344 345 * nh_vholds_by_vp: a hash table of all vholds host owns. (used for lookup)
345 346 * nh_vholds_list: a linked list of all vholds host owns. (used for iteration)
346 347 * nh_shrlist: a list of all active share reservations on the client side.
347 348 * nh_reclaimer: a pointer to the reclamation thread (kthread_t);
348 349 *   NULL if the reclamation thread doesn't exist.
349 350 */
350 351 struct nlm_host {
351 352 kmutex_t nh_lock; /* (c) */
352 353 volatile uint_t nh_refs; /* (z) */
353 354 TAILQ_ENTRY(nlm_host) nh_link; /* (z) */
354 355 avl_node_t nh_by_addr; /* (z) */
355 356 char *nh_name; /* (c) */
356 357 char *nh_netid; /* (c) */
357 358 struct knetconfig nh_knc; /* (c) */
358 359 struct netbuf nh_addr; /* (c) */
359 360 sysid_t nh_sysid; /* (c) */
360 361 int32_t nh_state; /* (z) */
361 362 clock_t nh_idle_timeout; /* (z) */
362 363 uint8_t nh_flags; /* (z) */
363 364 kcondvar_t nh_recl_cv; /* (z) */
364 365 kcondvar_t nh_rpcb_cv; /* (l) */
365 366 enum clnt_stat nh_rpcb_ustat; /* (l) */
366 367 enum nlm_rpcb_state nh_rpcb_state; /* (l) */
367 368 struct nlm_rpch_list nh_rpchc; /* (l) */
368 369 mod_hash_t *nh_vholds_by_vp; /* (l) */
369 370 struct nlm_vhold_list nh_vholds_list; /* (l) */
370 371 struct nlm_shres *nh_shrlist; /* (l) */
371 372 kthread_t *nh_reclaimer; /* (l) */
372 373 };
373 374 TAILQ_HEAD(nlm_host_list, nlm_host);
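A hedged sketch of the typical host lookup/reference pattern suggested by nh_refs and the host functions declared later in this header:

    static void
    example_host_use(struct nlm_globals *g, char *name,
        const char *netid, struct netbuf *addr)
    {
            struct nlm_host *hostp;

            /* Find (or create) the host; assumed to return it referenced. */
            hostp = nlm_host_findcreate(g, name, netid, addr);
            if (hostp == NULL)
                    return;

            /* ... locks, shares, RPC against hostp ... */

            nlm_host_release(g, hostp);     /* drop the reference */
    }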
374 375
375 376 /*
376 377 * nlm_nsm structure describes RPC client handle that can be
377 378 * used to communicate with local NSM via kRPC.
378 379 *
379 380 * We need to wrap the handle in an nlm_nsm structure because kRPC
380 381 * cannot share one handle between several threads. It's assumed
381 382 * that NLM uses only one NSM handle per zone, thus all RPC operations
382 383 * on the NSM handle are serialized using the nlm_nsm->ns_sem semaphore.
383 384 *
384 385 * nlm_nsm also contains a refcnt field used for reference counting.
385 386 * It's needed because the NLM shutdown operation may execute
386 387 * simultaneously with host monitor/unmonitor
387 388 * operations.
388 389 *
389 390 * struct nlm_nsm:
390 391 * ns_sem: a semaphore for serializing network operations to statd
391 392 * ns_knc: a knetconfig describing the transport used for communication
392 393 * ns_addr: the address of the local statd we're talking to
393 394 * ns_handle: an RPC handle used for talking to local statd using the status
394 395 * monitor protocol (SM_PROG)
395 396 * ns_addr_handle: an RPC handle used for talking to local statd using the
396 397 * address registration protocol (NSM_ADDR_PROGRAM)
397 398 */
398 399 struct nlm_nsm {
399 400 ksema_t ns_sem;
400 401 struct knetconfig ns_knc; /* (c) */
401 402 struct netbuf ns_addr; /* (c) */
402 403 CLIENT *ns_handle; /* (c) */
403 404 CLIENT *ns_addr_handle; /* (c) */
404 405 };
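A hedged sketch of the serialization rule above (the actual statd call is elided; the wrapper is hypothetical):

    static void
    example_statd_call(struct nlm_globals *g)
    {
            sema_p(&g->nlm_nsm.ns_sem);     /* one caller at a time */
            /* ... SM_PROG call via g->nlm_nsm.ns_handle ... */
            sema_v(&g->nlm_nsm.ns_sem);
    }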
405 406
406 407 /*
407 408 * Could use flock.h flk_nlm_status_t instead, but
408 409 * prefer our own enum with initial zero...
409 410 */
410 411 typedef enum {
411 412 NLM_ST_DOWN = 0,
412 413 NLM_ST_STOPPING,
413 414 NLM_ST_UP,
414 415 NLM_ST_STARTING
415 416 } nlm_run_status_t;
416 417
417 418 /*
418 419 * The nlm_globals structure allows NLM to be zone-aware. The structure
419 420 * collects all "global variables" NLM has for each zone.
420 421 *
421 422 * struct nlm_globals:
422 423 * lock: mutex protecting all operations inside given zone
423 424 * grace_threshold: grace period expiration time (in ticks)
424 425 * lockd_pid: PID of lockd user space daemon
425 426 * run_status: run status of klmmod inside given zone
426 427 * nsm_state: state obtained from local statd during klmmod startup
427 428 * nlm_gc_thread: garbage collector thread
428 429 * nlm_gc_sched_cv: condvar that can be signalled to wakeup GC
429 430 * nlm_gc_finish_cv: condvar that is signalled just before GC thread exits
430 431 * nlm_nsm: an object describing RPC handle used for talking to local statd
431 432 * nlm_hosts_tree: an AVL tree of all hosts in the given zone
432 433 * (used for hosts lookup by <netid, address> pair)
433 434 * nlm_hosts_hash: a hash table of all hosts in the given zone
434 435 * (used for hosts lookup by sysid)
435 436 * nlm_idle_hosts: a list of all hosts that are in the idle state (i.e. unused)
436 437 * nlm_slocks: a list of all client-side sleeping locks in the zone
437 438 * cn_idle_tmo: a value of idle timeout (in seconds) obtained from lockd
438 439 * grace_period: a value of grace period (in seconds) obtained from lockd
439 440 * retrans_tmo: a value of retransmission timeout (in seconds) obtained
440 441 * from lockd.
441 442 * clean_lock: mutex used to serialize clear_locks calls.
442 443 * nlm_link: a list node used for keeping all nlm_globals objects
443 444 * in one global linked list.
444 445 */
445 446 struct nlm_globals {
446 447 kmutex_t lock;
447 448 clock_t grace_threshold; /* (z) */
448 449 pid_t lockd_pid; /* (z) */
449 450 nlm_run_status_t run_status; /* (z) */
450 451 int32_t nsm_state; /* (z) */
451 452 kthread_t *nlm_gc_thread; /* (z) */
452 453 kcondvar_t nlm_gc_sched_cv; /* (z) */
453 454 kcondvar_t nlm_gc_finish_cv; /* (z) */
454 455 struct nlm_nsm nlm_nsm; /* (z) */
455 456 avl_tree_t nlm_hosts_tree; /* (z) */
456 457 mod_hash_t *nlm_hosts_hash; /* (z) */
457 458 struct nlm_host_list nlm_idle_hosts; /* (z) */
458 459 struct nlm_slock_list nlm_slocks; /* (z) */
459 460 int cn_idle_tmo; /* (z) */
460 461 int grace_period; /* (z) */
461 462 int retrans_tmo; /* (z) */
462 463 kmutex_t clean_lock; /* (c) */
463 464 TAILQ_ENTRY(nlm_globals) nlm_link; /* (g) */
464 465 };
465 466 TAILQ_HEAD(nlm_globals_list, nlm_globals);
466 467
467 468
468 469 /*
469 470 * This is what we pass as the "owner handle" for NLM_LOCK.
470 471 * This lets us find the blocked lock in NLM_GRANTED.
471 472 * It also exposes on the wire what we're using as the
472 473 * sysid for any server, which can be very helpful for
473 474 * problem diagnosis. (Observability is good).
474 475 */
475 476 struct nlm_owner_handle {
476 477 sysid_t oh_sysid; /* of remote host */
477 478 };
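A hedged sketch of how the owner handle might be attached to an outgoing lock request's "oh" netobj (field usage is an assumption; the handle must stay valid for the duration of the call):

    static void
    example_set_oh(struct nlm4_lock *alock, struct nlm_owner_handle *oh,
        sysid_t sysid)
    {
            oh->oh_sysid = sysid;           /* expose our sysid on the wire */
            alock->oh.n_len = sizeof (*oh);
            alock->oh.n_bytes = (char *)oh;
    }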
478 479
479 480 /*
480 481 * Number of times an NLM RPC call is retried in case of failure
481 482 * (used with connectionless transports).
482 483 */
483 484 #define NLM_RPC_RETRIES 5
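A hedged sketch of how a caller might bound retries over a connectionless transport with NLM_RPC_RETRIES (illustrative only):

    static enum clnt_stat
    example_retry_null(CLIENT *clnt, rpcvers_t vers)
    {
            enum clnt_stat stat = RPC_FAILED;
            int tries;

            for (tries = 0; tries < NLM_RPC_RETRIES; tries++) {
                    stat = nlm_null_rpc(clnt, vers);
                    if (stat == RPC_SUCCESS)
                            break;  /* stop on the first success */
            }
            return (stat);
    }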
484 485
485 486 /*
486 487 * Klmmod global variables
487 488 */
488 489 extern krwlock_t lm_lck;
489 490 extern zone_key_t nlm_zone_key;
490 491
491 492 /*
492 493 * NLM interface functions (called directly by
493 494 * either klmmod or klmops)
494 495 */
495 496 extern int nlm_frlock(struct vnode *, int, struct flock64 *, int, u_offset_t,
496 497 struct cred *, struct netobj *, struct flk_callback *, int);
497 498 extern int nlm_shrlock(struct vnode *, int, struct shrlock *, int,
498 499 struct netobj *, int);
499 500 extern int nlm_safemap(const vnode_t *);
500 501 extern int nlm_safelock(vnode_t *, const struct flock64 *, cred_t *);
501 502 extern int nlm_has_sleep(const vnode_t *);
502 503 extern void nlm_register_lock_locally(struct vnode *, struct nlm_host *,
503 504 struct flock64 *, int, u_offset_t);
504 505 int nlm_vp_active(const vnode_t *vp);
505 506 void nlm_sysid_free(sysid_t);
506 507 int nlm_vp_active(const vnode_t *);
507 508 void nlm_unexport(struct exportinfo *);
508 509
509 510 /*
510 511 * NLM startup/shutdown
511 512 */
512 513 int nlm_svc_starting(struct nlm_globals *, struct file *,
513 514 const char *, struct knetconfig *);
514 515 void nlm_svc_stopping(struct nlm_globals *);
515 516 int nlm_svc_add_ep(struct file *, const char *, struct knetconfig *);
516 517
517 518 /*
518 519 * NLM suspend/resume
519 520 */
520 521 void nlm_cprsuspend(void);
521 522 void nlm_cprresume(void);
522 523
523 524 /*
524 525 * NLM internal functions for initialization.
525 526 */
526 527 void nlm_init(void);
527 528 void nlm_rpc_init(void);
528 529 void nlm_rpc_cache_destroy(struct nlm_host *);
529 530 void nlm_globals_register(struct nlm_globals *);
530 531 void nlm_globals_unregister(struct nlm_globals *);
531 532 sysid_t nlm_sysid_alloc(void);
533 +int nlm_sysid_to_host(zoneid_t, sysid_t, struct sockaddr *, const char **);
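The prototype above is the new interface added by this change. A heavily hedged usage sketch based only on its signature (the output and return-value semantics are assumptions; nlm_impl.c is authoritative):

    static void
    example_stale_lock_check(zoneid_t zid, sysid_t sysid)
    {
            struct sockaddr_storage sa;
            const char *name;

            /* Presumably maps a lock's sysid back to the remote host. */
            if (nlm_sysid_to_host(zid, sysid,
                (struct sockaddr *)&sa, &name) == 0) {
                    /* ... sa/name identify the remote lock owner ... */
            }
    }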
532 534
533 535 /*
534 536 * Client reclamation/cancelation
535 537 */
536 538 void nlm_reclaim_client(struct nlm_globals *, struct nlm_host *);
537 539 void nlm_client_cancel_all(struct nlm_globals *, struct nlm_host *);
538 540
539 541 /* (nlm_rpc_clnt.c) */
540 542 enum clnt_stat nlm_null_rpc(CLIENT *, rpcvers_t);
541 543 enum clnt_stat nlm_test_rpc(nlm4_testargs *, nlm4_testres *,
542 544 CLIENT *, rpcvers_t);
543 545 enum clnt_stat nlm_lock_rpc(nlm4_lockargs *, nlm4_res *,
544 546 CLIENT *, rpcvers_t);
545 547 enum clnt_stat nlm_cancel_rpc(nlm4_cancargs *, nlm4_res *,
546 548 CLIENT *, rpcvers_t);
547 549 enum clnt_stat nlm_unlock_rpc(nlm4_unlockargs *, nlm4_res *,
548 550 CLIENT *, rpcvers_t);
549 551 enum clnt_stat nlm_share_rpc(nlm4_shareargs *, nlm4_shareres *,
550 552 CLIENT *, rpcvers_t);
551 553 enum clnt_stat nlm_unshare_rpc(nlm4_shareargs *, nlm4_shareres *,
552 554 CLIENT *, rpcvers_t);
553 555
554 556
555 557 /*
556 558 * RPC service functions.
557 559 * nlm_dispatch.c
558 560 */
559 561 void nlm_prog_3(struct svc_req *rqstp, SVCXPRT *transp);
560 562 void nlm_prog_4(struct svc_req *rqstp, SVCXPRT *transp);
561 563
562 564 /*
563 565 * Functions for working with knetconfigs (nlm_netconfig.c)
564 566 */
565 567 const char *nlm_knc_to_netid(struct knetconfig *);
566 568 int nlm_knc_from_netid(const char *, struct knetconfig *);
567 569
568 570 /*
569 571 * NLM host functions (nlm_impl.c)
570 572 */
571 573 struct nlm_host *nlm_host_findcreate(struct nlm_globals *, char *,
572 574 const char *, struct netbuf *);
573 575 struct nlm_host *nlm_host_find(struct nlm_globals *,
574 576 const char *, struct netbuf *);
575 577 struct nlm_host *nlm_host_find_by_sysid(struct nlm_globals *, sysid_t);
576 578 void nlm_host_release(struct nlm_globals *, struct nlm_host *);
577 579
578 580 void nlm_host_monitor(struct nlm_globals *, struct nlm_host *, int);
579 581 void nlm_host_unmonitor(struct nlm_globals *, struct nlm_host *);
580 582
581 583 void nlm_host_notify_server(struct nlm_host *, int32_t);
582 584 void nlm_host_notify_client(struct nlm_host *, int32_t);
583 585
584 586 int nlm_host_get_state(struct nlm_host *);
585 587
586 588 struct nlm_vhold *nlm_vhold_get(struct nlm_host *, vnode_t *);
587 589 void nlm_vhold_release(struct nlm_host *, struct nlm_vhold *);
588 590 struct nlm_vhold *nlm_vhold_find_locked(struct nlm_host *, const vnode_t *);
589 591
590 592 struct nlm_slock *nlm_slock_register(struct nlm_globals *,
591 593 struct nlm_host *, struct nlm4_lock *, struct vnode *);
592 594 void nlm_slock_unregister(struct nlm_globals *, struct nlm_slock *);
593 595 int nlm_slock_wait(struct nlm_globals *, struct nlm_slock *, uint_t);
594 596 int nlm_slock_grant(struct nlm_globals *,
595 597 struct nlm_host *, struct nlm4_lock *);
596 598 void nlm_host_cancel_slocks(struct nlm_globals *, struct nlm_host *);
597 599
598 600 int nlm_slreq_register(struct nlm_host *,
599 601 struct nlm_vhold *, struct flock64 *);
600 602 int nlm_slreq_unregister(struct nlm_host *,
601 603 struct nlm_vhold *, struct flock64 *);
602 604
603 605 void nlm_shres_track(struct nlm_host *, vnode_t *, struct shrlock *);
604 606 void nlm_shres_untrack(struct nlm_host *, vnode_t *, struct shrlock *);
605 607 struct nlm_shres *nlm_get_active_shres(struct nlm_host *);
606 608 void nlm_free_shrlist(struct nlm_shres *);
607 609
608 610 int nlm_host_wait_grace(struct nlm_host *);
609 611 int nlm_host_cmp(const void *, const void *);
610 612 void nlm_copy_netobj(struct netobj *, struct netobj *);
611 613
612 614 int nlm_host_get_rpc(struct nlm_host *, int, nlm_rpc_t **);
613 615 void nlm_host_rele_rpc(struct nlm_host *, nlm_rpc_t *);
614 616
615 617 /*
616 618 * NLM server functions (nlm_service.c)
617 619 */
618 620 int nlm_vp_active(const vnode_t *vp);
619 621 void nlm_do_notify1(nlm_sm_status *, void *, struct svc_req *);
620 622 void nlm_do_notify2(nlm_sm_status *, void *, struct svc_req *);
621 623 void nlm_do_test(nlm4_testargs *, nlm4_testres *,
622 624 struct svc_req *, nlm_testres_cb);
623 625 void nlm_do_lock(nlm4_lockargs *, nlm4_res *, struct svc_req *,
624 626 nlm_reply_cb, nlm_res_cb, nlm_testargs_cb);
625 627 void nlm_do_cancel(nlm4_cancargs *, nlm4_res *,
626 628 struct svc_req *, nlm_res_cb);
627 629 void nlm_do_unlock(nlm4_unlockargs *, nlm4_res *,
628 630 struct svc_req *, nlm_res_cb);
629 631 void nlm_do_granted(nlm4_testargs *, nlm4_res *,
630 632 struct svc_req *, nlm_res_cb);
631 633 void nlm_do_share(nlm4_shareargs *, nlm4_shareres *, struct svc_req *);
632 634 void nlm_do_unshare(nlm4_shareargs *, nlm4_shareres *, struct svc_req *);
633 635 void nlm_do_free_all(nlm4_notify *, void *, struct svc_req *);
634 636
635 637 /*
636 638 * NLM RPC functions
637 639 */
638 640 enum clnt_stat nlm_clnt_call(CLIENT *, rpcproc_t, xdrproc_t,
639 641 caddr_t, xdrproc_t, caddr_t, struct timeval);
640 642 bool_t nlm_caller_is_local(SVCXPRT *);
641 643
642 644 #endif /* _NLM_NLM_H_ */