NEX-3758 Support for remote stale lock detection
Reviewed by: Gordon Ross <gordon.ross@nexenta.com>
--- old/usr/src/uts/common/sys/flock_impl.h
+++ new/usr/src/uts/common/sys/flock_impl.h
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License, Version 1.0 only
6 6 * (the "License"). You may not use this file except in compliance
7 7 * with the License.
8 8 *
9 9 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10 10 * or http://www.opensolaris.org/os/licensing.
11 11 * See the License for the specific language governing permissions
12 12 * and limitations under the License.
13 13 *
14 14 * When distributing Covered Code, include this CDDL HEADER in each
15 15 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16 16 * If applicable, add the following below this CDDL HEADER, with the
17 17 * fields enclosed by brackets "[]" replaced with your own identifying
18 18 * information: Portions Copyright [yyyy] [name of copyright owner]
19 19 *
20 20 * CDDL HEADER END
21 21 */
22 +
22 23 /*
24 + * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
25 + */
26 +
27 +/*
23 28 * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
24 29 * Use is subject to license terms.
25 30 * Copyright 2015 Joyent, Inc.
26 31 */
27 32
28 33 #ifndef _SYS_FLOCK_IMPL_H
29 34 #define _SYS_FLOCK_IMPL_H
30 35
31 36 #include <sys/types.h>
32 37 #include <sys/fcntl.h> /* flock definition */
33 38 #include <sys/file.h> /* FREAD etc */
34 39 #include <sys/flock.h> /* RCMD etc */
35 40 #include <sys/kmem.h>
36 41 #include <sys/user.h>
37 42 #include <sys/thread.h>
38 43 #include <sys/proc.h>
39 44 #include <sys/cred.h>
40 45 #include <sys/debug.h>
41 46 #include <sys/cmn_err.h>
42 47 #include <sys/errno.h>
43 48 #include <sys/systm.h>
44 49 #include <sys/vnode.h>
45 50 #include <sys/share.h> /* just to get GETSYSID def */
51 +#include <sys/time.h>
46 52
47 53 #ifdef __cplusplus
48 54 extern "C" {
49 55 #endif
50 56
51 57 struct edge {
52 58 struct edge *edge_adj_next; /* adjacency list next */
53 59 struct edge *edge_adj_prev; /* adjacency list prev */
54 60 struct edge *edge_in_next; /* incoming edges list next */
55 61 struct edge *edge_in_prev; /* incoming edges list prev */
56 62 struct lock_descriptor *from_vertex; /* edge emanating from lock */
57 63 struct lock_descriptor *to_vertex; /* edge pointing to lock */
58 64 };
59 65
60 66 typedef struct edge edge_t;
61 67
62 68 struct lock_descriptor {
63 69 struct lock_descriptor *l_next; /* next active/sleep lock */
64 70 struct lock_descriptor *l_prev; /* previous active/sleep lock */
65 71 struct edge l_edge; /* edge for adj and in lists */
66 72 struct lock_descriptor *l_stack; /* for stack operations */
67 73 struct lock_descriptor *l_stack1; /* for stack operations */
68 74 struct lock_descriptor *l_dstack; /* stack for debug functions */
69 75 struct edge *l_sedge; /* start edge for graph alg. */
70 - int l_index; /* used for barrier count */
71 - struct graph *l_graph; /* graph this belongs to */
72 - vnode_t *l_vnode; /* vnode being locked */
73 - int l_type; /* type of lock */
74 - int l_state; /* state described below */
75 - u_offset_t l_start; /* start offset */
76 - u_offset_t l_end; /* end offset */
77 - flock64_t l_flock; /* original flock request */
78 - int l_color; /* color used for graph alg */
79 - kcondvar_t l_cv; /* wait condition for lock */
80 - int pvertex; /* index to proc vertex */
81 - int l_status; /* status described below */
82 - flk_nlm_status_t l_nlm_state; /* state of NLM server */
83 - flk_callback_t *l_callbacks; /* callbacks, or NULL */
84 - zoneid_t l_zoneid; /* zone of request */
85 - file_t *l_ofd; /* OFD-style reference */
76 + int l_index; /* used for barrier count */
77 + struct graph *l_graph; /* graph this belongs to */
78 + vnode_t *l_vnode; /* vnode being locked */
79 + int l_type; /* type of lock */
80 + int l_state; /* state described below */
81 + u_offset_t l_start; /* start offset */
82 + u_offset_t l_end; /* end offset */
83 + flock64_t l_flock; /* original flock request */
84 + int l_color; /* color used for graph alg */
85 + kcondvar_t l_cv; /* wait condition for lock */
86 + int pvertex; /* index to proc vertex */
87 + int l_status; /* status described below */
88 + flk_nlm_status_t l_nlm_state; /* state of NLM server */
89 + flk_callback_t *l_callbacks; /* callbacks, or NULL */
90 + zoneid_t l_zoneid; /* zone of request */
91 + hrtime_t l_blocker; /* time when this lock */
92 + /* started to prevent other */
93 + /* locks from being set */
94 + file_t *l_ofd; /* OFD-style reference */
86 95 };
87 96
88 97 typedef struct lock_descriptor lock_descriptor_t;
89 98
90 99 /*
91 100 * Each graph holds locking information for some number of vnodes. The
92 101 * active and sleeping lists are circular, with a dummy head element.
93 102 */
94 103
95 104 struct graph {
96 105 kmutex_t gp_mutex; /* mutex for this graph */
97 106 struct lock_descriptor active_locks;
98 107 struct lock_descriptor sleeping_locks;
99 108 int index; /* index of this graph into the hash table */
100 109 int mark; /* used for coloring the graph */
101 110 };
102 111
103 112 typedef struct graph graph_t;
104 113
105 114 /*
106 115 * The possible states a lock can be in. These states are stored in the
107 116 * 'l_status' member of the 'lock_descriptor_t' structure. All locks start
108 117 * life in the INITIAL state, and end up in the DEAD state. Possible state
109 118 * transitions are :
110 119 *
111 120 * INITIAL--> START --> ACTIVE --> DEAD
112 121 *
113 122 * --> DEAD
114 123 *
115 124 * --> ACTIVE --> DEAD (new locks from flk_relation)
116 125 *
117 126 * --> SLEEPING --> GRANTED --> START --> ACTIVE --> DEAD
118 127 *
119 128 * --> INTR --> DEAD
120 129 *
121 130 * --> CANCELLED --> DEAD
122 131 *
123 132 * --> INTR --> DEAD
124 133 *
125 134 * --> INTR --> DEAD
126 135 *
127 136 * --> CANCELLED --> DEAD
128 137 *
129 138 * --> INTR --> DEAD
130 139 *
131 140 * Lock transitions are done in the following functions:
132 141 * --> INITIAL flk_get_lock(), reclock()
133 142 * --> START flk_execute_request()
134 143 * --> ACTIVE flk_insert_active_lock()
135 144 * --> SLEEPING flk_insert_sleeping_lock()
136 145 * --> GRANTED GRANT_WAKEUP
137 146 * --> INTERRUPTED INTERRUPT_WAKEUP
138 147 * --> CANCELLED CANCEL_WAKEUP
139 148 * --> DEAD reclock(), flk_delete_active_lock(), and
140 149 * flk_cancel_sleeping_lock()
141 150 */
142 151
143 152 #define FLK_INITIAL_STATE 1 /* Initial state of all requests */
144 153 #define FLK_START_STATE 2 /* Request has started execution */
145 154 #define FLK_ACTIVE_STATE 3 /* In active queue */
146 155 #define FLK_SLEEPING_STATE 4 /* Request is blocked */
147 156 #define FLK_GRANTED_STATE 5 /* Request is granted */
148 157 #define FLK_INTERRUPTED_STATE 6 /* Request is interrupted */
149 158 #define FLK_CANCELLED_STATE 7 /* Request is cancelled */
150 159 #define FLK_DEAD_STATE 8 /* Request is done - will be deleted */
151 160
152 161 /* flags defining state of locks */
153 162
154 163 /*
155 164 * The LLM design has been modified so that lock states are now stored
156 165 * in the l_status field of lock_descriptor_t. The l_state field is
157 166 * currently preserved for binary compatibility, but may be modified or
158 167 * removed in a minor release of Solaris. Note that both of these
159 168 * fields (and the rest of the lock_descriptor_t structure) are private
160 169 * to the implementation of the lock manager and should not be used
161 170 * externally.
162 171 */
163 172
164 173 #define ACTIVE_LOCK 0x0001 /* in active queue */
165 174 #define SLEEPING_LOCK 0x0002 /* in sleep queue */
166 175 #define IO_LOCK 0x0004 /* is an IO lock */
167 176 #define REFERENCED_LOCK 0x0008 /* referenced somewhere */
168 177 #define QUERY_LOCK 0x0010 /* querying about lock */
169 178 #define WILLING_TO_SLEEP_LOCK 0x0020 /* lock can be put in sleep queue */
170 179 #define RECOMPUTE_LOCK 0x0040 /* used for recomputing dependencies */
171 180 #define RECOMPUTE_DONE 0x0080 /* used for recomputing dependencies */
172 181 #define BARRIER_LOCK 0x0100 /* used for recomputing dependencies */
173 182 #define GRANTED_LOCK 0x0200 /* granted but still in sleep queue */
174 183 #define CANCELLED_LOCK 0x0400 /* cancelled will be thrown out */
175 184 #define DELETED_LOCK 0x0800 /* deleted - free at earliest */
176 185 #define INTERRUPTED_LOCK 0x1000 /* pretend signal */
177 186 #define LOCKMGR_LOCK 0x2000 /* remote lock (server-side) */
178 187 /* Clustering: flag for PXFS locks */
179 188 #define PXFS_LOCK 0x4000 /* lock created by PXFS file system */
180 189 #define NBMAND_LOCK 0x8000 /* non-blocking mandatory locking */
181 190
182 191 #define HASH_SIZE 32
183 192 #define HASH_SHIFT (HASH_SIZE - 1)
184 193 #define HASH_INDEX(vp) (((uintptr_t)vp >> 7) & HASH_SHIFT)
185 194
186 195 /* extern definitions */
187 196
188 197 extern struct graph *lock_graph[HASH_SIZE];
189 198 extern struct kmem_cache *flk_edge_cache;
190 199
191 200 /* Clustering: functions called by PXFS */
192 201 int flk_execute_request(lock_descriptor_t *);
193 202 void flk_cancel_sleeping_lock(lock_descriptor_t *, int);
194 203 void flk_set_state(lock_descriptor_t *, int);
195 204 graph_t *flk_get_lock_graph(vnode_t *, int);
196 205
197 206 /* flags used for readability in flock.c */
198 207
199 208 #define FLK_USE_GRAPH 0 /* don't initialize the lock_graph */
200 209 #define FLK_INIT_GRAPH 1 /* initialize the lock graph */
201 210 #define NO_COLOR 0 /* vertex is not colored */
202 211 #define NO_CHECK_CYCLE 0 /* don't mark vertices in flk_add_edge */
203 212 #define CHECK_CYCLE 1 /* mark vertices in flk_add_edge */
204 213
205 214 #define SAME_OWNER(lock1, lock2) \
206 215 (((lock1)->l_flock.l_pid == (lock2)->l_flock.l_pid) && \
207 216 ((lock1)->l_flock.l_sysid == (lock2)->l_flock.l_sysid) && \
208 217 ((lock1)->l_ofd == (lock2)->l_ofd))
209 218
210 219 #define COLORED(vertex) ((vertex)->l_color == (vertex)->l_graph->mark)
211 220 #define COLOR(vertex) ((vertex)->l_color = (vertex)->l_graph->mark)
212 221
213 222 /*
214 223 * stack data structure and operations
215 224 */
216 225
217 226 #define STACK_INIT(stack) ((stack) = NULL)
218 227 #define STACK_PUSH(stack, ptr, stack_link) (ptr)->stack_link = (stack),\
219 228 (stack) = (ptr)
220 229 #define STACK_POP(stack, stack_link) (stack) = (stack)->stack_link
221 230 #define STACK_TOP(stack) (stack)
222 231 #define STACK_EMPTY(stack) ((stack) == NULL)
223 232
224 233
225 234 #define ACTIVE_HEAD(gp) (&(gp)->active_locks)
226 235
227 236 #define SLEEPING_HEAD(gp) (&(gp)->sleeping_locks)
228 237
229 238 #define SET_LOCK_TO_FIRST_ACTIVE_VP(gp, lock, vp) \
230 239 { \
231 240 (lock) = (lock_descriptor_t *)vp->v_filocks; \
232 241 }
233 242
234 243 #define SET_LOCK_TO_FIRST_SLEEP_VP(gp, lock, vp) \
235 244 { \
236 245 for ((lock) = SLEEPING_HEAD((gp))->l_next; ((lock) != SLEEPING_HEAD((gp)) && \
237 246 (lock)->l_vnode != (vp)); (lock) = (lock)->l_next) \
238 247 ; \
239 248 (lock) = ((lock) == SLEEPING_HEAD((gp))) ? NULL : (lock); \
240 249 }
241 250
242 251 #define OVERLAP(lock1, lock2) \
243 252 (((lock1)->l_start <= (lock2)->l_start && \
244 253 (lock2)->l_start <= (lock1)->l_end) || \
245 254 ((lock2)->l_start <= (lock1)->l_start && \
246 255 (lock1)->l_start <= (lock2)->l_end))
247 256
248 257 #define IS_INITIAL(lock) ((lock)->l_status == FLK_INITIAL_STATE)
249 258 #define IS_ACTIVE(lock) ((lock)->l_status == FLK_ACTIVE_STATE)
250 259 #define IS_SLEEPING(lock) ((lock)->l_status == FLK_SLEEPING_STATE)
251 260 #define IS_GRANTED(lock) ((lock)->l_status == FLK_GRANTED_STATE)
252 261 #define IS_INTERRUPTED(lock) ((lock)->l_status == FLK_INTERRUPTED_STATE)
253 262 #define IS_CANCELLED(lock) ((lock)->l_status == FLK_CANCELLED_STATE)
254 263 #define IS_DEAD(lock) ((lock)->l_status == FLK_DEAD_STATE)
255 264
256 265 #define IS_QUERY_LOCK(lock) ((lock)->l_state & QUERY_LOCK)
257 266 #define IS_RECOMPUTE(lock) ((lock)->l_state & RECOMPUTE_LOCK)
258 267 #define IS_BARRIER(lock) ((lock)->l_state & BARRIER_LOCK)
259 268 #define IS_DELETED(lock) ((lock)->l_state & DELETED_LOCK)
260 269 #define IS_REFERENCED(lock) ((lock)->l_state & REFERENCED_LOCK)
261 270 #define IS_IO_LOCK(lock) ((lock)->l_state & IO_LOCK)
262 271 #define IS_WILLING_TO_SLEEP(lock) \
263 272 ((lock)->l_state & WILLING_TO_SLEEP_LOCK)
264 273 #define IS_LOCKMGR(lock) ((lock)->l_state & LOCKMGR_LOCK)
265 274 #define IS_NLM_UP(lock) ((lock)->l_nlm_state == FLK_NLM_UP)
266 275 /* Clustering: Macro for PXFS locks */
267 276 #define IS_PXFS(lock) ((lock)->l_state & PXFS_LOCK)
268 277
269 278 /*
270 279 * "local" requests don't involve the NFS lock manager in any way.
271 280 * "remote" requests can be on the server (requests from a remote client),
272 281 * in which case they should be associated with a local vnode (UFS, tmpfs,
273 282 * etc.). These requests are flagged with LOCKMGR_LOCK and are made using
274 283 * kernel service threads. Remote requests can also be on an NFS client,
275 284 * because the NFS lock manager uses local locking for some of its
276 285 * bookkeeping. These requests are made by regular user processes.
277 286 */
278 287 #define IS_LOCAL(lock) (GETSYSID((lock)->l_flock.l_sysid) == 0)
279 288 #define IS_REMOTE(lock) (! IS_LOCAL(lock))
280 289
281 290 /* Clustering: Return value for blocking PXFS locks */
282 291 /*
283 292 * For PXFS locks, reclock() will return this error code for requests that
284 293 * need to block
285 294 */
286 295 #define PXFS_LOCK_BLOCKED -1
287 296
288 297 /* Clustering: PXFS callback function */
289 298 /*
290 299 * This function is a callback from the LLM into the PXFS server module. It
291 300 * is initialized as a weak stub, and is functional when the pxfs server module
292 301 * is loaded.
293 302 */
294 303 extern void cl_flk_state_transition_notify(lock_descriptor_t *lock,
295 304 int old_state, int new_state);
296 305
297 306 #define BLOCKS(lock1, lock2) (!SAME_OWNER((lock1), (lock2)) && \
298 307 (((lock1)->l_type == F_WRLCK) || \
299 308 ((lock2)->l_type == F_WRLCK)) && \
300 309 OVERLAP((lock1), (lock2)))
301 310
302 311 #define COVERS(lock1, lock2) \
303 312 (((lock1)->l_start <= (lock2)->l_start) && \
304 313 ((lock1)->l_end >= (lock2)->l_end))
305 314
306 315 #define IN_LIST_REMOVE(ep) \
307 316 { \
308 317 (ep)->edge_in_next->edge_in_prev = (ep)->edge_in_prev; \
309 318 (ep)->edge_in_prev->edge_in_next = (ep)->edge_in_next; \
310 319 }
311 320
312 321 #define ADJ_LIST_REMOVE(ep) \
313 322 { \
314 323 (ep)->edge_adj_next->edge_adj_prev = (ep)->edge_adj_prev; \
315 324 (ep)->edge_adj_prev->edge_adj_next = (ep)->edge_adj_next; \
316 325 }
317 326
318 327 #define NOT_BLOCKED(lock) \
319 328 ((lock)->l_edge.edge_adj_next == &(lock)->l_edge && !IS_GRANTED(lock))
320 329
321 330 #define GRANT_WAKEUP(lock) \
322 331 { \
323 332 flk_set_state(lock, FLK_GRANTED_STATE); \
324 333 (lock)->l_state |= GRANTED_LOCK; \
325 334 /* \
326 335 * Clustering: PXFS locks do not sleep in the LLM, \
327 336 * so there is no need to signal them \
328 337 */ \
329 338 if (!IS_PXFS(lock)) { \
330 339 cv_signal(&(lock)->l_cv); \
331 340 } \
332 341 }
333 342
334 343 #define CANCEL_WAKEUP(lock) \
335 344 { \
336 345 flk_set_state(lock, FLK_CANCELLED_STATE); \
337 346 (lock)->l_state |= CANCELLED_LOCK; \
338 347 /* \
339 348 * Clustering: PXFS locks do not sleep in the LLM, \
340 349 * so there is no need to signal them \
341 350 */ \
342 351 if (!IS_PXFS(lock)) { \
343 352 cv_signal(&(lock)->l_cv); \
344 353 } \
345 354 }
346 355
347 356 #define INTERRUPT_WAKEUP(lock) \
348 357 { \
349 358 flk_set_state(lock, FLK_INTERRUPTED_STATE); \
350 359 (lock)->l_state |= INTERRUPTED_LOCK; \
351 360 /* \
352 361 * Clustering: PXFS locks do not sleep in the LLM, \
353 362 * so there is no need to signal them \
354 363 */ \
355 364 if (!IS_PXFS(lock)) { \
356 365 cv_signal(&(lock)->l_cv); \
357 366 } \
358 367 }
359 368
360 369 #define REMOVE_SLEEP_QUEUE(lock) \
361 370 { \
362 371 ASSERT(IS_SLEEPING(lock) || IS_GRANTED(lock) || \
363 372 IS_INTERRUPTED(lock) || IS_CANCELLED(lock)); \
364 373 (lock)->l_state &= ~SLEEPING_LOCK; \
365 374 (lock)->l_next->l_prev = (lock)->l_prev; \
366 375 (lock)->l_prev->l_next = (lock)->l_next; \
367 376 (lock)->l_next = (lock)->l_prev = (lock_descriptor_t *)NULL; \
368 377 }
369 378
370 379 #define NO_DEPENDENTS(lock) \
371 380 ((lock)->l_edge.edge_in_next == &(lock)->l_edge)
372 381
373 382 #define GRANT(lock) \
374 383 { \
375 384 (lock)->l_state |= GRANTED_LOCK; \
376 385 flk_set_state(lock, FLK_GRANTED_STATE); \
377 386 }
378 387
379 388 #define FIRST_IN(lock) ((lock)->l_edge.edge_in_next)
380 389 #define FIRST_ADJ(lock) ((lock)->l_edge.edge_adj_next)
381 390 #define HEAD(lock) (&(lock)->l_edge)
382 391 #define NEXT_ADJ(ep) ((ep)->edge_adj_next)
383 392 #define NEXT_IN(ep) ((ep)->edge_in_next)
384 393 #define IN_ADJ_INIT(lock) \
385 394 { \
386 395 (lock)->l_edge.edge_adj_next = (lock)->l_edge.edge_adj_prev = &(lock)->l_edge; \
387 396 (lock)->l_edge.edge_in_next = (lock)->l_edge.edge_in_prev = &(lock)->l_edge; \
388 397 }
389 398
390 399 #define COPY(lock1, lock2) \
391 400 { \
392 401 (lock1)->l_graph = (lock2)->l_graph; \
393 402 (lock1)->l_vnode = (lock2)->l_vnode; \
394 403 (lock1)->l_type = (lock2)->l_type; \
395 404 (lock1)->l_state = (lock2)->l_state; \
396 405 (lock1)->l_start = (lock2)->l_start; \
397 406 (lock1)->l_end = (lock2)->l_end; \
398 407 (lock1)->l_flock = (lock2)->l_flock; \
399 408 (lock1)->l_zoneid = (lock2)->l_zoneid; \
400 409 (lock1)->pvertex = (lock2)->pvertex; \
410 +(lock1)->l_blocker = (lock2)->l_blocker; \
401 411 }
402 412
403 413 /*
404 414 * Clustering
405 415 */
406 416 /* Routines to set and get the NLM state in a lock request */
407 417 #define SET_NLM_STATE(lock, nlm_state) ((lock)->l_nlm_state = nlm_state)
408 418 #define GET_NLM_STATE(lock) ((lock)->l_nlm_state)
409 419 /*
410 420 * NLM registry abstraction:
411 421 * Abstraction overview:
412 422 * This registry keeps track of the NLM servers via their nlmids
413 423 * that have requested locks at the LLM this registry is associated
414 424 * with.
415 425 */
416 426 /* Routines to manipulate the NLM registry object state */
417 427 #define FLK_REGISTRY_IS_NLM_UNKNOWN(nlmreg, nlmid) \
418 428 ((nlmreg)[nlmid] == FLK_NLM_UNKNOWN)
419 429 #define FLK_REGISTRY_IS_NLM_UP(nlmreg, nlmid) \
420 430 ((nlmreg)[nlmid] == FLK_NLM_UP)
421 431 #define FLK_REGISTRY_ADD_NLMID(nlmreg, nlmid) \
422 432 ((nlmreg)[nlmid] = FLK_NLM_UP)
423 433 #define FLK_REGISTRY_CHANGE_NLM_STATE(nlmreg, nlmid, state) \
424 434 ((nlmreg)[nlmid] = state)
425 435
426 436 /* Indicates the effect of executing a request on the existing locks */
427 437
428 438 #define FLK_UNLOCK 0x1 /* request unlocks the existing lock */
429 439 #define FLK_DOWNGRADE 0x2 /* request downgrades the existing lock */
430 440 #define FLK_UPGRADE 0x3 /* request upgrades the existing lock */
431 441 #define FLK_STAY_SAME 0x4 /* request type is same as existing lock */
432 442
433 443
434 444 /* proc graph definitions */
435 445
436 446 /*
437 447 * Proc graph is the global process graph that maintains information
438 448 * about the dependencies between processes. An edge is added between two
439 449 * processes represented by proc_vertex's A and B, iff there exists l1
440 450 * owned by process A in any of the lock_graph's dependent on l2
441 451 * (thus having an edge to l2) owned by process B.
442 452 */
443 453 struct proc_vertex {
444 454 pid_t pid; /* pid of the process */
445 455 long sysid; /* sysid of the process */
446 456 struct proc_edge *edge; /* adjacent edges of this process */
447 457 int incount; /* Number of inedges to this process */
448 458 struct proc_edge *p_sedge; /* used for implementing stack alg. */
449 459 struct proc_vertex *p_stack; /* used for stack alg. */
450 460 int atime; /* used for cycle detection algorithm */
451 461 int dtime; /* used for cycle detection algorithm */
452 462 int index; /* index into the array of proc_graph vertices */
453 463 };
454 464
455 465 typedef struct proc_vertex proc_vertex_t;
456 466
457 467 struct proc_edge {
458 468 struct proc_edge *next; /* next edge in adjacency list */
459 469 int refcount; /* reference count of this edge */
460 470 struct proc_vertex *to_proc; /* process this points to */
461 471 };
462 472
463 473 typedef struct proc_edge proc_edge_t;
464 474
465 475
466 476 #define PROC_CHUNK 100
467 477
468 478 struct proc_graph {
469 479 struct proc_vertex **proc; /* list of proc_vertexes */
470 480 int gcount; /* list size */
471 481 int free; /* number of free slots in the list */
472 482 int mark; /* used for graph coloring */
473 483 };
474 484
475 485 typedef struct proc_graph proc_graph_t;
476 486
477 487 extern struct proc_graph pgraph;
478 488
479 489 #define PROC_SAME_OWNER(lock, pvertex) \
480 490 (((lock)->l_flock.l_pid == (pvertex)->pid) && \
481 491 ((lock)->l_flock.l_sysid == (pvertex)->sysid))
482 492
483 493 #define PROC_ARRIVE(pvertex) ((pvertex)->atime = pgraph.mark)
484 494 #define PROC_DEPART(pvertex) ((pvertex)->dtime = pgraph.mark)
485 495 #define PROC_ARRIVED(pvertex) ((pvertex)->atime == pgraph.mark)
486 496 #define PROC_DEPARTED(pvertex) ((pvertex)->dtime == pgraph.mark)
487 497
488 498 #ifdef __cplusplus
489 499 }
490 500 #endif
491 501
492 502 #endif /* _SYS_FLOCK_IMPL_H */
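
The new l_blocker field records, as an hrtime_t, when a lock first started preventing other requests from being granted. The code that stamps and consumes this timestamp lives in flock.c and the NLM modules, which are outside this header diff, so the sketch below is only an assumption-laden illustration of how a stale remote blocker might be detected; the helper name flk_blocker_is_stale(), the 30-second threshold, and the convention that l_blocker stays zero until the lock first blocks another request are all hypothetical.

/*
 * Illustrative sketch only -- not part of this changeset.
 */
#include <sys/flock_impl.h>

#define	FLK_STALE_NS	((hrtime_t)30 * NANOSEC)	/* hypothetical threshold */

static boolean_t
flk_blocker_is_stale(lock_descriptor_t *lock)
{
	/* Assumed: l_blocker is left at zero until the lock blocks someone. */
	if (lock->l_blocker == 0)
		return (B_FALSE);

	/* Only remote (lock-manager) holders are candidates for staleness. */
	if (IS_LOCAL(lock))
		return (B_FALSE);

	/* Stale if it has been blocking others longer than the threshold. */
	return ((gethrtime() - lock->l_blocker) > FLK_STALE_NS ?
	    B_TRUE : B_FALSE);
}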
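
For context on when a blocker would be recorded at all, the SAME_OWNER, OVERLAP, and BLOCKS macros defined above compose as shown in this minimal illustration; the function name and the lock values are made up purely for the example.

/*
 * Illustrative sketch only: two overlapping locks from different owners,
 * at least one of which is a write lock, block each other.
 */
static int
flk_example_conflict(void)
{
	lock_descriptor_t a = { 0 }, b = { 0 };

	a.l_type = F_WRLCK;		/* writer owned by pid 100 */
	a.l_start = 0;
	a.l_end = 1023;
	a.l_flock.l_pid = 100;

	b.l_type = F_RDLCK;		/* reader owned by pid 200 */
	b.l_start = 0;
	b.l_end = 1023;
	b.l_flock.l_pid = 200;

	/* Different owners, overlapping ranges, one writer: conflict. */
	return (BLOCKS(&a, &b));
}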