506                         lock = nlock;
 507                 } while (lock->l_vnode == vp);
 508         }
 509 
 510         CHECK_SLEEPING_LOCKS(gp);
 511         CHECK_ACTIVE_LOCKS(gp);
 512         mutex_exit(&gp->gp_mutex);
 513 }
 514 
 515 /*
 516  * Routine called from fs_frlock in fs/fs_subr.c
 517  *
 518  * This implements traditional POSIX style record locking. The two primary
 519  * drawbacks to this style of locking are:
 520  * 1) It is per-process, so any close of a file descriptor that refers to the
 521  *    file will drop the lock (e.g. lock /etc/passwd, call a library function
 522  *    which opens /etc/passwd to read the file; when the library closes its
 523  *    file descriptor, the application loses its lock without knowing it).
 524  * 2) Locks are not preserved across fork(2).
 525  *
 526  * Because these locks are only assoiciated with a pid they are per-process.
 527  * This is why any close will drop the lock and is also why once the process
 528  * forks then the lock is no longer related to the new process. These locks can
 529  * be considered as pid-ful.
 530  *
 531  * See ofdlock() for the implementation of a similar but improved locking
 532  * scheme.
 533  */
 534 int
 535 reclock(vnode_t         *vp,
 536         flock64_t       *lckdat,
 537         int             cmd,
 538         int             flag,
 539         u_offset_t      offset,
 540         flk_callback_t  *flk_cbp)
 541 {
 542         lock_descriptor_t       stack_lock_request;
 543         lock_descriptor_t       *lock_request;
 544         int error = 0;
 545         graph_t *gp;
 546         int                     nlmid;
 547 
 548         /*
 549          * Check access permissions
 
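To make drawback (1) in the comment above reclock() concrete, here is a minimal
userland sketch of the pitfall, using only the standard fcntl(2) record-locking
interface; the path /tmp/lockdemo is arbitrary and error handling is trimmed.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
        struct flock fl = { .l_type = F_WRLCK, .l_whence = SEEK_SET };
        int fd1 = open("/tmp/lockdemo", O_RDWR | O_CREAT, 0644);
        int fd2;

        if (fd1 < 0 || fcntl(fd1, F_SETLK, &fl) == -1) {
                perror("lock");
                return (1);
        }

        /*
         * Open and close a second descriptor for the same file, as a
         * library routine might.  POSIX semantics release every record
         * lock this process holds on the file, even though fd1 is
         * still open.
         */
        fd2 = open("/tmp/lockdemo", O_RDONLY);
        if (fd2 >= 0)
                (void) close(fd2);

        /* The write lock taken through fd1 is gone at this point. */
        return (0);
}
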
 986         l->l_edge.edge_adj_next = &l->l_edge;
 987         l->l_edge.edge_adj_prev = &l->l_edge;
 988         l->pvertex = -1;
 989         l->l_status = FLK_INITIAL_STATE;
 990         flk_lock_allocs++;
 991         return (l);
 992 }
 993 
 994 /*
 995  * Free a lock_descriptor structure; if some thread still holds a
 996  * reference to it (as in reclock()), just set the DELETED_LOCK flag instead.
 997  */
 998 
 999 void
1000 flk_free_lock(lock_descriptor_t *lock)
1001 {
1002         file_t *fp;
1003 
1004         ASSERT(IS_DEAD(lock));
1005 
1006         if ((fp = lock->l_ofd) != NULL)
1007                 fp->f_filock = NULL;
1008 
1009         if (IS_REFERENCED(lock)) {
1010                 lock->l_state |= DELETED_LOCK;
1011                 return;
1012         }
1013         flk_lock_frees++;
1014         kmem_free((void *)lock, sizeof (lock_descriptor_t));
1015 }
1016 
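The comment above flk_free_lock() describes a deferred free: when another thread
(as in reclock()) still holds a reference, only the DELETED_LOCK flag is set and
the kmem_free() happens later, once that reference is dropped. Below is a
schematic sketch of that release-side step; the function name is illustrative
(it is not part of flock.c) and REFERENCED_LOCK is assumed to be the state bit
behind the IS_REFERENCED() macro.

/*
 * Illustrative only.  The last holder clears its reference and, if
 * flk_free_lock() already marked the descriptor DELETED, completes
 * the free that was deferred there.
 */
static void
flk_drop_reference_sketch(lock_descriptor_t *lock)
{
        lock->l_state &= ~REFERENCED_LOCK;      /* assumed flag name */
        if (lock->l_state & DELETED_LOCK) {
                flk_lock_frees++;
                kmem_free((void *)lock, sizeof (lock_descriptor_t));
        }
}
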
1017 void
1018 flk_set_state(lock_descriptor_t *lock, int new_state)
1019 {
1020         /*
1021          * Locks in the sleeping list may be woken up in a number of ways,
1022          * and more than once.  If a sleeping lock is signaled awake more
1023          * than once, then it may or may not change state depending on its
1024          * current state.
1025          * Also note that NLM locks that are sleeping could be moved to an
1026          * interrupted state more than once if the unlock request is
 
 
 
 506                         lock = nlock;
 507                 } while (lock->l_vnode == vp);
 508         }
 509 
 510         CHECK_SLEEPING_LOCKS(gp);
 511         CHECK_ACTIVE_LOCKS(gp);
 512         mutex_exit(&gp->gp_mutex);
 513 }
 514 
 515 /*
 516  * Routine called from fs_frlock in fs/fs_subr.c
 517  *
 518  * This implements traditional POSIX style record locking. The two primary
 519  * drawbacks to this style of locking are:
 520  * 1) It is per-process, so any close of a file descriptor that refers to the
 521  *    file will drop the lock (e.g. lock /etc/passwd, call a library function
 522  *    which opens /etc/passwd to read the file; when the library closes its
 523  *    file descriptor, the application loses its lock without knowing it).
 524  * 2) Locks are not preserved across fork(2).
 525  *
 526  * Because these locks are only associated with a PID, they are per-process.
 527  * This is why any close will drop the lock and is also why, once the process
 528  * forks, the lock is no longer related to the new process. These locks can
 529  * be considered as PID-ful.
 530  *
 531  * See ofdlock() for the implementation of a similar but improved locking
 532  * scheme.
 533  */
 534 int
 535 reclock(vnode_t         *vp,
 536         flock64_t       *lckdat,
 537         int             cmd,
 538         int             flag,
 539         u_offset_t      offset,
 540         flk_callback_t  *flk_cbp)
 541 {
 542         lock_descriptor_t       stack_lock_request;
 543         lock_descriptor_t       *lock_request;
 544         int error = 0;
 545         graph_t *gp;
 546         int                     nlmid;
 547 
 548         /*
 549          * Check access permissions
 
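The same comment points to ofdlock() as the improved scheme, where the lock is
owned by the open file description rather than the process. A minimal sketch of
that style, assuming the F_OFD_SETLK fcntl(2) command is available on the target
system (recent illumos has it; on Linux/glibc it additionally requires
_GNU_SOURCE); the path is again arbitrary.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
        struct flock fl = { .l_type = F_WRLCK, .l_whence = SEEK_SET };
        int fd1 = open("/tmp/lockdemo", O_RDWR | O_CREAT, 0644);
        int fd2;

        /* The lock belongs to fd1's open file description, not to the pid. */
        if (fd1 < 0 || fcntl(fd1, F_OFD_SETLK, &fl) == -1) {
                perror("ofd lock");
                return (1);
        }

        /*
         * Closing an unrelated descriptor for the same file does not
         * drop the lock.
         */
        fd2 = open("/tmp/lockdemo", O_RDONLY);
        if (fd2 >= 0)
                (void) close(fd2);

        /*
         * fd1 still holds the write lock here; it is released when fd1
         * (and any descriptors dup'd from it) are closed.
         */
        return (0);
}
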
 986         l->l_edge.edge_adj_next = &l->l_edge;
 987         l->l_edge.edge_adj_prev = &l->l_edge;
 988         l->pvertex = -1;
 989         l->l_status = FLK_INITIAL_STATE;
 990         flk_lock_allocs++;
 991         return (l);
 992 }
 993 
 994 /*
 995  * Free a lock_descriptor structure; if some thread still holds a
 996  * reference to it (as in reclock()), just set the DELETED_LOCK flag instead.
 997  */
 998 
 999 void
1000 flk_free_lock(lock_descriptor_t *lock)
1001 {
1002         file_t *fp;
1003 
1004         ASSERT(IS_DEAD(lock));
1005 
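              /*
               * Clear the file_t's back-pointer only if it still refers
               * to this lock; another lock may have taken its place.
               */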
1006         if ((fp = lock->l_ofd) != NULL && fp->f_filock == (struct filock *)lock)
1007                 fp->f_filock = NULL;
1008 
1009         if (IS_REFERENCED(lock)) {
1010                 lock->l_state |= DELETED_LOCK;
1011                 return;
1012         }
1013         flk_lock_frees++;
1014         kmem_free((void *)lock, sizeof (lock_descriptor_t));
1015 }
1016 
1017 void
1018 flk_set_state(lock_descriptor_t *lock, int new_state)
1019 {
1020         /*
1021          * Locks in the sleeping list may be woken up in a number of ways,
1022          * and more than once.  If a sleeping lock is signaled awake more
1023          * than once, then it may or may not change state depending on its
1024          * current state.
1025          * Also note that NLM locks that are sleeping could be moved to an
1026          * interrupted state more than once if the unlock request is
 