2553 if (nvp != NULL) {
2554 active = 1;
2555 break;
2556 }
2557
2558 hostp = AVL_NEXT(&g->nlm_hosts_tree, hostp);
2559 }
2560
2561 mutex_exit(&g->lock);
2562 return (active);
2563 }
2564
2565 /*
2566 * Called right before NFS export is going to
2567 disappear. The function finds all vnodes
2568 * belonging to the given export and cleans
2569 * all remote locks and share reservations
2570 * on them.
2571 */
2572 void
2573 nlm_unexport(struct exportinfo *exi)
2574 {
2575 struct nlm_globals *g;
2576 struct nlm_host *hostp;
2577
2578 /* This may be called on behalf of global-zone doing shutdown. */
2579 ASSERT(exi->exi_zone == curzone || curzone == global_zone);
2580 g = zone_getspecific(nlm_zone_key, exi->exi_zone);
2581 if (g == NULL) {
2582 /* Did zone cleanup get here already? */
2583 return;
2584 }
2585
2586 mutex_enter(&g->lock);
2587 hostp = avl_first(&g->nlm_hosts_tree);
2588 while (hostp != NULL) {
2589 struct nlm_vhold *nvp;
2590
/*
 * Pull the host off the idle list (if present) and take a
 * reference so it cannot be reclaimed while g->lock is
 * dropped below.
 */
2591 if (hostp->nh_flags & NLM_NH_INIDLE) {
2592 TAILQ_REMOVE(&g->nlm_idle_hosts, hostp, nh_link);
2593 hostp->nh_flags &= ~NLM_NH_INIDLE;
2594 }
2595 hostp->nh_refs++;
2596
2597 mutex_exit(&g->lock);
2598
/* Walk this host's vnode holds, pinning each across the lock drop. */
2599 mutex_enter(&hostp->nh_lock);
2600 TAILQ_FOREACH(nvp, &hostp->nh_vholds_list, nv_link) {
2601 vnode_t *vp;
2602
2603 nvp->nv_refcnt++;
2604 mutex_exit(&hostp->nh_lock);
2605
2606 vp = nvp->nv_vp;
/*
 * NOTE(review): original lines 2607-2613 are absent from this
 * excerpt; presumably they check whether vp belongs to the
 * export being torn down and "goto next_iter" when it does
 * not -- confirm against the full source. The dangling
 * comment tail below belongs to that missing span.
 */
2614 * to drop all locks from this vnode, let's
2615 * do it.
2616 */
/* Drop all locks/shares this host's sysid holds on the vnode. */
2617 nlm_vhold_clean(nvp, hostp->nh_sysid);
2618
2619 next_iter:
2620 mutex_enter(&hostp->nh_lock);
2621 nvp->nv_refcnt--;
2622 }
2623 mutex_exit(&hostp->nh_lock);
2624
/* Re-take g->lock, release our host reference, and advance. */
2625 mutex_enter(&g->lock);
2626 nlm_host_release_locked(g, hostp);
2627
2628 hostp = AVL_NEXT(&g->nlm_hosts_tree, hostp);
2629 }
2630
2631 mutex_exit(&g->lock);
2632 }
2633
2634 /*
2635 * Allocate new unique sysid.
2636 * In case of failure (no available sysids)
2637 * return LM_NOSYSID.
2638 */
2639 sysid_t
2640 nlm_sysid_alloc(void)
2641 {
2642 sysid_t ret_sysid = LM_NOSYSID;
2643
2644 rw_enter(&lm_lck, RW_WRITER);
2645 if (nlm_sysid_nidx > LM_SYSID_MAX)
2646 nlm_sysid_nidx = LM_SYSID;
2647
2648 if (!BT_TEST(nlm_sysid_bmap, nlm_sysid_nidx)) {
2649 BT_SET(nlm_sysid_bmap, nlm_sysid_nidx);
2650 ret_sysid = nlm_sysid_nidx++;
2651 } else {
2652 index_t id;
2653
|
2553 if (nvp != NULL) {
2554 active = 1;
2555 break;
2556 }
2557
2558 hostp = AVL_NEXT(&g->nlm_hosts_tree, hostp);
2559 }
2560
2561 mutex_exit(&g->lock);
2562 return (active);
2563 }
2564
2565 /*
2566 * Called right before NFS export is going to
2567 disappear. The function finds all vnodes
2568 * belonging to the given export and cleans
2569 * all remote locks and share reservations
2570 * on them.
2571 */
/*
 * Per-zone worker for nlm_unexport(): given the zone's NLM globals,
 * drop all remote locks and share reservations held on vnodes of
 * the export that is going away.
 */
2572 void
2573 nlm_zone_unexport(struct nlm_globals *g, struct exportinfo *exi)
2574 {
2575 struct nlm_host *hostp;
2576
2577 mutex_enter(&g->lock);
2578 if (g->run_status != NLM_ST_UP) {
2579 /* nothing to do */
2580 mutex_exit(&g->lock);
2581 return;
2582 }
2583
2584 hostp = avl_first(&g->nlm_hosts_tree);
2585 while (hostp != NULL) {
2586 struct nlm_vhold *nvp;
2587
/*
 * Pull the host off the idle list (if present) and take a
 * reference so it survives the g->lock drop below.
 */
2588 if (hostp->nh_flags & NLM_NH_INIDLE) {
2589 TAILQ_REMOVE(&g->nlm_idle_hosts, hostp, nh_link);
2590 hostp->nh_flags &= ~NLM_NH_INIDLE;
2591 }
2592 hostp->nh_refs++;
2593
2594 mutex_exit(&g->lock);
2595
/* Walk this host's vnode holds, pinning each across the lock drop. */
2596 mutex_enter(&hostp->nh_lock);
2597 TAILQ_FOREACH(nvp, &hostp->nh_vholds_list, nv_link) {
2598 vnode_t *vp;
2599
2600 nvp->nv_refcnt++;
2601 mutex_exit(&hostp->nh_lock);
2602
2603 vp = nvp->nv_vp;
/*
 * NOTE(review): original lines 2604-2610 are absent from this
 * excerpt; presumably they check whether vp belongs to the
 * export being torn down and "goto next_iter" when it does
 * not -- confirm against the full source. The dangling
 * comment tail below belongs to that missing span.
 */
2611 * to drop all locks from this vnode, let's
2612 * do it.
2613 */
/* Drop all locks/shares this host's sysid holds on the vnode. */
2614 nlm_vhold_clean(nvp, hostp->nh_sysid);
2615
2616 next_iter:
2617 mutex_enter(&hostp->nh_lock);
2618 nvp->nv_refcnt--;
2619 }
2620 mutex_exit(&hostp->nh_lock);
2621
/* Re-take g->lock, release our host reference, and advance. */
2622 mutex_enter(&g->lock);
2623 nlm_host_release_locked(g, hostp);
2624
2625 hostp = AVL_NEXT(&g->nlm_hosts_tree, hostp);
2626 }
2627
2628 mutex_exit(&g->lock);
2629 }
2630
2631 void
2632 nlm_unexport(struct exportinfo *exi)
2633 {
2634 struct nlm_globals *g;
2635
2636 rw_enter(&lm_lck, RW_READER);
2637 TAILQ_FOREACH(g, &nlm_zones_list, nlm_link) {
2638 if (g->nlm_zoneid == exi->exi_zoneid) {
2639 /*
2640 * NOTE: If we want to drop lm_lock before
2641 * calling nlm_zone_unexport(), we should break,
2642 * and have a post-rw_exit() snippit like:
2643 * if (g != NULL)
2644 * nlm_zone_unexport(g, exi);
2645 */
2646 nlm_zone_unexport(g, exi);
2647 break; /* Only going to match once! */
2648 }
2649 }
2650 rw_exit(&lm_lck);
2651 }
2652
2653 /*
2654 * Allocate new unique sysid.
2655 * In case of failure (no available sysids)
2656 * return LM_NOSYSID.
2657 */
2658 sysid_t
2659 nlm_sysid_alloc(void)
2660 {
2661 sysid_t ret_sysid = LM_NOSYSID;
2662
2663 rw_enter(&lm_lck, RW_WRITER);
2664 if (nlm_sysid_nidx > LM_SYSID_MAX)
2665 nlm_sysid_nidx = LM_SYSID;
2666
2667 if (!BT_TEST(nlm_sysid_bmap, nlm_sysid_nidx)) {
2668 BT_SET(nlm_sysid_bmap, nlm_sysid_nidx);
2669 ret_sysid = nlm_sysid_nidx++;
2670 } else {
2671 index_t id;
2672
|