                kmem_cache_free(kmem_defrag_cache, defrag); /* unused */
        }
}

void
kmem_cache_destroy(kmem_cache_t *cp)
{
        int cpu_seqid;

        /*
         * Remove the cache from the global cache list so that no one else
         * can schedule tasks on its behalf, wait for any pending tasks to
         * complete, purge the cache, and then destroy it.
         */
        mutex_enter(&kmem_cache_lock);
        list_remove(&kmem_caches, cp);
        mutex_exit(&kmem_cache_lock);

        if (kmem_taskq != NULL)
                taskq_wait(kmem_taskq);

        if (kmem_move_taskq != NULL && cp->cache_defrag != NULL)
                taskq_wait(kmem_move_taskq);

        kmem_cache_magazine_purge(cp);

        mutex_enter(&cp->cache_lock);
        if (cp->cache_buftotal != 0)
                cmn_err(CE_WARN, "kmem_cache_destroy: '%s' (%p) not empty",
                    cp->cache_name, (void *)cp);
        if (cp->cache_defrag != NULL) {
                avl_destroy(&cp->cache_defrag->kmd_moves_pending);
                list_destroy(&cp->cache_defrag->kmd_deadlist);
                kmem_cache_free(kmem_defrag_cache, cp->cache_defrag);
                cp->cache_defrag = NULL;
        }
        /*
         * The cache is now dead.  There should be no further activity.  We
         * enforce this by setting land mines in the constructor, destructor,
         * reclaim, and move routines that induce a kernel text fault if
         * invoked.
         */
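        /*
         * A sketch of the land mines described above: each callback pointer
         * is aimed at a small address that cannot be kernel text, so any
         * late invocation faults immediately instead of silently touching
         * freed state.  The specific values 1-4 are illustrative; any
         * distinct non-text addresses would serve.
         */
        cp->cache_constructor = (int (*)(void *, void *, int))1;
        cp->cache_destructor = (void (*)(void *, void *))2;
        cp->cache_reclaim = (void (*)(void *))3;
        cp->cache_move = (kmem_cbrc_t (*)(void *, void *, size_t, void *))4;
        mutex_exit(&cp->cache_lock);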