 * These two calculations are done simultaneously, with most of the work
 * being done in vmu_calculate_seg().  The results of the calculation are
 * copied into "vmu_data.vmu_cache_results".
 *
 * To perform the calculation, various things are tracked and cached:
 *
 * - incore/not-incore page ranges for all vnodes.
 *	(vmu_data.vmu_all_vnodes_hash)
 *	This eliminates looking up the same page more than once.
 *
 * - incore/not-incore page ranges for all shared amps.
 *	(vmu_data.vmu_all_amps_hash)
 *	This eliminates looking up the same page more than once.
 *
 * - visited page ranges for each collective.
 *	- per vnode (entity->vme_vnode_hash)
 *	- per shared amp (entity->vme_amp_hash)
 *	For accurate counting of map-shared and COW-shared pages.
 *
 * - visited private anons (refcnt > 1) for each collective.
 *	(entity->vme_anon)
 *	For accurate counting of COW-shared pages.
 *
 * The common accounting structure is the vmu_entity_t, which represents
 * collectives:
 *
 * - A zone.
 * - A project, task, or user within a zone.
 * - The entire system (vmu_data.vmu_system).
 * - Each collapsed (col) project and user.  This means a given projid or
 *   uid, regardless of which zone the process is in.  For instance,
 *   project 0 in the global zone and project 0 in a non-global zone are
 *   the same collapsed project.
 *
 * Each entity structure tracks which pages have been already visited for
 * that entity (via previously inspected processes) so that these pages are
 * not double counted.
 */
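
/*
 * For context: the cached results computed below are ultimately consumed
 * via the getvmusage(2) system call.  A minimal userland sketch follows
 * (illustrative only; see sys/vm_usage.h and getvmusage(2) for the
 * authoritative interface):
 *
 *	vmusage_t buf[32];
 *	size_t nres = 32;
 *
 *	if (getvmusage(VMUSAGE_SYSTEM | VMUSAGE_ZONE, 60, buf, &nres) == 0) {
 *		for (size_t i = 0; i < nres; i++)
 *			(void) printf("type %u id %d rss %llu swap %llu\n",
 *			    buf[i].vmu_type, (int)buf[i].vmu_id,
 *			    (u_longlong_t)buf[i].vmu_rss_all,
 *			    (u_longlong_t)buf[i].vmu_swap_all);
 *	}
 *
 * An "age" of 60 allows results computed within the last 60 seconds to be
 * returned from vmu_data.vmu_cache_results without a recalculation.
 */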

#include <sys/errno.h>
#include <sys/types.h>
#include <sys/zone.h>
#include <sys/proc.h>
#include <sys/project.h>
#include <sys/task.h>
#include <sys/thread.h>
#include <sys/time.h>
#include <sys/mman.h>
#include <sys/modhash.h>
#include <sys/modhash_impl.h>
#include <sys/shm.h>
#include <sys/swap.h>
#include <sys/synch.h>
#include <sys/systm.h>
#include <sys/var.h>
#include <sys/vm_usage.h>
#include <sys/sunddi.h>
#include <sys/sysmacros.h>
#include <sys/avl.h>
#include <vm/anon.h>
#include <vm/as.h>
#include <vm/seg_vn.h>
#include <vm/seg_spt.h>

#define	VMUSAGE_HASH_SIZE		512

#define	VMUSAGE_TYPE_VNODE		1
#define	VMUSAGE_TYPE_AMP		2
#define	VMUSAGE_TYPE_ANON		3

#define	VMUSAGE_BOUND_UNKNOWN		0
#define	VMUSAGE_BOUND_INCORE		1
#define	VMUSAGE_BOUND_NOT_INCORE	2

#define	ISWITHIN(node, addr)	((node)->vmb_start <= (addr) && \
	    (node)->vmb_end >= (addr) ? 1 : 0)
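
/*
 * For example, for a bound covering pages [5, 9], ISWITHIN(bound, 7) is 1
 * and ISWITHIN(bound, 10) is 0; "addr" is a page index into the vnode or
 * amp, not a byte address.
 */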

/*
 * Bounds for vnodes and shared amps.  Each bound is either entirely
 * incore, entirely not incore, or entirely of unknown state.  Bounds are
 * stored in an AVL tree sorted by vmb_start while in use; otherwise (on
 * the free or temporary lists) they are strung together off of vmb_next.
 */
typedef struct vmu_bound {
	avl_node_t vmb_node;
	struct vmu_bound *vmb_next; /* NULL in tree else on free or temp list */
	pgcnt_t	vmb_start;	/* page offset in vnode/amp on which bound starts */
	pgcnt_t	vmb_end;	/* page offset in vnode/amp on which bound ends */
	char	vmb_type;	/* One of VMUSAGE_BOUND_* */
} vmu_bound_t;

/*
 * Hash of visited objects (vnodes or shared amps); the key is the address
 * of the vnode or amp.  The bounds tree holds the known incore/not-incore
 * ranges for the vnode/amp.
 */
typedef struct vmu_object {
	struct vmu_object *vmo_next;	/* free list */
	caddr_t		vmo_key;
	short		vmo_type;
	avl_tree_t	vmo_bounds;
} vmu_object_t;

/*
 * Node for the tree of visited COW anons.
 */
typedef struct vmu_anon {
	avl_node_t	vma_node;
	uintptr_t	vma_addr;
} vmu_anon_t;

/*
 * Entity by which to count results.
 *
 * The entity structure keeps the current rss/swap counts for each entity
 * (zone, project, etc.), and hashes of vm structures that have already
 * been visited for the entity.
 *
 * vme_next: links the list of all entities currently being counted by
 * vmu_calculate().
 *
 * vme_next_calc: links the list of entities related to the current process
 * being counted by vmu_calculate_proc().
 *
 * vmu_calculate_proc() walks all processes.  For each process, it makes a
 * list of the entities related to that process using vme_next_calc.  This
 * list changes each time vmu_calculate_proc() is called.
 */
typedef struct vmu_entity {
	struct vmu_entity *vme_next;
	struct vmu_entity *vme_next_calc;
	mod_hash_t	*vme_vnode_hash; /* vnodes visited for entity */
	mod_hash_t	*vme_amp_hash;	/* shared amps visited for entity */
	avl_tree_t	vme_anon;	/* COW anons visited for entity */
	vmusage_t	vme_result;	/* identifies entity and results */
} vmu_entity_t;

/*
 * Hash of entities visited within a zone, and an entity for the zone
 * itself.
 */
typedef struct vmu_zone {
	struct vmu_zone	*vmz_next;	/* free list */
	id_t		vmz_id;
	vmu_entity_t	*vmz_zone;
	mod_hash_t	*vmz_projects_hash;
	mod_hash_t	*vmz_tasks_hash;
	mod_hash_t	*vmz_rusers_hash;
	mod_hash_t	*vmz_eusers_hash;
} vmu_zone_t;

/*
 * Cache of results from the last calculation.
 */

/*
 * Comparison routine for AVL tree.  We base our comparison on vmb_start.
 */
static int
bounds_cmp(const void *bnd1, const void *bnd2)
{
	const vmu_bound_t *bound1 = bnd1;
	const vmu_bound_t *bound2 = bnd2;

	if (bound1->vmb_start == bound2->vmb_start) {
		return (0);
	}
	if (bound1->vmb_start < bound2->vmb_start) {
		return (-1);
	}

	return (1);
}
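
/*
 * bounds_cmp() is the ordering routine handed to avl_create() when a
 * vmu_object_t's bounds tree is initialized (that allocation path is
 * elided from this excerpt); roughly:
 *
 *	avl_create(&object->vmo_bounds, bounds_cmp,
 *	    sizeof (vmu_bound_t), offsetof(vmu_bound_t, vmb_node));
 */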

/*
 * Comparison routine for our AVL tree of anon structures.
 */
static int
vmu_anon_cmp(const void *lhs, const void *rhs)
{
	const vmu_anon_t *l = lhs, *r = rhs;

	if (l->vma_addr == r->vma_addr)
		return (0);

	if (l->vma_addr < r->vma_addr)
		return (-1);

	return (1);
}

/*
 * Save a bound on the free list.
 */
static void
vmu_free_bound(vmu_bound_t *bound)
{
	bound->vmb_next = vmu_data.vmu_free_bounds;
	bound->vmb_start = 0;
	bound->vmb_end = 0;
	bound->vmb_type = 0;
	vmu_data.vmu_free_bounds = bound;
}

/*
 * Free an object, and all visited bound info.
 */
static void
vmu_free_object(mod_hash_val_t val)
{
	vmu_object_t *obj = (vmu_object_t *)val;
	avl_tree_t *tree = &(obj->vmo_bounds);
	vmu_bound_t *bound;
	void *cookie = NULL;

	while ((bound = avl_destroy_nodes(tree, &cookie)) != NULL)
		vmu_free_bound(bound);
	avl_destroy(tree);

	obj->vmo_type = 0;
	obj->vmo_next = vmu_data.vmu_free_objects;
	vmu_data.vmu_free_objects = obj;
}
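
/*
 * avl_destroy_nodes() yields each node in turn without rebalancing the
 * tree, so the drain-then-avl_destroy() pattern above is the cheap way
 * to empty a bounds tree wholesale.
 */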

/*
 * Free an entity, and hashes of visited objects for that entity.
 */
static void
vmu_free_entity(mod_hash_val_t val)
{
	vmu_entity_t *entity = (vmu_entity_t *)val;
	vmu_anon_t *anon;
	void *cookie = NULL;

	if (entity->vme_vnode_hash != NULL)
		i_mod_hash_clear_nosync(entity->vme_vnode_hash);
	if (entity->vme_amp_hash != NULL)
		i_mod_hash_clear_nosync(entity->vme_amp_hash);

	while ((anon = avl_destroy_nodes(&entity->vme_anon, &cookie)) != NULL)
		kmem_free(anon, sizeof (vmu_anon_t));

	avl_destroy(&entity->vme_anon);

	entity->vme_next = vmu_data.vmu_free_entities;
	vmu_data.vmu_free_entities = entity;
}

/*
 * Free zone entity, and all hashes of entities inside that zone,
 * which are projects, tasks, and users.
 */
static void
vmu_free_zone(mod_hash_val_t val)
{
	vmu_zone_t *zone = (vmu_zone_t *)val;

	if (zone->vmz_zone != NULL) {
		vmu_free_entity((mod_hash_val_t)zone->vmz_zone);
		zone->vmz_zone = NULL;
	}
	if (zone->vmz_projects_hash != NULL)
		i_mod_hash_clear_nosync(zone->vmz_projects_hash);
	if (zone->vmz_tasks_hash != NULL)
		vmu_data.vmu_free_entities =
		    vmu_data.vmu_free_entities->vme_next;
		bzero(&entity->vme_result, sizeof (vmusage_t));
	} else {
		entity = kmem_zalloc(sizeof (vmu_entity_t), KM_SLEEP);
	}
	entity->vme_result.vmu_id = id;
	entity->vme_result.vmu_zoneid = zoneid;
	entity->vme_result.vmu_type = type;

	if (entity->vme_vnode_hash == NULL)
		entity->vme_vnode_hash = mod_hash_create_ptrhash(
		    "vmusage vnode hash", VMUSAGE_HASH_SIZE, vmu_free_object,
		    sizeof (vnode_t));

	if (entity->vme_amp_hash == NULL)
		entity->vme_amp_hash = mod_hash_create_ptrhash(
		    "vmusage amp hash", VMUSAGE_HASH_SIZE, vmu_free_object,
		    sizeof (struct anon_map));

	VERIFY(avl_first(&entity->vme_anon) == NULL);

	avl_create(&entity->vme_anon, vmu_anon_cmp, sizeof (struct vmu_anon),
	    offsetof(struct vmu_anon, vma_node));

	entity->vme_next = vmu_data.vmu_entities;
	vmu_data.vmu_entities = entity;
	vmu_data.vmu_nentities++;

	return (entity);
}
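
/*
 * Note the asymmetry above: the vnode and amp mod hashes survive on the
 * entity free list (vmu_free_entity() merely clears them), so they are
 * created at most once per entity, while the anon AVL tree is destroyed
 * at free time and therefore re-created on every allocation.
 */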

/*
 * Allocate a zone entity, and hashes for tracking visited vm objects
 * for projects, tasks, and users within that zone.
 */
static vmu_zone_t *
vmu_alloc_zone(id_t id)
{
	vmu_zone_t *zone;

	if (vmu_data.vmu_free_zones != NULL) {
		zone = vmu_data.vmu_free_zones;
		vmu_data.vmu_free_zones =
		    vmu_data.vmu_free_zones->vmz_next;
 * insert operations.
 */
static vmu_object_t *
vmu_find_insert_object(mod_hash_t *hash, caddr_t key, uint_t type)
{
	int ret;
	vmu_object_t *object;

	ret = i_mod_hash_find_nosync(hash, (mod_hash_key_t)key,
	    (mod_hash_val_t *)&object);
	if (ret != 0) {
		object = vmu_alloc_object(key, type);
		ret = i_mod_hash_insert_nosync(hash, (mod_hash_key_t)key,
		    (mod_hash_val_t)object, (mod_hash_hndl_t)0);
		ASSERT(ret == 0);
	}
	return (object);
}
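
/*
 * A typical call site (illustrative; the segment-walking code is elided
 * from this excerpt) looks up the global visited-object state for a
 * vnode, where "vn" is the vnode backing the segment being examined:
 *
 *	object = vmu_find_insert_object(vmu_data.vmu_all_vnodes_hash,
 *	    (caddr_t)vn, VMUSAGE_TYPE_VNODE);
 */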

static int
vmu_find_insert_anon(vmu_entity_t *entity, void *key)
{
	vmu_anon_t anon, *ap;

	anon.vma_addr = (uintptr_t)key;

	if (avl_find(&entity->vme_anon, &anon, NULL) != NULL)
		return (0);

	ap = kmem_alloc(sizeof (vmu_anon_t), KM_SLEEP);
	ap->vma_addr = (uintptr_t)key;

	avl_add(&entity->vme_anon, ap);

	return (1);
}
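
/*
 * vmu_find_insert_anon() returns 1 on the first visit of an anon and 0
 * on every later visit; callers use this to charge a COW page to an
 * entity exactly once.  A sketch of an equivalent that avoids the second
 * tree descent by reusing the insertion point computed by avl_find():
 *
 *	avl_index_t where;
 *
 *	if (avl_find(&entity->vme_anon, &anon, &where) != NULL)
 *		return (0);
 *	ap = kmem_alloc(sizeof (vmu_anon_t), KM_SLEEP);
 *	ap->vma_addr = (uintptr_t)key;
 *	avl_insert(&entity->vme_anon, ap, where);
 *	return (1);
 */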

static vmu_entity_t *
vmu_find_insert_entity(mod_hash_t *hash, id_t id, uint_t type, id_t zoneid)
{
	int ret;
	vmu_entity_t *entity;

	ret = i_mod_hash_find_nosync(hash, (mod_hash_key_t)(uintptr_t)id,
	    (mod_hash_val_t *)&entity);
	if (ret != 0) {
		entity = vmu_alloc_entity(id, type, zoneid);
		ret = i_mod_hash_insert_nosync(hash,
		    (mod_hash_key_t)(uintptr_t)id, (mod_hash_val_t)entity,
		    (mod_hash_hndl_t)0);
		ASSERT(ret == 0);
	}
	return (entity);
}
			if (PP_ISFREE(page))
				continue;

			/*
			 * Assume anon structs with a refcnt
			 * of 1 are not COW shared, so there
			 * is no reason to track them per entity.
			 */
			if (cnt == 1) {
				panon += pgcnt;
				continue;
			}
			for (entity = vmu_entities; entity != NULL;
			    entity = entity->vme_next_calc) {

				result = &entity->vme_result;
				/*
				 * Track COW anons per entity so
				 * they are not double counted.
				 */
				if (vmu_find_insert_anon(entity, ap) == 0)
					continue;

				result->vmu_rss_all += (pgcnt << PAGESHIFT);
				result->vmu_rss_private +=
				    (pgcnt << PAGESHIFT);
			}
		}
		ANON_LOCK_EXIT(&private_amp->a_rwlock);
	}

	/* Add up resident anon and swap reserved for private mappings */
	if (swresv > 0 || panon > 0) {
		for (entity = vmu_entities; entity != NULL;
		    entity = entity->vme_next_calc) {
			result = &entity->vme_result;
			result->vmu_swap_all += swresv;
			result->vmu_swap_private += swresv;
			result->vmu_rss_all += (panon << PAGESHIFT);
			result->vmu_rss_private += (panon << PAGESHIFT);
		}

	while (vmu_data.vmu_free_bounds != NULL) {
		tb = vmu_data.vmu_free_bounds;
		vmu_data.vmu_free_bounds = vmu_data.vmu_free_bounds->vmb_next;
		kmem_cache_free(vmu_bound_cache, tb);
	}
	while (vmu_data.vmu_free_objects != NULL) {
		to = vmu_data.vmu_free_objects;
		vmu_data.vmu_free_objects =
		    vmu_data.vmu_free_objects->vmo_next;
		kmem_cache_free(vmu_object_cache, to);
	}
	while (vmu_data.vmu_free_entities != NULL) {
		te = vmu_data.vmu_free_entities;
		vmu_data.vmu_free_entities =
		    vmu_data.vmu_free_entities->vme_next;
		if (te->vme_vnode_hash != NULL)
			mod_hash_destroy_hash(te->vme_vnode_hash);
		if (te->vme_amp_hash != NULL)
			mod_hash_destroy_hash(te->vme_amp_hash);
		VERIFY(avl_first(&te->vme_anon) == NULL);
		kmem_free(te, sizeof (vmu_entity_t));
	}
	while (vmu_data.vmu_free_zones != NULL) {
		tz = vmu_data.vmu_free_zones;
		vmu_data.vmu_free_zones =
		    vmu_data.vmu_free_zones->vmz_next;
		if (tz->vmz_projects_hash != NULL)
			mod_hash_destroy_hash(tz->vmz_projects_hash);
		if (tz->vmz_tasks_hash != NULL)
			mod_hash_destroy_hash(tz->vmz_tasks_hash);
		if (tz->vmz_rusers_hash != NULL)
			mod_hash_destroy_hash(tz->vmz_rusers_hash);
		if (tz->vmz_eusers_hash != NULL)
			mod_hash_destroy_hash(tz->vmz_eusers_hash);
		kmem_free(tz, sizeof (vmu_zone_t));
	}
}

extern kcondvar_t *pr_pid_cv;
