Print this page
OS-4937 lxbrand ptracer count updates can race
Reviewed by: Jerry Jelinek <jerry.jelinek@joyent.com>
Reviewed by: Joshua M. Clulow <jmc@joyent.com>
OS-2834 ship lx brand
| Split |
Close |
| Expand all |
| Collapse all |
--- old/usr/src/uts/common/os/pid.c
+++ new/usr/src/uts/common/os/pid.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
|
↓ open down ↓ |
13 lines elided |
↑ open up ↑ |
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright (c) 1989, 2010, Oracle and/or its affiliates. All rights reserved.
24 + * Copyright 2015 Joyent, Inc.
24 25 */
25 26
26 27 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
27 28 /* All Rights Reserved */
28 29
29 30 #include <sys/types.h>
30 31 #include <sys/param.h>
31 32 #include <sys/sysmacros.h>
32 33 #include <sys/proc.h>
33 34 #include <sys/kmem.h>
34 35 #include <sys/tuneable.h>
35 36 #include <sys/var.h>
36 37 #include <sys/cred.h>
37 38 #include <sys/systm.h>
38 39 #include <sys/prsystm.h>
39 40 #include <sys/vnode.h>
40 41 #include <sys/session.h>
41 42 #include <sys/cpuvar.h>
42 43 #include <sys/cmn_err.h>
43 44 #include <sys/bitmap.h>
44 45 #include <sys/debug.h>
45 46 #include <c2/audit.h>
46 47 #include <sys/project.h>
47 48 #include <sys/task.h>
48 49 #include <sys/zone.h>
49 50
/*
 * Directory entries for /proc.  Each slot in procdir is either in use
 * (pe_proc points at the occupying process) or free (pe_next chains it
 * onto the procentfree freelist).
 */
union procent {
	proc_t *pe_proc;	/* in use: process occupying this slot */
	union procent *pe_next;	/* free: next entry on procentfree list */
};
55 56
/*
 * Statically-allocated pid structure for pid 0; hashed into the pid
 * table at boot by pid_init().
 */
struct pid pid0 = {
	0,		/* pid_prinactive */
	1,		/* pid_pgorphaned */
	0,		/* pid_padding */
	0,		/* pid_prslot */
	0,		/* pid_id */
	NULL,		/* pid_pglink */
	NULL,		/* pid_pgtail */
	NULL,		/* pid_link */
	3		/* pid_ref */
};
67 68
static int pid_hashlen = 4;	/* desired average hash chain length */
static int pid_hashsz;		/* number of buckets in the hash table */

/* Hash a pid to its bucket; pid_hashsz is a power of two (see pid_init()). */
#define	HASHPID(pid)	(pidhash[((pid)&(pid_hashsz-1))])

extern uint_t nproc;
extern struct kmem_cache *process_cache;
static void	upcount_init(void);

kmutex_t	pidlock;	/* global process lock */
kmutex_t	pr_pidlock;	/* /proc global process lock */
kcondvar_t	*pr_pid_cv;	/* for /proc, one per process slot */
struct plock	*proc_lock;	/* persistent array of p_lock's */

/*
 * See the comment above pid_getlockslot() for a detailed explanation of this
 * constant.  Note that a PLOCK_SHIFT of 3 implies 64-byte coherence
 * granularity; if the coherence granularity is ever changed, this constant
 * should be modified to reflect the change to minimize proc_lock false
 * sharing (correctness, however, is guaranteed regardless of the coherence
 * granularity).
 */
#define	PLOCK_SHIFT	3

static kmutex_t pidlinklock;		/* protects pidhash and procentfree */
static struct pid **pidhash;		/* pid hash table, sized at boot */
static pid_t minpid;			/* lowest pid we will allocate */
static pid_t mpid = FAMOUS_PIDS;	/* one more than the last famous pid */
static union procent *procdir;		/* /proc directory entries */
static union procent *procentfree;	/* freelist of /proc dir entries */
98 99
99 100 static struct pid *
100 101 pid_lookup(pid_t pid)
101 102 {
102 103 struct pid *pidp;
103 104
104 105 ASSERT(MUTEX_HELD(&pidlinklock));
|
↓ open down ↓ |
71 lines elided |
↑ open up ↑ |
105 106
106 107 for (pidp = HASHPID(pid); pidp; pidp = pidp->pid_link) {
107 108 if (pidp->pid_id == pid) {
108 109 ASSERT(pidp->pid_ref > 0);
109 110 break;
110 111 }
111 112 }
112 113 return (pidp);
113 114 }
114 115
116 +struct pid *
117 +pid_find(pid_t pid)
118 +{
119 + struct pid *pidp;
120 +
121 + mutex_enter(&pidlinklock);
122 + pidp = pid_lookup(pid);
123 + mutex_exit(&pidlinklock);
124 +
125 + return (pidp);
126 +}
127 +
115 128 void
116 129 pid_setmin(void)
117 130 {
118 131 if (jump_pid && jump_pid > mpid)
119 132 minpid = mpid = jump_pid;
120 133 else
121 134 minpid = mpid;
122 135 }
123 136
124 137 /*
125 138 * When prslots are simply used as an index to determine a process' p_lock,
126 139 * adjacent prslots share adjacent p_locks. On machines where the size
127 140 * of a mutex is smaller than that of a cache line (which, as of this writing,
128 141 * is true for all machines on which Solaris runs), this can potentially
129 142 * induce false sharing. The standard solution for false sharing is to pad
130 143 * out one's data structures (in this case, struct plock). However,
131 144 * given the size and (generally) sparse use of the proc_lock array, this
132 145 * is suboptimal. We therefore stride through the proc_lock array with
133 146 * a stride of PLOCK_SHIFT. PLOCK_SHIFT should be defined as:
134 147 *
135 148 * log_2 (coherence_granularity / sizeof (kmutex_t))
136 149 *
137 150 * Under this scheme, false sharing is still possible -- but only when
138 151 * the number of active processes is very large. Note that the one-to-one
139 152 * mapping between prslots and lockslots is maintained.
140 153 */
141 154 static int
142 155 pid_getlockslot(int prslot)
143 156 {
144 157 int even = (v.v_proc >> PLOCK_SHIFT) << PLOCK_SHIFT;
145 158 int perlap = even >> PLOCK_SHIFT;
146 159
147 160 if (prslot >= even)
148 161 return (prslot);
149 162
150 163 return (((prslot % perlap) << PLOCK_SHIFT) + (prslot / perlap));
151 164 }
152 165
/*
 * This function allocates a pid structure, a free pid, and optionally a
 * slot in the proc table for it.
 *
 * pid_allocate() returns the new pid on success, -1 on failure.
 */
pid_t
pid_allocate(proc_t *prp, pid_t pid, int flags)
{
	struct pid *pidp;
	union procent *pep;
	pid_t newpid, startpid;

	/* Allocate up front so we never sleep while holding pidlinklock. */
	pidp = kmem_zalloc(sizeof (struct pid), KM_SLEEP);

	mutex_enter(&pidlinklock);
	if ((flags & PID_ALLOC_PROC) && (pep = procentfree) == NULL) {
		/*
		 * ran out of /proc directory entries
		 */
		goto failed;
	}

	if (pid != 0) {
		/*
		 * Caller requested a specific pid: minpid must still be 0
		 * and the pid must be below the dynamic range and unused.
		 */
		VERIFY(minpid == 0);
		VERIFY3P(pid, <, mpid);
		VERIFY3P(pid_lookup(pid), ==, NULL);
		newpid = pid;
	} else {
		/*
		 * Allocate a pid: scan forward from mpid, wrapping at
		 * maxpid back to minpid.
		 */
		ASSERT(minpid <= mpid && mpid < maxpid);

		startpid = mpid;
		for (;;) {
			newpid = mpid;
			if (++mpid == maxpid)
				mpid = minpid;

			if (pid_lookup(newpid) == NULL)
				break;

			/* Wrapped all the way around: no free pid. */
			if (mpid == startpid)
				goto failed;
		}
	}

	/*
	 * Put pid into the pid hash table.
	 */
	pidp->pid_link = HASHPID(newpid);
	HASHPID(newpid) = pidp;
	pidp->pid_ref = 1;
	pidp->pid_id = newpid;

	if (flags & PID_ALLOC_PROC) {
		/* Bind a /proc directory slot and a p_lock to the process. */
		procentfree = pep->pe_next;
		pidp->pid_prslot = pep - procdir;
		pep->pe_proc = prp;
		prp->p_pidp = pidp;
		prp->p_lockp = &proc_lock[pid_getlockslot(pidp->pid_prslot)];
	} else {
		pidp->pid_prslot = 0;
	}

	mutex_exit(&pidlinklock);

	return (newpid);

failed:
	mutex_exit(&pidlinklock);
	kmem_free(pidp, sizeof (struct pid));
	return (-1);
}
228 241
229 242 /*
230 243 * decrement the reference count for pid
231 244 */
232 245 int
233 246 pid_rele(struct pid *pidp)
234 247 {
235 248 struct pid **pidpp;
236 249
237 250 mutex_enter(&pidlinklock);
238 251 ASSERT(pidp != &pid0);
239 252
240 253 pidpp = &HASHPID(pidp->pid_id);
241 254 for (;;) {
242 255 ASSERT(*pidpp != NULL);
243 256 if (*pidpp == pidp)
244 257 break;
245 258 pidpp = &(*pidpp)->pid_link;
246 259 }
247 260
248 261 *pidpp = pidp->pid_link;
249 262 mutex_exit(&pidlinklock);
250 263
251 264 kmem_free(pidp, sizeof (*pidp));
252 265 return (0);
253 266 }
254 267
255 268 void
256 269 proc_entry_free(struct pid *pidp)
257 270 {
258 271 mutex_enter(&pidlinklock);
259 272 pidp->pid_prinactive = 1;
260 273 procdir[pidp->pid_prslot].pe_next = procentfree;
261 274 procentfree = &procdir[pidp->pid_prslot];
262 275 mutex_exit(&pidlinklock);
263 276 }
264 277
/*
 * The original task needs to be passed in since the process has already been
 * detached from the task at this point in time.
 */
void
pid_exit(proc_t *prp, struct task *tk)
{
	struct pid *pidp;
	zone_t *zone = prp->p_zone;

	ASSERT(MUTEX_HELD(&pidlock));

	/*
	 * Exit process group. If it is NULL, it's because fork failed
	 * before calling pgjoin().
	 */
	ASSERT(prp->p_pgidp != NULL || prp->p_stat == SIDL);
	if (prp->p_pgidp != NULL)
		pgexit(prp);

	sess_rele(prp->p_sessp, B_TRUE);

	pidp = prp->p_pidp;

	/* Return the /proc slot to the freelist; marks the pid inactive. */
	proc_entry_free(pidp);

	if (audit_active)
		audit_pfree(prp);

	/* Unlink prp from the doubly-linked practive list. */
	if (practive == prp) {
		practive = prp->p_next;
	}

	if (prp->p_next) {
		prp->p_next->p_prev = prp->p_prev;
	}
	if (prp->p_prev) {
		prp->p_prev->p_next = prp->p_next;
	}

	PID_RELE(pidp);

	mutex_destroy(&prp->p_crlock);
	kmem_cache_free(process_cache, prp);
	nproc--;

	/*
	 * Decrement the process counts of the original task, project and zone.
	 */
	mutex_enter(&zone->zone_nlwps_lock);
	tk->tk_nprocs--;
	tk->tk_proj->kpj_nprocs--;
	zone->zone_nprocs--;
	mutex_exit(&zone->zone_nlwps_lock);
}
320 333
321 334 /*
322 335 * Find a process visible from the specified zone given its process ID.
323 336 */
324 337 proc_t *
325 338 prfind_zone(pid_t pid, zoneid_t zoneid)
326 339 {
327 340 struct pid *pidp;
328 341 proc_t *p;
329 342
330 343 ASSERT(MUTEX_HELD(&pidlock));
331 344
332 345 mutex_enter(&pidlinklock);
333 346 pidp = pid_lookup(pid);
334 347 mutex_exit(&pidlinklock);
335 348 if (pidp != NULL && pidp->pid_prinactive == 0) {
336 349 p = procdir[pidp->pid_prslot].pe_proc;
337 350 if (zoneid == ALL_ZONES || p->p_zone->zone_id == zoneid)
338 351 return (p);
339 352 }
340 353 return (NULL);
341 354 }
342 355
343 356 /*
344 357 * Find a process given its process ID. This obeys zone restrictions,
345 358 * so if the caller is in a non-global zone it won't find processes
346 359 * associated with other zones. Use prfind_zone(pid, ALL_ZONES) to
347 360 * bypass this restriction.
348 361 */
349 362 proc_t *
350 363 prfind(pid_t pid)
351 364 {
352 365 zoneid_t zoneid;
353 366
354 367 if (INGLOBALZONE(curproc))
355 368 zoneid = ALL_ZONES;
356 369 else
357 370 zoneid = getzoneid();
358 371 return (prfind_zone(pid, zoneid));
359 372 }
360 373
361 374 proc_t *
362 375 pgfind_zone(pid_t pgid, zoneid_t zoneid)
363 376 {
364 377 struct pid *pidp;
365 378
366 379 ASSERT(MUTEX_HELD(&pidlock));
367 380
368 381 mutex_enter(&pidlinklock);
369 382 pidp = pid_lookup(pgid);
370 383 mutex_exit(&pidlinklock);
371 384 if (pidp != NULL) {
372 385 proc_t *p = pidp->pid_pglink;
373 386
374 387 if (zoneid == ALL_ZONES || pgid == 0 || p == NULL ||
375 388 p->p_zone->zone_id == zoneid)
376 389 return (p);
377 390 }
378 391 return (NULL);
379 392 }
380 393
381 394 /*
382 395 * return the head of the list of processes whose process group ID is 'pgid',
383 396 * or NULL, if no such process group
384 397 */
385 398 proc_t *
386 399 pgfind(pid_t pgid)
387 400 {
388 401 zoneid_t zoneid;
389 402
390 403 if (INGLOBALZONE(curproc))
391 404 zoneid = ALL_ZONES;
392 405 else
393 406 zoneid = getzoneid();
394 407 return (pgfind_zone(pgid, zoneid));
395 408 }
396 409
397 410 /*
398 411 * Sets P_PR_LOCK on a non-system process. Process must be fully created
399 412 * and not exiting to succeed.
400 413 *
401 414 * Returns 0 on success.
402 415 * Returns 1 if P_PR_LOCK is set.
403 416 * Returns -1 if proc is in invalid state.
404 417 */
405 418 int
406 419 sprtrylock_proc(proc_t *p)
407 420 {
408 421 ASSERT(MUTEX_HELD(&p->p_lock));
409 422
410 423 /* skip system and incomplete processes */
411 424 if (p->p_stat == SIDL || p->p_stat == SZOMB ||
412 425 (p->p_flag & (SSYS | SEXITING | SEXITLWPS))) {
413 426 return (-1);
414 427 }
415 428
416 429 if (p->p_proc_flag & P_PR_LOCK)
417 430 return (1);
418 431
419 432 p->p_proc_flag |= P_PR_LOCK;
420 433 THREAD_KPRI_REQUEST();
421 434
422 435 return (0);
423 436 }
424 437
/*
 * Wait for P_PR_LOCK to become clear.  Returns with p_lock dropped,
 * and the proc pointer no longer valid, as the proc may have exited.
 */
void
sprwaitlock_proc(proc_t *p)
{
	kmutex_t *mp;

	ASSERT(MUTEX_HELD(&p->p_lock));
	ASSERT(p->p_proc_flag & P_PR_LOCK);

	/*
	 * p_lock is persistent, but p itself is not -- it could
	 * vanish during cv_wait().  Load p->p_lock now so we can
	 * drop it after cv_wait() without referencing p.
	 */
	mp = &p->p_lock;
	cv_wait(&pr_pid_cv[p->p_slot], mp);
	mutex_exit(mp);
}
446 459
/*
 * If pid exists, find its proc, acquire its p_lock and mark it P_PR_LOCK.
 * Returns the proc pointer on success, NULL on failure.  sprlock() is
 * really just a stripped-down version of pr_p_lock() to allow practive
 * walkers like dofusers() and dumpsys() to synchronize with /proc.
 */
proc_t *
sprlock_zone(pid_t pid, zoneid_t zoneid)
{
	proc_t *p;
	int ret;

	for (;;) {
		mutex_enter(&pidlock);
		if ((p = prfind_zone(pid, zoneid)) == NULL) {
			mutex_exit(&pidlock);
			return (NULL);
		}
		/*
		 * Take p_lock before dropping pidlock so the process
		 * cannot disappear out from under us.
		 */
		mutex_enter(&p->p_lock);
		mutex_exit(&pidlock);

		/* During panic, skip the P_PR_LOCK protocol entirely. */
		if (panicstr)
			return (p);

		ret = sprtrylock_proc(p);
		if (ret == -1) {
			/* Invalid state (system/zombie/exiting): give up. */
			mutex_exit(&p->p_lock);
			return (NULL);
		} else if (ret == 0) {
			break;
		}
		/*
		 * P_PR_LOCK was already held.  Wait for it to clear --
		 * this drops p_lock and p may exit -- then retry the
		 * lookup from scratch.
		 */
		sprwaitlock_proc(p);
	}
	return (p);
}
482 495
483 496 proc_t *
484 497 sprlock(pid_t pid)
485 498 {
486 499 zoneid_t zoneid;
487 500
488 501 if (INGLOBALZONE(curproc))
489 502 zoneid = ALL_ZONES;
490 503 else
491 504 zoneid = getzoneid();
492 505 return (sprlock_zone(pid, zoneid));
493 506 }
494 507
495 508 void
496 509 sprlock_proc(proc_t *p)
497 510 {
498 511 ASSERT(MUTEX_HELD(&p->p_lock));
499 512
500 513 while (p->p_proc_flag & P_PR_LOCK) {
501 514 cv_wait(&pr_pid_cv[p->p_slot], &p->p_lock);
502 515 }
503 516
504 517 p->p_proc_flag |= P_PR_LOCK;
505 518 THREAD_KPRI_REQUEST();
506 519 }
507 520
/*
 * Release P_PR_LOCK and drop p_lock, waking any waiter blocked in
 * sprlock_proc()/sprwaitlock_proc().  During panic the flag is left
 * untouched and only p_lock is dropped.
 */
void
sprunlock(proc_t *p)
{
	if (panicstr) {
		mutex_exit(&p->p_lock);
		return;
	}

	ASSERT(p->p_proc_flag & P_PR_LOCK);
	ASSERT(MUTEX_HELD(&p->p_lock));

	cv_signal(&pr_pid_cv[p->p_slot]);
	p->p_proc_flag &= ~P_PR_LOCK;
	mutex_exit(&p->p_lock);
	THREAD_KPRI_RELEASE();
}
537 +
/*
 * Undo effects of sprlock but without dropping p->p_lock
 */
void
sprunprlock(proc_t *p)
{
	ASSERT(p->p_proc_flag & P_PR_LOCK);
	ASSERT(MUTEX_HELD(&p->p_lock));

	/* Wake any waiter blocked on P_PR_LOCK, then clear it. */
	cv_signal(&pr_pid_cv[p->p_slot]);
	p->p_proc_flag &= ~P_PR_LOCK;
	THREAD_KPRI_RELEASE();
}
524 551
/*
 * Boot-time initialization of the pid hash table, the /proc directory,
 * the per-slot condition variables, and the p_lock array.
 */
void
pid_init(void)
{
	int i;

	/* One bucket per pid_hashlen processes, rounded up to a power of 2. */
	pid_hashsz = 1 << highbit(v.v_proc / pid_hashlen);

	pidhash = kmem_zalloc(sizeof (struct pid *) * pid_hashsz, KM_SLEEP);
	procdir = kmem_alloc(sizeof (union procent) * v.v_proc, KM_SLEEP);
	pr_pid_cv = kmem_zalloc(sizeof (kcondvar_t) * v.v_proc, KM_SLEEP);
	proc_lock = kmem_zalloc(sizeof (struct plock) * v.v_proc, KM_SLEEP);

	/* Slot 0 belongs to proc_sched, the only process at this point. */
	nproc = 1;
	practive = proc_sched;
	proc_sched->p_next = NULL;
	procdir[0].pe_proc = proc_sched;

	/* Chain the remaining slots onto the /proc entry freelist. */
	procentfree = &procdir[1];
	for (i = 1; i < v.v_proc - 1; i++)
		procdir[i].pe_next = &procdir[i+1];
	procdir[i].pe_next = NULL;

	HASHPID(0) = &pid0;

	upcount_init();
}
551 578
/*
 * Return the process occupying /proc slot `slot', or NULL if the slot
 * is free or the process is still being created (SIDL).
 */
proc_t *
pid_entry(int slot)
{
	union procent *pep;
	proc_t *prp;

	ASSERT(MUTEX_HELD(&pidlock));
	ASSERT(slot >= 0 && slot < v.v_proc);

	/*
	 * A free slot's pe_next points back into procdir itself (it is on
	 * the freelist); an in-use slot's pe_proc points outside that range.
	 */
	pep = procdir[slot].pe_next;
	if (pep >= procdir && pep < &procdir[v.v_proc])
		return (NULL);
	prp = procdir[slot].pe_proc;
	if (prp != 0 && prp->p_stat == SIDL)
		return (NULL);
	return (prp);
}
569 596
/*
 * Send the specified signal to all processes whose process group ID is
 * equal to 'pgid'
 */

void
signal(pid_t pgid, int sig)
{
	struct pid *pidp;
	proc_t *prp;

	mutex_enter(&pidlock);
	mutex_enter(&pidlinklock);
	if (pgid == 0 || (pidp = pid_lookup(pgid)) == NULL) {
		mutex_exit(&pidlinklock);
		mutex_exit(&pidlock);
		return;
	}
	/*
	 * pidlinklock was only needed for the hash lookup; pidlock is
	 * kept across the walk of the process-group list.
	 */
	mutex_exit(&pidlinklock);
	for (prp = pidp->pid_pglink; prp; prp = prp->p_pglink) {
		mutex_enter(&prp->p_lock);
		sigtoproc(prp, NULL, sig);
		mutex_exit(&prp->p_lock);
	}
	mutex_exit(&pidlock);
}
596 623
597 624 /*
598 625 * Send the specified signal to the specified process
599 626 */
600 627
601 628 void
602 629 prsignal(struct pid *pidp, int sig)
603 630 {
604 631 if (!(pidp->pid_prinactive))
605 632 psignal(procdir[pidp->pid_prslot].pe_proc, sig);
606 633 }
607 634
608 635 #include <sys/sunddi.h>
609 636
610 637 /*
611 638 * DDI/DKI interfaces for drivers to send signals to processes
612 639 */
613 640
/*
 * obtain an opaque reference to a process for signaling
 */
void *
proc_ref(void)
{
	struct pid *pidp;

	mutex_enter(&pidlock);
	pidp = curproc->p_pidp;
	/* Hold the pid structure, not the proc, so it survives exit. */
	PID_HOLD(pidp);
	mutex_exit(&pidlock);

	return (pidp);
}
629 656
/*
 * release a reference to a process
 * - a process can exit even if a driver has a reference to it
 * - one proc_unref for every proc_ref
 */
void
proc_unref(void *pref)
{
	mutex_enter(&pidlock);
	PID_RELE((struct pid *)pref);
	mutex_exit(&pidlock);
}
642 669
/*
 * send a signal to a process
 *
 * - send the process the signal
 * - if the process went away, return a -1
 * - if the process is still there return 0
 */
int
proc_signal(void *pref, int sig)
{
	struct pid *pidp = pref;

	/* prsignal() is a no-op when the pid is already inactive. */
	prsignal(pidp, sig);
	return (pidp->pid_prinactive ? -1 : 0);
}
658 685
659 686
static struct upcount **upc_hash;	/* a boot time allocated array */
static ulong_t upc_hashmask;
/* Hash a <uid, zoneid> pair to a bucket; upc_hashmask is 2^n - 1. */
#define	UPC_HASH(x, y)	((ulong_t)(x ^ y) & upc_hashmask)
663 690
664 691 /*
665 692 * Get us off the ground. Called once at boot.
666 693 */
667 694 void
668 695 upcount_init(void)
669 696 {
670 697 ulong_t upc_hashsize;
671 698
672 699 /*
673 700 * An entry per MB of memory is our current guess
674 701 */
675 702 /*
676 703 * 2^20 is a meg, so shifting right by 20 - PAGESHIFT
677 704 * converts pages to megs (without overflowing a u_int
678 705 * if you have more than 4G of memory, like ptob(physmem)/1M
679 706 * would).
680 707 */
681 708 upc_hashsize = (1 << highbit(physmem >> (20 - PAGESHIFT)));
682 709 upc_hashmask = upc_hashsize - 1;
683 710 upc_hash = kmem_zalloc(upc_hashsize * sizeof (struct upcount *),
684 711 KM_SLEEP);
685 712 }
686 713
/*
 * Increment the number of processes associated with a given uid and zoneid.
 */
void
upcount_inc(uid_t uid, zoneid_t zoneid)
{
	struct upcount **upc, **hupc;
	struct upcount *new;

	ASSERT(MUTEX_HELD(&pidlock));
	new = NULL;
	hupc = &upc_hash[UPC_HASH(uid, zoneid)];
top:
	upc = hupc;
	while ((*upc) != NULL) {
		if ((*upc)->up_uid == uid && (*upc)->up_zoneid == zoneid) {
			(*upc)->up_count++;
			if (new) {
				/*
				 * did not need `new' after all.
				 */
				kmem_free(new, sizeof (*new));
			}
			return;
		}
		upc = &(*upc)->up_next;
	}

	/*
	 * There is no entry for this <uid,zoneid> pair.
	 * Allocate one.  If we have to drop pidlock, check
	 * again.
	 */
	if (new == NULL) {
		/* Try without sleeping first, so pidlock can stay held. */
		new = (struct upcount *)kmem_alloc(sizeof (*new), KM_NOSLEEP);
		if (new == NULL) {
			/*
			 * Sleep for memory with pidlock dropped; another
			 * thread may insert our entry meanwhile, so rescan
			 * the chain from the top.
			 */
			mutex_exit(&pidlock);
			new = (struct upcount *)kmem_alloc(sizeof (*new),
			    KM_SLEEP);
			mutex_enter(&pidlock);
			goto top;
		}
	}


	/*
	 * On the assumption that a new user is going to do some
	 * more forks, put the new upcount structure on the front.
	 */
	upc = hupc;

	new->up_uid = uid;
	new->up_zoneid = zoneid;
	new->up_count = 1;
	new->up_next = *upc;

	*upc = new;
}
745 772
746 773 /*
747 774 * Decrement the number of processes a given uid and zoneid has.
748 775 */
749 776 void
750 777 upcount_dec(uid_t uid, zoneid_t zoneid)
751 778 {
752 779 struct upcount **upc;
753 780 struct upcount *done;
754 781
755 782 ASSERT(MUTEX_HELD(&pidlock));
756 783
757 784 upc = &upc_hash[UPC_HASH(uid, zoneid)];
758 785 while ((*upc) != NULL) {
759 786 if ((*upc)->up_uid == uid && (*upc)->up_zoneid == zoneid) {
760 787 (*upc)->up_count--;
761 788 if ((*upc)->up_count == 0) {
762 789 done = *upc;
763 790 *upc = (*upc)->up_next;
764 791 kmem_free(done, sizeof (*done));
765 792 }
766 793 return;
767 794 }
768 795 upc = &(*upc)->up_next;
769 796 }
770 797 cmn_err(CE_PANIC, "decr_upcount-off the end");
771 798 }
772 799
773 800 /*
774 801 * Returns the number of processes a uid has.
775 802 * Non-existent uid's are assumed to have no processes.
776 803 */
777 804 int
778 805 upcount_get(uid_t uid, zoneid_t zoneid)
779 806 {
780 807 struct upcount *upc;
781 808
782 809 ASSERT(MUTEX_HELD(&pidlock));
783 810
784 811 upc = upc_hash[UPC_HASH(uid, zoneid)];
785 812 while (upc != NULL) {
786 813 if (upc->up_uid == uid && upc->up_zoneid == zoneid) {
787 814 return (upc->up_count);
788 815 }
789 816 upc = upc->up_next;
790 817 }
791 818 return (0);
792 819 }
|
↓ open down ↓ |
259 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX