--- old/usr/src/uts/common/os/sig.c
+++ new/usr/src/uts/common/os/sig.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
24 24 * Use is subject to license terms.
25 25 * Copyright 2015, Joyent, Inc.
26 26 */
27 27
28 28 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
29 29 /* All Rights Reserved */
30 30
31 31 #include <sys/param.h>
32 32 #include <sys/types.h>
33 33 #include <sys/bitmap.h>
34 34 #include <sys/sysmacros.h>
35 35 #include <sys/systm.h>
36 36 #include <sys/cred.h>
37 37 #include <sys/user.h>
38 38 #include <sys/errno.h>
39 39 #include <sys/proc.h>
40 40 #include <sys/poll_impl.h> /* only needed for kludge in sigwaiting_send() */
41 41 #include <sys/signal.h>
42 42 #include <sys/siginfo.h>
43 43 #include <sys/fault.h>
44 44 #include <sys/ucontext.h>
45 45 #include <sys/procfs.h>
46 46 #include <sys/wait.h>
47 47 #include <sys/class.h>
48 48 #include <sys/mman.h>
49 49 #include <sys/procset.h>
50 50 #include <sys/kmem.h>
51 51 #include <sys/cpuvar.h>
52 52 #include <sys/prsystm.h>
53 53 #include <sys/debug.h>
54 54 #include <vm/as.h>
55 55 #include <sys/bitmap.h>
56 56 #include <c2/audit.h>
57 57 #include <sys/core.h>
58 58 #include <sys/schedctl.h>
59 59 #include <sys/contract/process_impl.h>
60 60 #include <sys/cyclic.h>
61 61 #include <sys/dtrace.h>
62 62 #include <sys/sdt.h>
63 -#include <sys/brand.h>
64 63 #include <sys/signalfd.h>
64 +#include <sys/brand.h>
65 65
66 66 const k_sigset_t nullsmask = {0, 0, 0};
67 67
68 68 const k_sigset_t fillset = /* MUST be contiguous */
69 69 {FILLSET0, FILLSET1, FILLSET2};
70 70
71 71 const k_sigset_t cantmask =
72 72 {CANTMASK0, CANTMASK1, CANTMASK2};
73 73
74 74 const k_sigset_t cantreset =
75 75 {(sigmask(SIGILL)|sigmask(SIGTRAP)|sigmask(SIGPWR)), 0, 0};
76 76
77 77 const k_sigset_t ignoredefault =
78 78 {(sigmask(SIGCONT)|sigmask(SIGCLD)|sigmask(SIGPWR)
79 79 |sigmask(SIGWINCH)|sigmask(SIGURG)|sigmask(SIGWAITING)),
80 80 (sigmask(SIGLWP)|sigmask(SIGCANCEL)|sigmask(SIGFREEZE)
81 81 |sigmask(SIGTHAW)|sigmask(SIGXRES)|sigmask(SIGJVM1)
82 82 |sigmask(SIGJVM2)|sigmask(SIGINFO)), 0};
83 83
84 84 const k_sigset_t stopdefault =
85 85 {(sigmask(SIGSTOP)|sigmask(SIGTSTP)|sigmask(SIGTTOU)|sigmask(SIGTTIN)),
86 86 0, 0};
87 87
88 88 const k_sigset_t coredefault =
89 89 {(sigmask(SIGQUIT)|sigmask(SIGILL)|sigmask(SIGTRAP)|sigmask(SIGIOT)
90 90 |sigmask(SIGEMT)|sigmask(SIGFPE)|sigmask(SIGBUS)|sigmask(SIGSEGV)
91 91 |sigmask(SIGSYS)|sigmask(SIGXCPU)|sigmask(SIGXFSZ)), 0, 0};
92 92
93 93 const k_sigset_t holdvfork =
94 94 {(sigmask(SIGTTOU)|sigmask(SIGTTIN)|sigmask(SIGTSTP)), 0, 0};
95 95
96 96 static int isjobstop(int);
97 97 static void post_sigcld(proc_t *, sigqueue_t *);
98 98
99 99
100 100 /*
101 101 * signalfd helper function which is set when the signalfd driver loads.
102 102 */
103 103 void (*sigfd_exit_helper)();
104 104
105 105 /*
106 106 * Internal variables for counting number of user thread stop requests posted.
107 107 * They may not be accurate in some special situations, such as when a
108 108 * virtually stopped thread starts to run.
109 109 */
110 110 static int num_utstop;
111 111 /*
112 112 * Internal variables for broadcasting an event when all thread stop requests
113 113 * are processed.
114 114 */
115 115 static kcondvar_t utstop_cv;
116 116
117 117 static kmutex_t thread_stop_lock;
118 118 void del_one_utstop(void);
119 119
120 120 /*
121 121 * Send the specified signal to the specified process.
122 122 */
123 123 void
124 124 psignal(proc_t *p, int sig)
125 125 {
126 126 mutex_enter(&p->p_lock);
127 127 sigtoproc(p, NULL, sig);
128 128 mutex_exit(&p->p_lock);
129 129 }
130 130
131 131 /*
132 132 * Send the specified signal to the specified thread.
133 133 */
134 134 void
135 135 tsignal(kthread_t *t, int sig)
136 136 {
137 137 proc_t *p = ttoproc(t);
138 138
139 139 mutex_enter(&p->p_lock);
140 140 sigtoproc(p, t, sig);
141 141 mutex_exit(&p->p_lock);
142 142 }
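
A hedged sketch of the locking contract these wrappers encode (hypothetical callers, not part of this file): psignal() and tsignal() acquire p->p_lock themselves, whereas sigtoproc(), below, asserts that the caller already holds it.

	/* Illustrative callers only. */
	void
	send_unlocked(proc_t *p)
	{
		psignal(p, SIGHUP);		/* takes and drops p->p_lock */
	}

	void
	send_locked(proc_t *p, kthread_t *t)
	{
		ASSERT(MUTEX_HELD(&p->p_lock));
		sigtoproc(p, t, SIGHUP);	/* p->p_lock must be held */
	}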
143 143
144 144 int
145 145 signal_is_blocked(kthread_t *t, int sig)
146 146 {
147 147 return (sigismember(&t->t_hold, sig) ||
148 148 (schedctl_sigblock(t) && !sigismember(&cantmask, sig)));
149 149 }
150 150
151 151 /*
152 152 * Return true if the signal can safely be ignored.
153 153 * That is, if the signal is included in the p_ignore mask and doing so is not
154 154 * forbidden by any process branding.
155 155 */
156 156 static int
157 157 sig_ignorable(proc_t *p, klwp_t *lwp, int sig)
158 158 {
159 159 return (sigismember(&p->p_ignore, sig) && /* sig in ignore mask */
160 160 !(PROC_IS_BRANDED(p) && /* allowed by brand */
161 161 BROP(p)->b_sig_ignorable != NULL &&
162 162 BROP(p)->b_sig_ignorable(p, lwp, sig) == B_FALSE));
163 163
164 164 }
165 165
166 166 /*
167 167 * Return true if the signal can safely be discarded on generation.
168 168 * That is, if there is no need for the signal on the receiving end.
169 169 * The answer is true if the process is a zombie or
170 170 * if all of these conditions are true:
171 171 * the signal is being ignored
172 172 * the process is single-threaded
173 173 * the signal is not being traced by /proc
174 174 * the signal is not blocked by the process
175 175 * the signal is not being accepted via sigwait()
176 176 */
177 177 static int
178 178 sig_discardable(proc_t *p, kthread_t *tp, int sig)
179 179 {
180 180 kthread_t *t = p->p_tlist;
181 181 klwp_t *lwp = (tp == NULL) ? NULL : tp->t_lwp;
182 182
183 183 return (t == NULL || /* if zombie or ... */
184 184 (sig_ignorable(p, lwp, sig) && /* signal is ignored */
185 185 t->t_forw == t && /* and single-threaded */
186 186 !tracing(p, sig) && /* and no /proc tracing */
187 187 !signal_is_blocked(t, sig) && /* and signal not blocked */
188 188 !sigismember(&t->t_sigwait, sig))); /* and not being accepted */
189 189 }
190 190
191 191 /*
192 192 * Return true if this thread is going to eat this signal soon.
193 193 * Note that, if the signal is SIGKILL, we force stopped threads to be
194 194 * set running (to make SIGKILL be a sure kill), but only if the process
195 195 * is not currently locked by /proc (the P_PR_LOCK flag). Code in /proc
196 196 * relies on the fact that a process will not change shape while P_PR_LOCK
197 197 * is set (it drops and reacquires p->p_lock while leaving P_PR_LOCK set).
198 198 * We wish that we could simply call prbarrier() below, in sigtoproc(), to
199 199 * ensure that the process is not locked by /proc, but prbarrier() drops
200 200 * and reacquires p->p_lock and dropping p->p_lock here would be damaging.
201 201 */
202 202 int
203 203 eat_signal(kthread_t *t, int sig)
204 204 {
205 205 int rval = 0;
206 206 ASSERT(THREAD_LOCK_HELD(t));
207 207
208 208 /*
209 209 * Do not do anything if the target thread has the signal blocked.
210 210 */
211 211 if (!signal_is_blocked(t, sig)) {
212 212 t->t_sig_check = 1; /* have thread do an issig */
213 213 if (ISWAKEABLE(t) || ISWAITING(t)) {
214 214 setrun_locked(t);
215 215 rval = 1;
216 216 } else if (t->t_state == TS_STOPPED && sig == SIGKILL &&
217 217 !(ttoproc(t)->p_proc_flag & P_PR_LOCK)) {
218 218 ttoproc(t)->p_stopsig = 0;
219 219 t->t_dtrace_stop = 0;
220 220 t->t_schedflag |= TS_XSTART | TS_PSTART | TS_BSTART;
221 221 setrun_locked(t);
222 222 } else if (t != curthread && t->t_state == TS_ONPROC) {
223 223 aston(t); /* make it do issig promptly */
224 224 if (t->t_cpu != CPU)
225 225 poke_cpu(t->t_cpu->cpu_id);
226 226 rval = 1;
227 227 } else if (t->t_state == TS_RUN) {
228 228 rval = 1;
229 229 }
230 230 }
231 231
232 232 return (rval);
233 233 }
234 234
235 235 /*
236 236 * Post a signal.
237 237 * If a non-null thread pointer is passed, then post the signal
238 238 * to the thread/lwp, otherwise post the signal to the process.
239 239 */
240 240 void
241 241 sigtoproc(proc_t *p, kthread_t *t, int sig)
242 242 {
243 243 kthread_t *tt;
244 244 int ext = !(curproc->p_flag & SSYS) &&
245 245 (curproc->p_ct_process != p->p_ct_process);
246 246
247 247 ASSERT(MUTEX_HELD(&p->p_lock));
248 248
249 249 /* System processes don't get signals */
250 250 if (sig <= 0 || sig >= NSIG || (p->p_flag & SSYS))
251 251 return;
252 252
253 253 /*
254 254 * Regardless of origin or directedness,
255 255 * SIGKILL kills all lwps in the process immediately
256 256 * and jobcontrol signals affect all lwps in the process.
257 257 */
258 258 if (sig == SIGKILL) {
259 259 p->p_flag |= SKILLED | (ext ? SEXTKILLED : 0);
260 260 t = NULL;
261 261 } else if (sig == SIGCONT) {
262 262 /*
263 263 * The SSCONT flag will remain set until a stopping
264 264 * signal comes in (below). This is harmless.
265 265 */
266 266 p->p_flag |= SSCONT;
267 267 sigdelq(p, NULL, SIGSTOP);
268 268 sigdelq(p, NULL, SIGTSTP);
269 269 sigdelq(p, NULL, SIGTTOU);
270 270 sigdelq(p, NULL, SIGTTIN);
271 271 sigdiffset(&p->p_sig, &stopdefault);
272 272 sigdiffset(&p->p_extsig, &stopdefault);
273 273 p->p_stopsig = 0;
274 274 if ((tt = p->p_tlist) != NULL) {
275 275 do {
276 276 sigdelq(p, tt, SIGSTOP);
277 277 sigdelq(p, tt, SIGTSTP);
278 278 sigdelq(p, tt, SIGTTOU);
279 279 sigdelq(p, tt, SIGTTIN);
280 280 sigdiffset(&tt->t_sig, &stopdefault);
281 281 sigdiffset(&tt->t_extsig, &stopdefault);
282 282 } while ((tt = tt->t_forw) != p->p_tlist);
283 283 }
284 284 if ((tt = p->p_tlist) != NULL) {
285 285 do {
286 286 thread_lock(tt);
287 287 if (tt->t_state == TS_STOPPED &&
288 288 tt->t_whystop == PR_JOBCONTROL) {
289 289 tt->t_schedflag |= TS_XSTART;
290 290 setrun_locked(tt);
291 291 }
292 292 thread_unlock(tt);
293 293 } while ((tt = tt->t_forw) != p->p_tlist);
294 294 }
295 295 } else if (sigismember(&stopdefault, sig)) {
296 296 /*
297 297 * This test has a race condition which we can't fix:
298 298 * By the time the stopping signal is received by
299 299 * the target process/thread, the signal handler
300 300 * and/or the detached state might have changed.
301 301 */
302 302 if (PTOU(p)->u_signal[sig-1] == SIG_DFL &&
303 303 (sig == SIGSTOP || !p->p_pgidp->pid_pgorphaned))
304 304 p->p_flag &= ~SSCONT;
305 305 sigdelq(p, NULL, SIGCONT);
306 306 sigdelset(&p->p_sig, SIGCONT);
307 307 sigdelset(&p->p_extsig, SIGCONT);
308 308 if ((tt = p->p_tlist) != NULL) {
309 309 do {
310 310 sigdelq(p, tt, SIGCONT);
311 311 sigdelset(&tt->t_sig, SIGCONT);
312 312 sigdelset(&tt->t_extsig, SIGCONT);
313 313 } while ((tt = tt->t_forw) != p->p_tlist);
314 314 }
315 315 }
316 316
317 317 if (sig_discardable(p, t, sig)) {
318 318 DTRACE_PROC3(signal__discard, kthread_t *, p->p_tlist,
319 319 proc_t *, p, int, sig);
320 320 return;
321 321 }
322 322
323 323 if (t != NULL) {
324 324 /*
325 325 * This is a directed signal, wake up the lwp.
326 326 */
327 327 sigaddset(&t->t_sig, sig);
328 328 if (ext)
329 329 sigaddset(&t->t_extsig, sig);
330 330 thread_lock(t);
331 331 (void) eat_signal(t, sig);
332 332 thread_unlock(t);
333 333 DTRACE_PROC2(signal__send, kthread_t *, t, int, sig);
334 334 if (p->p_sigfd != NULL && ((sigfd_proc_state_t *)
335 335 (p->p_sigfd))->sigfd_pollwake_cb != NULL)
336 336 (*((sigfd_proc_state_t *)(p->p_sigfd))->
337 337 sigfd_pollwake_cb)(p, sig);
338 338
339 339 } else if ((tt = p->p_tlist) != NULL) {
340 340 /*
341 341 * Make sure that some lwp that already exists
342 342 * in the process fields the signal soon.
343 343 * Wake up an interruptibly sleeping lwp if necessary.
344 344 * For SIGKILL make all of the lwps see the signal;
345 345 * This is needed to guarantee a sure kill for processes
346 346 * with a mix of realtime and non-realtime threads.
347 347 */
348 348 int su = 0;
349 349
350 350 sigaddset(&p->p_sig, sig);
351 351 if (ext)
352 352 sigaddset(&p->p_extsig, sig);
353 353 do {
354 354 thread_lock(tt);
355 355 if (eat_signal(tt, sig) && sig != SIGKILL) {
356 356 thread_unlock(tt);
357 357 break;
358 358 }
359 359 if (SUSPENDED(tt))
360 360 su++;
361 361 thread_unlock(tt);
362 362 } while ((tt = tt->t_forw) != p->p_tlist);
363 363 /*
364 364 * If the process is deadlocked, make somebody run and die.
365 365 */
366 366 if (sig == SIGKILL && p->p_stat != SIDL &&
367 367 p->p_lwprcnt == 0 && p->p_lwpcnt == su &&
368 368 !(p->p_proc_flag & P_PR_LOCK)) {
369 369 thread_lock(tt);
370 370 p->p_lwprcnt++;
371 371 tt->t_schedflag |= TS_CSTART;
372 372 setrun_locked(tt);
373 373 thread_unlock(tt);
374 374 }
375 375
376 376 DTRACE_PROC2(signal__send, kthread_t *, tt, int, sig);
377 377 if (p->p_sigfd != NULL && ((sigfd_proc_state_t *)
378 378 (p->p_sigfd))->sigfd_pollwake_cb != NULL)
379 379 (*((sigfd_proc_state_t *)(p->p_sigfd))->
380 380 sigfd_pollwake_cb)(p, sig);
381 381 }
382 382 }
383 383
384 384 static int
385 385 isjobstop(int sig)
386 386 {
387 387 proc_t *p = ttoproc(curthread);
388 388
389 389 ASSERT(MUTEX_HELD(&p->p_lock));
390 390
391 391 if (PTOU(curproc)->u_signal[sig-1] == SIG_DFL &&
392 392 sigismember(&stopdefault, sig)) {
393 393 /*
394 394 * If SIGCONT has been posted since we promoted this signal
395 395 * from pending to current, then don't do a jobcontrol stop.
396 396 */
397 397 if (!(p->p_flag & SSCONT) &&
398 398 (sig == SIGSTOP || !p->p_pgidp->pid_pgorphaned) &&
399 399 curthread != p->p_agenttp) {
400 400 sigqueue_t *sqp;
401 401
402 402 stop(PR_JOBCONTROL, sig);
403 403 mutex_exit(&p->p_lock);
404 404 sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);
405 405 mutex_enter(&pidlock);
406 406 /*
407 407 * Only the first lwp to continue notifies the parent.
408 408 */
409 409 if (p->p_pidflag & CLDCONT)
410 410 siginfofree(sqp);
411 411 else {
412 412 p->p_pidflag |= CLDCONT;
413 413 p->p_wcode = CLD_CONTINUED;
414 414 p->p_wdata = SIGCONT;
415 415 sigcld(p, sqp);
416 416 }
417 417 mutex_exit(&pidlock);
418 418 mutex_enter(&p->p_lock);
419 419 }
420 420 return (1);
421 421 }
422 422 return (0);
423 423 }
424 424
425 425 /*
426 426 * Returns true if the current process has a signal to process, and
427 427 * the signal is not held. The signal to process is put in p_cursig.
428 428 * This is asked at least once each time a process enters the system
429 429 * (though this can usually be done without actually calling issig by
430 430 * checking the pending signal masks). A signal does not do anything
431 431 * directly to a process; it sets a flag that asks the process to do
432 432 * something to itself.
433 433 *
434 434 * The "why" argument indicates the allowable side-effects of the call:
435 435 *
436 436 * FORREAL: Extract the next pending signal from p_sig into p_cursig;
437 437 * stop the process if a stop has been requested or if a traced signal
438 438 * is pending.
439 439 *
440 440 * JUSTLOOKING: Don't stop the process, just indicate whether or not
441 441 * a signal might be pending (FORREAL is needed to tell for sure).
442 442 *
443 443 * XXX: Changes to the logic in these routines should be propagated
444 444 * to lm_sigispending(). See bug 1201594.
445 445 */
446 446
447 447 static int issig_forreal(void);
448 448 static int issig_justlooking(void);
449 449
450 450 int
451 451 issig(int why)
452 452 {
453 453 ASSERT(why == FORREAL || why == JUSTLOOKING);
454 454
455 455 return ((why == FORREAL)? issig_forreal() : issig_justlooking());
456 456 }
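
As a sketch, the usual consumer of issig() on the return-to-user path (the same sequence noted above psig() later in this file) is:

	if (issig(FORREAL))
		psig();

JUSTLOOKING callers instead use the result only as a hint that a FORREAL call is worthwhile.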
457 457
458 458
459 459 static int
460 460 issig_justlooking(void)
461 461 {
462 462 kthread_t *t = curthread;
463 463 klwp_t *lwp = ttolwp(t);
464 464 proc_t *p = ttoproc(t);
465 465 k_sigset_t set;
466 466
467 467 /*
468 468 * This function answers the question:
469 469 * "Is there any reason to call issig_forreal()?"
470 470 *
471 471 * We have to answer the question w/o grabbing any locks
472 472 * because we are (most likely) being called after we
473 473 * put ourselves on the sleep queue.
474 474 */
475 475
476 476 if (t->t_dtrace_stop | t->t_dtrace_sig)
477 477 return (1);
478 478
479 479 /*
480 480 * Another piece of complexity in this process. When single-stepping a
481 481 * process, we don't want an intervening signal or TP_PAUSE request to
482 482 * suspend the current thread. Otherwise, the controlling process will
483 483 * hang because we will be stopped with TS_PSTART set in t_schedflag.
484 484 * We will trigger any remaining signals when we re-enter the kernel on
485 485 * the single step trap.
486 486 */
487 487 if (lwp->lwp_pcb.pcb_flags & NORMAL_STEP)
488 488 return (0);
489 489
490 490 if ((lwp->lwp_asleep && MUSTRETURN(p, t)) ||
491 491 (p->p_flag & (SEXITLWPS|SKILLED)) ||
492 492 (lwp->lwp_nostop == 0 &&
493 493 (p->p_stopsig | (p->p_flag & (SHOLDFORK1|SHOLDWATCH)) |
494 494 (t->t_proc_flag &
495 495 (TP_PRSTOP|TP_HOLDLWP|TP_CHKPT|TP_PAUSE)))) ||
496 496 lwp->lwp_cursig)
497 497 return (1);
498 498
499 499 if (p->p_flag & SVFWAIT)
500 500 return (0);
501 501 set = p->p_sig;
502 502 sigorset(&set, &t->t_sig);
503 503 if (schedctl_sigblock(t)) /* all blockable signals blocked */
504 504 sigandset(&set, &cantmask);
505 505 else
506 506 sigdiffset(&set, &t->t_hold);
507 507 if (p->p_flag & SVFORK)
508 508 sigdiffset(&set, &holdvfork);
509 509
510 510 if (!sigisempty(&set)) {
511 511 int sig;
512 512
513 513 for (sig = 1; sig < NSIG; sig++) {
514 514 if (sigismember(&set, sig) &&
515 515 (tracing(p, sig) ||
516 516 sigismember(&t->t_sigwait, sig) ||
517 517 !sig_ignorable(p, lwp, sig))) {
518 518 /*
519 519 * Don't promote a signal that will stop
520 520 * the process when lwp_nostop is set.
521 521 */
522 522 if (!lwp->lwp_nostop ||
523 523 PTOU(p)->u_signal[sig-1] != SIG_DFL ||
524 524 !sigismember(&stopdefault, sig))
525 525 return (1);
526 526 }
527 527 }
528 528 }
529 529
530 530 return (0);
531 531 }
532 532
533 533 static int
534 534 issig_forreal(void)
535 535 {
536 536 int sig = 0, ext = 0;
537 537 kthread_t *t = curthread;
538 538 klwp_t *lwp = ttolwp(t);
539 539 proc_t *p = ttoproc(t);
540 540 int toproc = 0;
541 541 int sigcld_found = 0;
542 542 int nostop_break = 0;
543 543
544 544 ASSERT(t->t_state == TS_ONPROC);
545 545
546 546 mutex_enter(&p->p_lock);
547 547 schedctl_finish_sigblock(t);
548 548
549 549 if (t->t_dtrace_stop | t->t_dtrace_sig) {
550 550 if (t->t_dtrace_stop) {
551 551 /*
552 552 * If DTrace's "stop" action has been invoked on us,
553 553 * set TP_PRSTOP.
554 554 */
555 555 t->t_proc_flag |= TP_PRSTOP;
556 556 }
557 557
558 558 if (t->t_dtrace_sig != 0) {
559 559 k_siginfo_t info;
560 560
561 561 /*
562 562 * Post the signal generated as the result of
563 563 * DTrace's "raise" action as a normal signal before
564 564 * the full-fledged signal checking begins.
565 565 */
566 566 bzero(&info, sizeof (info));
567 567 info.si_signo = t->t_dtrace_sig;
568 568 info.si_code = SI_DTRACE;
569 569
570 570 sigaddq(p, NULL, &info, KM_NOSLEEP);
571 571
572 572 t->t_dtrace_sig = 0;
573 573 }
574 574 }
575 575
576 576 for (;;) {
577 577 if (p->p_flag & (SEXITLWPS|SKILLED)) {
578 578 lwp->lwp_cursig = sig = SIGKILL;
579 579 lwp->lwp_extsig = ext = (p->p_flag & SEXTKILLED) != 0;
580 580 t->t_sig_check = 1;
581 581 break;
582 582 }
583 583
584 584 /*
585 585 * Another piece of complexity in this process. When
586 586 * single-stepping a process, we don't want an intervening
587 587 * signal or TP_PAUSE request to suspend the current thread.
588 588 * Otherwise, the controlling process will hang because we will
589 589 * be stopped with TS_PSTART set in t_schedflag. We will
590 590 * trigger any remaining signals when we re-enter the kernel on
591 591 * the single step trap.
592 592 */
593 593 if (lwp->lwp_pcb.pcb_flags & NORMAL_STEP) {
594 594 sig = 0;
595 595 break;
596 596 }
597 597
598 598 /*
599 599 * Hold the lwp here for watchpoint manipulation.
600 600 */
601 601 if ((t->t_proc_flag & TP_PAUSE) && !lwp->lwp_nostop) {
602 602 stop(PR_SUSPENDED, SUSPEND_PAUSE);
603 603 continue;
604 604 }
605 605
606 606 if (lwp->lwp_asleep && MUSTRETURN(p, t)) {
607 607 if ((sig = lwp->lwp_cursig) != 0) {
608 608 /*
609 609 * Make sure we call ISSIG() in post_syscall()
610 610 * to re-validate this current signal.
611 611 */
612 612 t->t_sig_check = 1;
613 613 }
614 614 break;
615 615 }
616 616
617 617 /*
618 618 * If the request is PR_CHECKPOINT, ignore the rest of signals
619 619 * or requests. Honor other stop requests or signals later.
620 620 * Go back to top of loop here to check if an exit or hold
621 621 * event has occurred while stopped.
622 622 */
623 623 if ((t->t_proc_flag & TP_CHKPT) && !lwp->lwp_nostop) {
624 624 stop(PR_CHECKPOINT, 0);
625 625 continue;
626 626 }
627 627
628 628 /*
629 629 * Honor SHOLDFORK1, SHOLDWATCH, and TP_HOLDLWP before dealing
630 630 * with signals or /proc. Another lwp is executing fork1(),
631 631 * or is undergoing watchpoint activity (remapping a page),
632 632 * or is executing lwp_suspend() on this lwp.
633 633 * Again, go back to top of loop to check if an exit
634 634 * or hold event has occurred while stopped.
635 635 */
636 636 if (((p->p_flag & (SHOLDFORK1|SHOLDWATCH)) ||
637 637 (t->t_proc_flag & TP_HOLDLWP)) && !lwp->lwp_nostop) {
638 638 stop(PR_SUSPENDED, SUSPEND_NORMAL);
639 639 continue;
640 640 }
641 641
642 642 /*
643 643 * Allow the brand the chance to alter (or suppress) delivery
644 644 * of this signal.
645 645 */
646 646 if (PROC_IS_BRANDED(p) && BROP(p)->b_issig_stop != NULL) {
647 647 /*
648 648 * The brand hook will return 0 if it would like
649 649 * us to drive on, or -1 if we should restart
650 650 * the loop to check other conditions.
651 651 */
652 652 if (BROP(p)->b_issig_stop(p, lwp) != 0) {
653 653 continue;
654 654 }
655 655 }
656 656
657 657 /*
658 658 * Honor requested stop before dealing with the
659 659 * current signal; a debugger may change it.
660 660 * Do not want to go back to loop here since this is a special
661 661 * stop that means: make incremental progress before the next
662 662 * stop. The danger is that returning to top of loop would most
663 663 * likely drop the thread right back here to stop soon after it
664 664 * was continued, violating the incremental progress request.
665 665 */
666 666 if ((t->t_proc_flag & TP_PRSTOP) && !lwp->lwp_nostop)
667 667 stop(PR_REQUESTED, 0);
668 668
669 669 /*
670 670 * If a debugger wants us to take a signal it will have
671 671 * left it in lwp->lwp_cursig. If lwp_cursig has been cleared
672 672 * or if it's being ignored, we continue on looking for another
673 673 * signal. Otherwise we return the specified signal, provided
674 674 * it's not a signal that causes a job control stop.
675 675 *
676 676 * When stopped on PR_JOBCONTROL, there is no current
677 677 * signal; we cancel lwp->lwp_cursig temporarily before
678 678 * calling isjobstop(). The current signal may be reset
679 679 * by a debugger while we are stopped in isjobstop().
680 680 *
681 681 * If the current thread is accepting the signal
682 682 * (via sigwait(), sigwaitinfo(), or sigtimedwait()),
683 683 * we allow the signal to be accepted, even if it is
684 684 * being ignored, and without causing a job control stop.
685 685 */
686 686 if ((sig = lwp->lwp_cursig) != 0) {
687 687 ext = lwp->lwp_extsig;
688 688 lwp->lwp_cursig = 0;
689 689 lwp->lwp_extsig = 0;
690 690 if (sigismember(&t->t_sigwait, sig) ||
691 691 (!sig_ignorable(p, lwp, sig) &&
692 692 !isjobstop(sig))) {
693 693 if (p->p_flag & (SEXITLWPS|SKILLED)) {
694 694 sig = SIGKILL;
695 695 ext = (p->p_flag & SEXTKILLED) != 0;
696 696 }
697 697 lwp->lwp_cursig = (uchar_t)sig;
698 698 lwp->lwp_extsig = (uchar_t)ext;
699 699 break;
700 700 }
701 701 /*
702 702 * The signal is being ignored or it caused a
703 703 * job-control stop. If another current signal
704 704 * has not been established, return the current
705 705 * siginfo, if any, to the memory manager.
706 706 */
707 707 if (lwp->lwp_cursig == 0 && lwp->lwp_curinfo != NULL) {
708 708 siginfofree(lwp->lwp_curinfo);
709 709 lwp->lwp_curinfo = NULL;
710 710 }
711 711 /*
712 712 * Loop around again in case we were stopped
713 713 * on a job control signal and a /proc stop
714 714 * request was posted or another current signal
715 715 * was established while we were stopped.
716 716 */
717 717 continue;
718 718 }
719 719
720 720 if (p->p_stopsig && !lwp->lwp_nostop &&
721 721 curthread != p->p_agenttp) {
722 722 /*
723 723 * Some lwp in the process has already stopped
724 724 * showing PR_JOBCONTROL. This is a stop in
725 725 * sympathy with the other lwp, even if this
726 726 * lwp is blocking the stopping signal.
727 727 */
728 728 stop(PR_JOBCONTROL, p->p_stopsig);
729 729 continue;
730 730 }
731 731
732 732 /*
733 733 * Loop on the pending signals until we find a
734 734 * non-held signal that is traced or not ignored.
735 735 * First check the signals pending for the lwp,
736 736 * then the signals pending for the process as a whole.
737 737 */
738 738 for (;;) {
739 739 if ((sig = fsig(&t->t_sig, t)) != 0) {
740 740 toproc = 0;
741 741 if (tracing(p, sig) ||
742 742 sigismember(&t->t_sigwait, sig) ||
743 743 !sig_ignorable(p, lwp, sig)) {
744 744 if (sigismember(&t->t_extsig, sig))
745 745 ext = 1;
746 746 break;
747 747 }
748 748 sigdelset(&t->t_sig, sig);
749 749 sigdelset(&t->t_extsig, sig);
750 750 sigdelq(p, t, sig);
751 751 } else if ((sig = fsig(&p->p_sig, t)) != 0) {
752 752 if (sig == SIGCLD)
753 753 sigcld_found = 1;
754 754 toproc = 1;
755 755 if (tracing(p, sig) ||
756 756 sigismember(&t->t_sigwait, sig) ||
757 757 !sig_ignorable(p, lwp, sig)) {
758 758 if (sigismember(&p->p_extsig, sig))
759 759 ext = 1;
760 760 break;
761 761 }
762 762 sigdelset(&p->p_sig, sig);
763 763 sigdelset(&p->p_extsig, sig);
764 764 sigdelq(p, NULL, sig);
765 765 } else {
766 766 /* no signal was found */
767 767 break;
768 768 }
769 769 }
770 770
771 771 if (sig == 0) { /* no signal was found */
772 772 if (p->p_flag & (SEXITLWPS|SKILLED)) {
773 773 lwp->lwp_cursig = SIGKILL;
774 774 sig = SIGKILL;
775 775 ext = (p->p_flag & SEXTKILLED) != 0;
776 776 }
777 777 break;
778 778 }
779 779
780 780 /*
781 781 * If we have been informed not to stop (i.e., we are being
782 782 * called from within a network operation), then don't promote
783 783 * the signal at this time, just return the signal number.
784 784 * We will call issig() again later when it is safe.
785 785 *
786 786 * fsig() does not return a jobcontrol stopping signal
787 787 * with a default action of stopping the process if
788 788 * lwp_nostop is set, so we won't be causing a bogus
789 789 * EINTR by this action. (Such a signal is eaten by
790 790 * isjobstop() when we loop around to do final checks.)
791 791 */
792 792 if (lwp->lwp_nostop) {
793 793 nostop_break = 1;
794 794 break;
795 795 }
796 796
797 797 /*
798 798 * Promote the signal from pending to current.
799 799 *
800 800 * Note that sigdeq() will set lwp->lwp_curinfo to NULL
801 801 * if no siginfo_t exists for this signal.
802 802 */
803 803 lwp->lwp_cursig = (uchar_t)sig;
804 804 lwp->lwp_extsig = (uchar_t)ext;
805 805 t->t_sig_check = 1; /* so post_syscall will see signal */
806 806 ASSERT(lwp->lwp_curinfo == NULL);
807 807 sigdeq(p, toproc ? NULL : t, sig, &lwp->lwp_curinfo);
808 808
809 809 if (tracing(p, sig))
810 810 stop(PR_SIGNALLED, sig);
811 811
812 812 /*
813 813 * Loop around to check for requested stop before
814 814 * performing the usual current-signal actions.
815 815 */
816 816 }
817 817
818 818 mutex_exit(&p->p_lock);
819 819
820 820 /*
821 821 * If SIGCLD was dequeued from the process's signal queue,
822 822 * search for other pending SIGCLD's from the list of children.
823 823 */
824 824 if (sigcld_found)
825 825 sigcld_repost();
826 826
827 827 if (sig != 0)
828 828 (void) undo_watch_step(NULL);
829 829
830 830 /*
831 831 * If we have been blocked since p_lock was dropped above,
832 832 * then this promoted signal might already have been handled
833 833 * on our way back from the sleep queue, so
834 834 * just ignore it.
835 835 * If we have been informed not to stop, just return the signal
836 836 * number. Also see comments above.
837 837 */
838 838 if (!nostop_break) {
839 839 sig = lwp->lwp_cursig;
840 840 }
841 841
842 842 return (sig != 0);
843 843 }
844 844
845 845 /*
846 846 * Return true if the process is currently stopped showing PR_JOBCONTROL.
847 847 * This is true only if all of the process's lwp's are so stopped.
848 848 * If this is asked by one of the lwps in the process, exclude that lwp.
849 849 */
850 850 int
851 851 jobstopped(proc_t *p)
852 852 {
853 853 kthread_t *t;
854 854
855 855 ASSERT(MUTEX_HELD(&p->p_lock));
856 856
857 857 if ((t = p->p_tlist) == NULL)
858 858 return (0);
859 859
860 860 do {
861 861 thread_lock(t);
862 862 /* ignore current, zombie and suspended lwps in the test */
863 863 if (!(t == curthread || t->t_state == TS_ZOMB ||
864 864 SUSPENDED(t)) &&
865 865 (t->t_state != TS_STOPPED ||
866 866 t->t_whystop != PR_JOBCONTROL)) {
867 867 thread_unlock(t);
868 868 return (0);
869 869 }
870 870 thread_unlock(t);
871 871 } while ((t = t->t_forw) != p->p_tlist);
872 872
873 873 return (1);
874 874 }
875 875
876 876 /*
877 877 * Put ourself (curthread) into the stopped state and notify tracers.
878 878 */
879 879 void
880 880 stop(int why, int what)
881 881 {
882 882 kthread_t *t = curthread;
883 883 proc_t *p = ttoproc(t);
884 884 klwp_t *lwp = ttolwp(t);
885 885 kthread_t *tx;
886 886 lwpent_t *lep;
887 887 int procstop;
888 888 int flags = TS_ALLSTART;
889 889 hrtime_t stoptime;
890 890
891 891 /*
892 892 * Can't stop a system process.
893 893 */
894 894 if (p == NULL || lwp == NULL || (p->p_flag & SSYS) || p->p_as == &kas)
895 895 return;
896 896
897 897 ASSERT(MUTEX_HELD(&p->p_lock));
898 898
899 899 if (why != PR_SUSPENDED && why != PR_CHECKPOINT) {
900 900 /*
901 901 * Don't stop an lwp with SIGKILL pending.
902 902 * Don't stop if the process or lwp is exiting.
903 903 */
904 904 if (lwp->lwp_cursig == SIGKILL ||
905 905 sigismember(&t->t_sig, SIGKILL) ||
906 906 sigismember(&p->p_sig, SIGKILL) ||
907 907 (t->t_proc_flag & TP_LWPEXIT) ||
908 908 (p->p_flag & (SEXITLWPS|SKILLED))) {
909 909 p->p_stopsig = 0;
910 910 t->t_proc_flag &= ~(TP_PRSTOP|TP_PRVSTOP);
911 911 return;
912 912 }
913 913 }
914 914
915 915 /*
916 916 * Make sure we don't deadlock on a recursive call to prstop().
917 917 * prstop() sets the lwp_nostop flag.
918 918 */
919 919 if (lwp->lwp_nostop)
920 920 return;
921 921
922 922 /*
923 923 * Make sure the lwp is in an orderly state for inspection
924 924 * by a debugger through /proc or for dumping via core().
925 925 */
926 926 schedctl_finish_sigblock(t);
927 927 t->t_proc_flag |= TP_STOPPING; /* must set before dropping p_lock */
928 928 mutex_exit(&p->p_lock);
929 929 stoptime = gethrtime();
930 930 prstop(why, what);
931 931 (void) undo_watch_step(NULL);
932 932 mutex_enter(&p->p_lock);
933 933 ASSERT(t->t_state == TS_ONPROC);
934 934
935 935 switch (why) {
936 936 case PR_CHECKPOINT:
937 937 /*
938 938 * The situation may have changed since we dropped
939 939 * and reacquired p->p_lock. Double-check now
940 940 * whether we should stop or not.
941 941 */
942 942 if (!(t->t_proc_flag & TP_CHKPT)) {
943 943 t->t_proc_flag &= ~TP_STOPPING;
944 944 return;
945 945 }
946 946 t->t_proc_flag &= ~TP_CHKPT;
947 947 flags &= ~TS_RESUME;
948 948 break;
949 949
950 950 case PR_JOBCONTROL:
951 951 ASSERT(what == SIGSTOP || what == SIGTSTP ||
952 952 what == SIGTTIN || what == SIGTTOU);
953 953 flags &= ~TS_XSTART;
954 954 break;
955 955
956 956 case PR_SUSPENDED:
957 957 ASSERT(what == SUSPEND_NORMAL || what == SUSPEND_PAUSE);
958 958 /*
959 959 * The situation may have changed since we dropped
960 960 * and reacquired p->p_lock. Double-check now
961 961 * whether we should stop or not.
962 962 */
963 963 if (what == SUSPEND_PAUSE) {
964 964 if (!(t->t_proc_flag & TP_PAUSE)) {
965 965 t->t_proc_flag &= ~TP_STOPPING;
966 966 return;
967 967 }
968 968 flags &= ~TS_UNPAUSE;
969 969 } else {
970 970 if (!((t->t_proc_flag & TP_HOLDLWP) ||
971 971 (p->p_flag & (SHOLDFORK|SHOLDFORK1|SHOLDWATCH)))) {
972 972 t->t_proc_flag &= ~TP_STOPPING;
973 973 return;
974 974 }
975 975 /*
976 976 * If SHOLDFORK is in effect and we are stopping
977 977 * while asleep (not at the top of the stack),
978 978 * we return now to allow the hold to take effect
979 979 * when we reach the top of the kernel stack.
980 980 */
981 981 if (lwp->lwp_asleep && (p->p_flag & SHOLDFORK)) {
982 982 t->t_proc_flag &= ~TP_STOPPING;
983 983 return;
984 984 }
985 985 flags &= ~TS_CSTART;
986 986 }
987 987 break;
988 988
989 989 case PR_BRAND:
990 990 /*
991 991 * We have been stopped by the brand code for a brand-private
992 992 * reason. This is an asynchronous stop affecting only this
993 993 * LWP.
994 994 */
995 995 VERIFY(PROC_IS_BRANDED(p));
996 996 flags &= ~TS_BSTART;
997 997 break;
998 998
999 999 default: /* /proc stop */
1000 1000 flags &= ~TS_PSTART;
1001 1001 /*
1002 1002 * Do synchronous stop unless the async-stop flag is set.
1003 1003 * If why is PR_REQUESTED and t->t_dtrace_stop flag is set,
1004 1004 * then no debugger is present and we also do synchronous stop.
1005 1005 */
1006 1006 if ((why != PR_REQUESTED || t->t_dtrace_stop) &&
1007 1007 !(p->p_proc_flag & P_PR_ASYNC)) {
1008 1008 int notify;
1009 1009
1010 1010 for (tx = t->t_forw; tx != t; tx = tx->t_forw) {
1011 1011 notify = 0;
1012 1012 thread_lock(tx);
1013 1013 if (ISTOPPED(tx) ||
1014 1014 (tx->t_proc_flag & TP_PRSTOP)) {
1015 1015 thread_unlock(tx);
1016 1016 continue;
1017 1017 }
1018 1018 tx->t_proc_flag |= TP_PRSTOP;
1019 1019 tx->t_sig_check = 1;
1020 1020 if (tx->t_state == TS_SLEEP &&
1021 1021 (tx->t_flag & T_WAKEABLE)) {
1022 1022 /*
1023 1023 * Don't actually wake it up if it's
1024 1024 * in one of the lwp_*() syscalls.
1025 1025 * Mark it virtually stopped and
1026 1026 * notify /proc waiters (below).
1027 1027 */
1028 1028 if (tx->t_wchan0 == NULL)
1029 1029 setrun_locked(tx);
1030 1030 else {
1031 1031 tx->t_proc_flag |= TP_PRVSTOP;
1032 1032 tx->t_stoptime = stoptime;
1033 1033 notify = 1;
1034 1034 }
1035 1035 }
1036 1036
1037 1037 /* Move waiting thread to run queue */
1038 1038 if (ISWAITING(tx))
1039 1039 setrun_locked(tx);
1040 1040
1041 1041 /*
1042 1042 * force the thread into the kernel
1043 1043 * if it is not already there.
1044 1044 */
1045 1045 if (tx->t_state == TS_ONPROC &&
1046 1046 tx->t_cpu != CPU)
1047 1047 poke_cpu(tx->t_cpu->cpu_id);
1048 1048 thread_unlock(tx);
1049 1049 lep = p->p_lwpdir[tx->t_dslot].ld_entry;
1050 1050 if (notify && lep->le_trace)
1051 1051 prnotify(lep->le_trace);
1052 1052 }
1053 1053 /*
1054 1054 * We do this just in case one of the threads we asked
1055 1055 * to stop is in holdlwps() (called from cfork()) or
1056 1056 * lwp_suspend().
1057 1057 */
1058 1058 cv_broadcast(&p->p_holdlwps);
1059 1059 }
1060 1060 break;
1061 1061 }
1062 1062
1063 1063 t->t_stoptime = stoptime;
1064 1064
1065 1065 if (why == PR_JOBCONTROL || (why == PR_SUSPENDED && p->p_stopsig)) {
1066 1066 /*
1067 1067 * Determine if the whole process is jobstopped.
1068 1068 */
1069 1069 if (jobstopped(p)) {
1070 1070 sigqueue_t *sqp;
1071 1071 int sig;
1072 1072
1073 1073 if ((sig = p->p_stopsig) == 0)
1074 1074 p->p_stopsig = (uchar_t)(sig = what);
1075 1075 mutex_exit(&p->p_lock);
1076 1076 sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);
1077 1077 mutex_enter(&pidlock);
1078 1078 /*
1079 1079 * The last lwp to stop notifies the parent.
1080 1080 * Turn off the CLDCONT flag now so the first
1081 1081 * lwp to continue knows what to do.
1082 1082 */
1083 1083 p->p_pidflag &= ~CLDCONT;
1084 1084 p->p_wcode = CLD_STOPPED;
1085 1085 p->p_wdata = sig;
1086 1086 sigcld(p, sqp);
1087 1087 /*
1088 1088 * Grab p->p_lock before releasing pidlock so the
1089 1089 * parent and the child don't have a race condition.
1090 1090 */
1091 1091 mutex_enter(&p->p_lock);
1092 1092 mutex_exit(&pidlock);
1093 1093 p->p_stopsig = 0;
1094 1094 } else if (why == PR_JOBCONTROL && p->p_stopsig == 0) {
1095 1095 /*
1096 1096 * Set p->p_stopsig and wake up sleeping lwps
1097 1097 * so they will stop in sympathy with this lwp.
1098 1098 */
1099 1099 p->p_stopsig = (uchar_t)what;
1100 1100 pokelwps(p);
1101 1101 /*
1102 1102 * We do this just in case one of the threads we asked
1103 1103 * to stop is in holdlwps() (called from cfork()) or
1104 1104 * lwp_suspend().
1105 1105 */
1106 1106 cv_broadcast(&p->p_holdlwps);
1107 1107 }
1108 1108 }
1109 1109
1110 1110 if (why != PR_JOBCONTROL && why != PR_CHECKPOINT && why != PR_BRAND) {
1111 1111 /*
1112 1112 * Do process-level notification when all lwps are
1113 1113 * either stopped on events of interest to /proc
1114 1114 * or are stopped showing PR_SUSPENDED or are zombies.
1115 1115 */
1116 1116 procstop = 1;
1117 1117 for (tx = t->t_forw; procstop && tx != t; tx = tx->t_forw) {
1118 1118 if (VSTOPPED(tx))
1119 1119 continue;
1120 1120 thread_lock(tx);
1121 1121 switch (tx->t_state) {
1122 1122 case TS_ZOMB:
1123 1123 break;
1124 1124 case TS_STOPPED:
1125 1125 /* neither ISTOPPED nor SUSPENDED? */
1126 1126 if ((tx->t_schedflag &
1127 1127 (TS_CSTART | TS_UNPAUSE | TS_PSTART)) ==
1128 1128 (TS_CSTART | TS_UNPAUSE | TS_PSTART))
1129 1129 procstop = 0;
1130 1130 break;
1131 1131 case TS_SLEEP:
1132 1132 /* not paused for watchpoints? */
1133 1133 if (!(tx->t_flag & T_WAKEABLE) ||
1134 1134 tx->t_wchan0 == NULL ||
1135 1135 !(tx->t_proc_flag & TP_PAUSE))
1136 1136 procstop = 0;
1137 1137 break;
1138 1138 default:
1139 1139 procstop = 0;
1140 1140 break;
1141 1141 }
1142 1142 thread_unlock(tx);
1143 1143 }
1144 1144 if (procstop) {
1145 1145 /* there must not be any remapped watched pages now */
1146 1146 ASSERT(p->p_mapcnt == 0);
1147 1147 if (p->p_proc_flag & P_PR_PTRACE) {
1148 1148 /* ptrace() compatibility */
1149 1149 mutex_exit(&p->p_lock);
1150 1150 mutex_enter(&pidlock);
1151 1151 p->p_wcode = CLD_TRAPPED;
1152 1152 p->p_wdata = (why == PR_SIGNALLED)?
1153 1153 what : SIGTRAP;
1154 1154 cv_broadcast(&p->p_parent->p_cv);
1155 1155 /*
1156 1156 * Grab p->p_lock before releasing pidlock so
1157 1157 * parent and child don't have a race condition.
1158 1158 */
1159 1159 mutex_enter(&p->p_lock);
1160 1160 mutex_exit(&pidlock);
1161 1161 }
1162 1162 if (p->p_trace) /* /proc */
1163 1163 prnotify(p->p_trace);
1164 1164 cv_broadcast(&pr_pid_cv[p->p_slot]); /* pauselwps() */
1165 1165 cv_broadcast(&p->p_holdlwps); /* holdwatch() */
1166 1166 }
1167 1167 if (why != PR_SUSPENDED) {
1168 1168 lep = p->p_lwpdir[t->t_dslot].ld_entry;
1169 1169 if (lep->le_trace) /* /proc */
1170 1170 prnotify(lep->le_trace);
1171 1171 /*
1172 1172 * Special notification for creation of the agent lwp.
1173 1173 */
1174 1174 if (t == p->p_agenttp &&
1175 1175 (t->t_proc_flag & TP_PRSTOP) &&
1176 1176 p->p_trace)
1177 1177 prnotify(p->p_trace);
1178 1178 /*
1179 1179 * The situation may have changed since we dropped
1180 1180 * and reacquired p->p_lock. Double-check now
1181 1181 * whether we should stop or not.
1182 1182 */
1183 1183 if (!(t->t_proc_flag & TP_STOPPING)) {
1184 1184 if (t->t_proc_flag & TP_PRSTOP)
1185 1185 t->t_proc_flag |= TP_STOPPING;
1186 1186 }
1187 1187 t->t_proc_flag &= ~(TP_PRSTOP|TP_PRVSTOP);
1188 1188 prnostep(lwp);
1189 1189 }
1190 1190 }
1191 1191
1192 1192 if (why == PR_SUSPENDED) {
1193 1193
1194 1194 /*
1195 1195 * We always broadcast in the case of SUSPEND_PAUSE. This is
1196 1196 * because checks for TP_PAUSE take precedence over checks for
1197 1197 * SHOLDWATCH. If a thread is trying to stop because of
1198 1198 * SUSPEND_PAUSE and tries to do a holdwatch(), it will be
1199 1199 * waiting for the rest of the threads to enter a stopped state.
1200 1200 * If we are stopping for a SUSPEND_PAUSE, we may be the last
1201 1201 * lwp and not know it, so broadcast just in case.
1202 1202 */
1203 1203 if (what == SUSPEND_PAUSE ||
1204 1204 --p->p_lwprcnt == 0 || (t->t_proc_flag & TP_HOLDLWP))
1205 1205 cv_broadcast(&p->p_holdlwps);
1206 1206
1207 1207 }
1208 1208
1209 1209 /*
1210 1210 * Need to do this here (rather than after the thread is officially
1211 1211 * stopped) because we can't call mutex_enter from a stopped thread.
1212 1212 */
1213 1213 if (why == PR_CHECKPOINT)
1214 1214 del_one_utstop();
1215 1215
1216 1216 /*
1217 1217 * Allow the brand to post notification of this stop condition.
1218 1218 */
1219 1219 if (PROC_IS_BRANDED(p) && BROP(p)->b_stop_notify != NULL) {
1220 1220 BROP(p)->b_stop_notify(p, lwp, why, what);
1221 1221 }
1222 1222
1223 1223 thread_lock(t);
1224 1224 ASSERT((t->t_schedflag & TS_ALLSTART) == 0);
1225 1225 t->t_schedflag |= flags;
1226 1226 t->t_whystop = (short)why;
1227 1227 t->t_whatstop = (short)what;
1228 1228 CL_STOP(t, why, what);
1229 1229 (void) new_mstate(t, LMS_STOPPED);
1230 1230 thread_stop(t); /* set stop state and drop lock */
1231 1231
1232 1232 if (why != PR_SUSPENDED && why != PR_CHECKPOINT) {
1233 1233 /*
1234 1234 * We may have gotten a SIGKILL or a SIGCONT when
1235 1235 * we released p->p_lock; make one last check.
1236 1236 * Also check for a /proc run-on-last-close.
1237 1237 */
1238 1238 if (sigismember(&t->t_sig, SIGKILL) ||
1239 1239 sigismember(&p->p_sig, SIGKILL) ||
1240 1240 (t->t_proc_flag & TP_LWPEXIT) ||
1241 1241 (p->p_flag & (SEXITLWPS|SKILLED))) {
1242 1242 p->p_stopsig = 0;
1243 1243 thread_lock(t);
1244 1244 t->t_schedflag |= TS_XSTART | TS_PSTART | TS_BSTART;
1245 1245 setrun_locked(t);
1246 1246 thread_unlock_nopreempt(t);
1247 1247 } else if (why == PR_JOBCONTROL) {
1248 1248 if (p->p_flag & SSCONT) {
1249 1249 /*
1250 1250 * This resulted from a SIGCONT posted
1251 1251 * while we were not holding p->p_lock.
1252 1252 */
1253 1253 p->p_stopsig = 0;
1254 1254 thread_lock(t);
1255 1255 t->t_schedflag |= TS_XSTART;
1256 1256 setrun_locked(t);
1257 1257 thread_unlock_nopreempt(t);
1258 1258 }
1259 1259 } else if (!(t->t_proc_flag & TP_STOPPING)) {
1260 1260 /*
1261 1261 * This resulted from a /proc run-on-last-close.
1262 1262 */
1263 1263 thread_lock(t);
1264 1264 t->t_schedflag |= TS_PSTART;
1265 1265 setrun_locked(t);
1266 1266 thread_unlock_nopreempt(t);
1267 1267 }
1268 1268 }
1269 1269
1270 1270 t->t_proc_flag &= ~TP_STOPPING;
1271 1271 mutex_exit(&p->p_lock);
1272 1272
1273 1273 swtch();
1274 1274 setallwatch(); /* reestablish any watchpoints set while stopped */
1275 1275 mutex_enter(&p->p_lock);
1276 1276 prbarrier(p); /* barrier against /proc locking */
1277 1277 }
1278 1278
1279 1279 /* Interface for resetting user thread stop count. */
1280 1280 void
1281 1281 utstop_init(void)
1282 1282 {
1283 1283 mutex_enter(&thread_stop_lock);
1284 1284 num_utstop = 0;
1285 1285 mutex_exit(&thread_stop_lock);
1286 1286 }
1287 1287
1288 1288 /* Interface for registering a user thread stop request. */
1289 1289 void
1290 1290 add_one_utstop(void)
1291 1291 {
1292 1292 mutex_enter(&thread_stop_lock);
1293 1293 num_utstop++;
1294 1294 mutex_exit(&thread_stop_lock);
1295 1295 }
1296 1296
1297 1297 /* Interface for cancelling a user thread stop request */
1298 1298 void
1299 1299 del_one_utstop(void)
1300 1300 {
1301 1301 mutex_enter(&thread_stop_lock);
1302 1302 num_utstop--;
1303 1303 if (num_utstop == 0)
1304 1304 cv_broadcast(&utstop_cv);
1305 1305 mutex_exit(&thread_stop_lock);
1306 1306 }
1307 1307
1308 1308 /* Interface to wait for all user threads to be stopped */
1309 1309 void
1310 1310 utstop_timedwait(clock_t ticks)
1311 1311 {
1312 1312 mutex_enter(&thread_stop_lock);
1313 1313 if (num_utstop > 0)
1314 1314 (void) cv_reltimedwait(&utstop_cv, &thread_stop_lock, ticks,
1315 1315 TR_CLOCK_TICK);
1316 1316 mutex_exit(&thread_stop_lock);
1317 1317 }
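
Taken together, these routines form a small counting protocol: register one stop request per user thread, then wait for stop() (which calls del_one_utstop() in its PR_CHECKPOINT path) to retire them all. A hedged sketch with a hypothetical caller:

	/* Hypothetical checkpoint path, for illustration only. */
	static void
	example_await_stops(int nthreads)
	{
		int i;

		utstop_init();			/* reset the counter */
		for (i = 0; i < nthreads; i++)
			add_one_utstop();	/* one request per thread */
		/* ... post TP_CHKPT stop requests to the threads ... */
		utstop_timedwait(hz);		/* bounded wait, ~1 second */
	}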
1318 1318
1319 1319 /*
1320 1320 * Perform the action specified by the current signal.
1321 1321 * The usual sequence is:
1322 1322 * if (issig())
1323 1323 * psig();
1324 1324 * The signal bit has already been cleared by issig(),
1325 1325 * the current signal number has been stored in lwp_cursig,
1326 1326 * and the current siginfo is now referenced by lwp_curinfo.
1327 1327 */
1328 1328 void
1329 1329 psig(void)
1330 1330 {
1331 1331 kthread_t *t = curthread;
1332 1332 proc_t *p = ttoproc(t);
1333 1333 klwp_t *lwp = ttolwp(t);
1334 1334 void (*func)();
1335 1335 int sig, rc, code, ext;
1336 1336 pid_t pid = -1;
1337 1337 id_t ctid = 0;
1338 1338 zoneid_t zoneid = -1;
1339 1339 sigqueue_t *sqp = NULL;
1340 1340 uint32_t auditing = AU_AUDITING();
1341 1341
1342 1342 mutex_enter(&p->p_lock);
1343 1343 schedctl_finish_sigblock(t);
1344 1344 code = CLD_KILLED;
1345 1345
1346 1346 if (p->p_flag & SEXITLWPS) {
1347 1347 lwp_exit();
1348 1348 return; /* not reached */
1349 1349 }
1350 1350 sig = lwp->lwp_cursig;
1351 1351 ext = lwp->lwp_extsig;
1352 1352
1353 1353 ASSERT(sig < NSIG);
1354 1354
1355 1355 /*
1356 1356 * Re-check lwp_cursig after we acquire p_lock. Since p_lock was
1357 1357 * dropped between issig() and psig(), a debugger may have cleared
1358 1358 * lwp_cursig via /proc in the intervening window.
1359 1359 */
1360 1360 if (sig == 0) {
1361 1361 if (lwp->lwp_curinfo) {
1362 1362 siginfofree(lwp->lwp_curinfo);
1363 1363 lwp->lwp_curinfo = NULL;
1364 1364 }
1365 1365 if (t->t_flag & T_TOMASK) { /* sigsuspend or pollsys */
1366 1366 t->t_flag &= ~T_TOMASK;
1367 1367 t->t_hold = lwp->lwp_sigoldmask;
1368 1368 }
1369 1369 mutex_exit(&p->p_lock);
1370 1370 return;
1371 1371 }
1372 1372 func = PTOU(curproc)->u_signal[sig-1];
1373 1373
1374 1374 /*
1375 1375 * The signal disposition could have changed since we promoted
1376 1376 * this signal from pending to current (we dropped p->p_lock).
1377 1377 * This can happen only in a multi-threaded process.
1378 1378 */
1379 1379 if (sig_ignorable(p, lwp, sig) ||
1380 1380 (func == SIG_DFL && sigismember(&stopdefault, sig))) {
1381 1381 lwp->lwp_cursig = 0;
1382 1382 lwp->lwp_extsig = 0;
1383 1383 if (lwp->lwp_curinfo) {
1384 1384 siginfofree(lwp->lwp_curinfo);
1385 1385 lwp->lwp_curinfo = NULL;
1386 1386 }
1387 1387 if (t->t_flag & T_TOMASK) { /* sigsuspend or pollsys */
1388 1388 t->t_flag &= ~T_TOMASK;
1389 1389 t->t_hold = lwp->lwp_sigoldmask;
1390 1390 }
1391 1391 mutex_exit(&p->p_lock);
1392 1392 return;
1393 1393 }
1394 1394
1395 1395 /*
1396 1396 * We check lwp_curinfo first since pr_setsig can actually
1397 1397 * stuff a sigqueue_t there for SIGKILL.
1398 1398 */
1399 1399 if (lwp->lwp_curinfo) {
1400 1400 sqp = lwp->lwp_curinfo;
1401 1401 } else if (sig == SIGKILL && p->p_killsqp) {
1402 1402 sqp = p->p_killsqp;
1403 1403 }
1404 1404
1405 1405 if (sqp != NULL) {
1406 1406 if (SI_FROMUSER(&sqp->sq_info)) {
1407 1407 pid = sqp->sq_info.si_pid;
1408 1408 ctid = sqp->sq_info.si_ctid;
1409 1409 zoneid = sqp->sq_info.si_zoneid;
1410 1410 }
1411 1411 /*
1412 1412 * If we have a sigqueue_t, its sq_external value
1413 1413 * trumps the lwp_extsig value. It is theoretically
1414 1414 * possible to make lwp_extsig reflect reality, but it
1415 1415 * would unnecessarily complicate things elsewhere.
1416 1416 */
1417 1417 ext = sqp->sq_external;
1418 1418 }
1419 1419
1420 1420 if (func == SIG_DFL) {
1421 1421 mutex_exit(&p->p_lock);
1422 1422 DTRACE_PROC3(signal__handle, int, sig, k_siginfo_t *,
1423 1423 NULL, void (*)(void), func);
1424 1424 } else {
1425 1425 k_siginfo_t *sip = NULL;
1426 1426
1427 1427 /*
1428 1428 * If DTrace user-land tracing is active, give DTrace a
1429 1429 * chance to defer the signal until after tracing is
1430 1430 * complete.
1431 1431 */
1432 1432 if (t->t_dtrace_on && dtrace_safe_defer_signal()) {
1433 1433 mutex_exit(&p->p_lock);
1434 1434 return;
1435 1435 }
1436 1436
1437 1437 /*
1438 1438 * save siginfo pointer here, in case the
1439 1439 * signal's reset bit is on
1440 1440 *
1441 1441 * The presence of a current signal prevents paging
1442 1442 * from succeeding over a network. We copy the current
1443 1443 * signal information to the side and cancel the current
1444 1444 * signal so that sendsig() will succeed.
1445 1445 */
1446 1446 if (sigismember(&p->p_siginfo, sig)) {
1447 1447 sip = &lwp->lwp_siginfo;
1448 1448 if (sqp) {
1449 1449 bcopy(&sqp->sq_info, sip, sizeof (*sip));
1450 1450 /*
1451 1451 * If we were interrupted out of a system call
1452 1452 * due to pthread_cancel(), inform libc.
1453 1453 */
1454 1454 if (sig == SIGCANCEL &&
1455 1455 sip->si_code == SI_LWP &&
1456 1456 t->t_sysnum != 0)
1457 1457 schedctl_cancel_eintr();
1458 1458 } else if (sig == SIGPROF && sip->si_signo == SIGPROF &&
1459 1459 t->t_rprof != NULL && t->t_rprof->rp_anystate) {
1460 1460 /* EMPTY */;
1461 1461 } else {
1462 1462 bzero(sip, sizeof (*sip));
1463 1463 sip->si_signo = sig;
1464 1464 sip->si_code = SI_NOINFO;
1465 1465 }
1466 1466 }
1467 1467
1468 1468 if (t->t_flag & T_TOMASK)
1469 1469 t->t_flag &= ~T_TOMASK;
1470 1470 else
1471 1471 lwp->lwp_sigoldmask = t->t_hold;
1472 1472 sigorset(&t->t_hold, &PTOU(curproc)->u_sigmask[sig-1]);
1473 1473 if (!sigismember(&PTOU(curproc)->u_signodefer, sig))
1474 1474 sigaddset(&t->t_hold, sig);
1475 1475 if (sigismember(&PTOU(curproc)->u_sigresethand, sig))
1476 1476 setsigact(sig, SIG_DFL, &nullsmask, 0);
1477 1477
1478 1478 DTRACE_PROC3(signal__handle, int, sig, k_siginfo_t *,
1479 1479 sip, void (*)(void), func);
1480 1480
1481 1481 lwp->lwp_cursig = 0;
1482 1482 lwp->lwp_extsig = 0;
1483 1483 if (lwp->lwp_curinfo) {
1484 1484 /* p->p_killsqp is freed by freeproc */
1485 1485 siginfofree(lwp->lwp_curinfo);
1486 1486 lwp->lwp_curinfo = NULL;
1487 1487 }
1488 1488 mutex_exit(&p->p_lock);
1489 1489 lwp->lwp_ru.nsignals++;
1490 1490
1491 1491 if (p->p_model == DATAMODEL_NATIVE)
1492 1492 rc = sendsig(sig, sip, func);
1493 1493 #ifdef _SYSCALL32_IMPL
1494 1494 else
1495 1495 rc = sendsig32(sig, sip, func);
1496 1496 #endif /* _SYSCALL32_IMPL */
1497 1497 if (rc)
1498 1498 return;
1499 1499 sig = lwp->lwp_cursig = SIGSEGV;
1500 1500 ext = 0; /* lwp_extsig was set above */
1501 1501 pid = -1;
1502 1502 ctid = 0;
1503 1503 }
1504 1504
1505 1505 if (sigismember(&coredefault, sig)) {
1506 1506 /*
1507 1507 * Terminate all LWPs but don't discard them.
1508 1508 * If another lwp beat us to the punch by calling exit(),
1509 1509 * evaporate now.
1510 1510 */
1511 1511 proc_is_exiting(p);
1512 1512 if (exitlwps(1) != 0) {
1513 1513 mutex_enter(&p->p_lock);
1514 1514 lwp_exit();
1515 1515 }
1516 1516 /* if we got a SIGKILL from anywhere, no core dump */
1517 1517 if (p->p_flag & SKILLED) {
1518 1518 sig = SIGKILL;
1519 1519 ext = (p->p_flag & SEXTKILLED) != 0;
1520 1520 } else {
1521 1521 if (auditing) /* audit core dump */
1522 1522 audit_core_start(sig);
1523 1523 if (core(sig, ext) == 0)
1524 1524 code = CLD_DUMPED;
1525 1525 if (auditing) /* audit core dump */
1526 1526 audit_core_finish(code);
1527 1527 }
1528 1528 }
1529 1529
1530 1530 /*
1531 1531 * Generate a contract event once if the process is killed
1532 1532 * by a signal.
1533 1533 */
1534 1534 if (ext) {
1535 1535 proc_is_exiting(p);
1536 1536 if (exitlwps(0) != 0) {
1537 1537 mutex_enter(&p->p_lock);
1538 1538 lwp_exit();
1539 1539 }
1540 1540 contract_process_sig(p->p_ct_process, p, sig, pid, ctid,
1541 1541 zoneid);
1542 1542 }
1543 1543
1544 1544 exit(code, sig);
1545 1545 }
1546 1546
1547 1547 /*
1548 1548 * Find next unheld signal in ssp for thread t.
1549 1549 */
1550 1550 int
1551 1551 fsig(k_sigset_t *ssp, kthread_t *t)
1552 1552 {
1553 1553 proc_t *p = ttoproc(t);
1554 1554 user_t *up = PTOU(p);
1555 1555 int i;
1556 1556 k_sigset_t temp;
1557 1557
1558 1558 ASSERT(MUTEX_HELD(&p->p_lock));
1559 1559
1560 1560 /*
1561 1561 * Don't promote any signals for the parent of a vfork()d
1562 1562 * child that hasn't yet released the parent's memory.
1563 1563 */
1564 1564 if (p->p_flag & SVFWAIT)
1565 1565 return (0);
1566 1566
1567 1567 temp = *ssp;
1568 1568 sigdiffset(&temp, &t->t_hold);
1569 1569
1570 1570 /*
1571 1571 * Don't promote stopping signals (except SIGSTOP) for a child
1572 1572 * of vfork() that hasn't yet released the parent's memory.
1573 1573 */
1574 1574 if (p->p_flag & SVFORK)
1575 1575 sigdiffset(&temp, &holdvfork);
1576 1576
1577 1577 /*
1578 1578 * Don't promote a signal that will stop
1579 1579 * the process when lwp_nostop is set.
1580 1580 */
1581 1581 if (ttolwp(t)->lwp_nostop) {
1582 1582 sigdelset(&temp, SIGSTOP);
1583 1583 if (!p->p_pgidp->pid_pgorphaned) {
1584 1584 if (up->u_signal[SIGTSTP-1] == SIG_DFL)
1585 1585 sigdelset(&temp, SIGTSTP);
1586 1586 if (up->u_signal[SIGTTIN-1] == SIG_DFL)
1587 1587 sigdelset(&temp, SIGTTIN);
1588 1588 if (up->u_signal[SIGTTOU-1] == SIG_DFL)
1589 1589 sigdelset(&temp, SIGTTOU);
1590 1590 }
1591 1591 }
1592 1592
1593 1593 /*
1594 1594 * Choose SIGKILL and SIGPROF before all other pending signals.
1595 1595 * The rest are promoted in signal number order.
1596 1596 */
1597 1597 if (sigismember(&temp, SIGKILL))
1598 1598 return (SIGKILL);
1599 1599 if (sigismember(&temp, SIGPROF))
1600 1600 return (SIGPROF);
1601 1601
1602 1602 for (i = 0; i < sizeof (temp) / sizeof (temp.__sigbits[0]); i++) {
1603 1603 if (temp.__sigbits[i])
1604 1604 return ((i * NBBY * sizeof (temp.__sigbits[0])) +
1605 1605 lowbit(temp.__sigbits[i]));
1606 1606 }
1607 1607
1608 1608 return (0);
1609 1609 }
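
A worked illustration of the promotion order above (assuming conventional illumos signal numbers and 32-bit __sigbits words, where signal n occupies bit (n - 1) % 32 of word (n - 1) / 32):

	/*
	 * pending = { SIGKILL (9), SIGTERM (15) }   ->  fsig() returns 9
	 * pending = { SIGTERM (15), SIGUSR2 (17) }  ->  returns 15 (lowest)
	 * pending = { signal 38 only }              ->  word i == 1 is nonzero,
	 *	lowbit() == 6, so (1 * 32) + 6 == 38 is returned.
	 */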
1610 1610
1611 1611 void
1612 1612 setsigact(int sig, void (*disp)(), const k_sigset_t *mask, int flags)
1613 1613 {
1614 1614 proc_t *p = ttoproc(curthread);
1615 1615 kthread_t *t;
1616 1616
1617 1617 ASSERT(MUTEX_HELD(&p->p_lock));
1618 1618
1619 1619 PTOU(curproc)->u_signal[sig - 1] = disp;
1620 1620
1621 1621 /*
1622 1622 * Honor the SA_SIGINFO flag if the signal is being caught.
1623 1623 * Force the SA_SIGINFO flag if the signal is not being caught.
1624 1624 * This is necessary to make sigqueue() and sigwaitinfo() work
1625 1625 * properly together when the signal is set to default or is
1626 1626 * being temporarily ignored.
1627 1627 */
1628 1628 if ((flags & SA_SIGINFO) || disp == SIG_DFL || disp == SIG_IGN)
1629 1629 sigaddset(&p->p_siginfo, sig);
1630 1630 else
1631 1631 sigdelset(&p->p_siginfo, sig);
1632 1632
1633 1633 if (disp != SIG_DFL && disp != SIG_IGN) {
1634 1634 sigdelset(&p->p_ignore, sig);
1635 1635 PTOU(curproc)->u_sigmask[sig - 1] = *mask;
1636 1636 if (!sigismember(&cantreset, sig)) {
1637 1637 if (flags & SA_RESETHAND)
1638 1638 sigaddset(&PTOU(curproc)->u_sigresethand, sig);
1639 1639 else
1640 1640 sigdelset(&PTOU(curproc)->u_sigresethand, sig);
1641 1641 }
1642 1642 if (flags & SA_NODEFER)
1643 1643 sigaddset(&PTOU(curproc)->u_signodefer, sig);
1644 1644 else
1645 1645 sigdelset(&PTOU(curproc)->u_signodefer, sig);
1646 1646 if (flags & SA_RESTART)
1647 1647 sigaddset(&PTOU(curproc)->u_sigrestart, sig);
1648 1648 else
1649 1649 sigdelset(&PTOU(curproc)->u_sigrestart, sig);
1650 1650 if (flags & SA_ONSTACK)
1651 1651 sigaddset(&PTOU(curproc)->u_sigonstack, sig);
1652 1652 else
1653 1653 sigdelset(&PTOU(curproc)->u_sigonstack, sig);
1654 1654 } else if (disp == SIG_IGN ||
1655 1655 (disp == SIG_DFL && sigismember(&ignoredefault, sig))) {
1656 1656 /*
1657 1657 * Setting the signal action to SIG_IGN results in the
1658 1658 * discarding of all pending signals of that signal number.
1659 1659 * Setting the signal action to SIG_DFL does the same *only*
1660 1660 * if the signal's default behavior is to be ignored.
1661 1661 */
1662 1662 sigaddset(&p->p_ignore, sig);
1663 1663 sigdelset(&p->p_sig, sig);
1664 1664 sigdelset(&p->p_extsig, sig);
1665 1665 sigdelq(p, NULL, sig);
1666 1666 t = p->p_tlist;
1667 1667 do {
1668 1668 sigdelset(&t->t_sig, sig);
1669 1669 sigdelset(&t->t_extsig, sig);
1670 1670 sigdelq(p, t, sig);
1671 1671 } while ((t = t->t_forw) != p->p_tlist);
1672 1672 } else {
1673 1673 /*
1674 1674 * The signal action is being set to SIG_DFL and the default
1675 1675 * behavior is to do something: make sure it is not ignored.
1676 1676 */
1677 1677 sigdelset(&p->p_ignore, sig);
1678 1678 }
1679 1679
1680 1680 if (sig == SIGCLD) {
1681 1681 if (flags & SA_NOCLDWAIT)
1682 1682 p->p_flag |= SNOWAIT;
1683 1683 else
1684 1684 p->p_flag &= ~SNOWAIT;
1685 1685
1686 1686 if (flags & SA_NOCLDSTOP)
1687 1687 p->p_flag &= ~SJCTL;
1688 1688 else
1689 1689 p->p_flag |= SJCTL;
1690 1690
1691 1691 if ((p->p_flag & SNOWAIT) || disp == SIG_IGN) {
1692 1692 proc_t *cp, *tp;
1693 1693
1694 1694 mutex_exit(&p->p_lock);
1695 1695 mutex_enter(&pidlock);
1696 1696 for (cp = p->p_child; cp != NULL; cp = tp) {
1697 1697 tp = cp->p_sibling;
1698 1698 if (cp->p_stat == SZOMB &&
1699 1699 !(cp->p_pidflag & CLDWAITPID))
1700 1700 freeproc(cp);
1701 1701 }
1702 1702 mutex_exit(&pidlock);
1703 1703 mutex_enter(&p->p_lock);
1704 1704 }
1705 1705 }
1706 1706 }
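The SA_SIGINFO bookkeeping above is what makes the three-argument handler form work from userland. A minimal, hypothetical userland sketch (plain sigaction(2) usage, not part of this file) whose flags end up in setsigact():

#include <signal.h>
#include <string.h>
#include <unistd.h>

/* Receives a filled-in siginfo_t because SA_SIGINFO was requested. */
static void
usr1_handler(int sig, siginfo_t *sip, void *ucp)
{
	/* write(2) is async-signal-safe; sip->si_pid names the sender */
	(void) write(STDERR_FILENO, "got SIGUSR1\n", 12);
}

int
main(void)
{
	struct sigaction sa;

	(void) memset(&sa, 0, sizeof (sa));
	sa.sa_sigaction = usr1_handler;
	sa.sa_flags = SA_SIGINFO;	/* honored by setsigact() above */
	(void) sigemptyset(&sa.sa_mask);
	(void) sigaction(SIGUSR1, &sa, NULL);
	(void) kill(getpid(), SIGUSR1);
	return (0);
}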
1707 1707
1708 1708 /*
1709 1709 * Set all signal actions not already set to SIG_DFL or SIG_IGN to SIG_DFL.
1710 1710 * Called from exec_common() for a process undergoing execve()
1711 1711 * and from cfork() for a newly-created child of vfork().
1712 1712 * In the vfork() case, 'p' is not the current process.
1713 1713 * In both cases, there is only one thread in the process.
1714 1714 */
1715 1715 void
1716 1716 sigdefault(proc_t *p)
1717 1717 {
1718 1718 kthread_t *t = p->p_tlist;
1719 1719 struct user *up = PTOU(p);
1720 1720 int sig;
1721 1721
1722 1722 ASSERT(MUTEX_HELD(&p->p_lock));
1723 1723
1724 1724 for (sig = 1; sig < NSIG; sig++) {
1725 1725 if (up->u_signal[sig - 1] != SIG_DFL &&
1726 1726 up->u_signal[sig - 1] != SIG_IGN) {
1727 1727 up->u_signal[sig - 1] = SIG_DFL;
1728 1728 sigemptyset(&up->u_sigmask[sig - 1]);
1729 1729 if (sigismember(&ignoredefault, sig)) {
1730 1730 sigdelq(p, NULL, sig);
1731 1731 sigdelq(p, t, sig);
1732 1732 }
1733 1733 if (sig == SIGCLD)
1734 1734 p->p_flag &= ~(SNOWAIT|SJCTL);
1735 1735 }
1736 1736 }
1737 1737 sigorset(&p->p_ignore, &ignoredefault);
1738 1738 sigfillset(&p->p_siginfo);
1739 1739 sigdiffset(&p->p_siginfo, &cantmask);
1740 1740 sigdiffset(&p->p_sig, &ignoredefault);
1741 1741 sigdiffset(&p->p_extsig, &ignoredefault);
1742 1742 sigdiffset(&t->t_sig, &ignoredefault);
1743 1743 sigdiffset(&t->t_extsig, &ignoredefault);
1744 1744 }
1745 1745
1746 1746 void
1747 1747 sigcld(proc_t *cp, sigqueue_t *sqp)
1748 1748 {
1749 1749 proc_t *pp = cp->p_parent;
1750 1750
1751 1751 ASSERT(MUTEX_HELD(&pidlock));
1752 1752
1753 1753 switch (cp->p_wcode) {
1754 1754 case CLD_EXITED:
1755 1755 case CLD_DUMPED:
1756 1756 case CLD_KILLED:
1757 1757 ASSERT(cp->p_stat == SZOMB);
1758 1758 /*
1759 1759 * The broadcast on p_srwchan_cv is a kludge to
1760 1760 * wakeup a possible thread in uadmin(A_SHUTDOWN).
1761 1761 */
1762 1762 cv_broadcast(&cp->p_srwchan_cv);
1763 1763
1764 1764 /*
1765 1765 * Add to newstate list of the parent
1766 1766 */
1767 1767 add_ns(pp, cp);
1768 1768
1769 1769 cv_broadcast(&pp->p_cv);
1770 1770 if ((pp->p_flag & SNOWAIT) ||
1771 1771 PTOU(pp)->u_signal[SIGCLD - 1] == SIG_IGN) {
1772 1772 if (!(cp->p_pidflag & CLDWAITPID))
1773 1773 freeproc(cp);
1774 1774 } else if (!(cp->p_pidflag & CLDNOSIGCHLD)) {
1775 1775 post_sigcld(cp, sqp);
1776 1776 sqp = NULL;
1777 1777 }
1778 1778 break;
1779 1779
1780 1780 case CLD_STOPPED:
1781 1781 case CLD_CONTINUED:
1782 1782 cv_broadcast(&pp->p_cv);
1783 1783 if (pp->p_flag & SJCTL) {
1784 1784 post_sigcld(cp, sqp);
1785 1785 sqp = NULL;
1786 1786 }
1787 1787 break;
1788 1788 }
1789 1789
1790 1790 if (sqp)
1791 1791 siginfofree(sqp);
1792 1792 }
1793 1793
1794 1794 /*
1795 1795 * Common code called from sigcld() and from
1796 1796 * waitid() and issig_forreal() via sigcld_repost().
1797 1797 * Give the parent process a SIGCLD if it does not have one pending,
1798 1798 * else mark the child process so a SIGCLD can be posted later.
1799 1799 */
1800 1800 static void
1801 1801 post_sigcld(proc_t *cp, sigqueue_t *sqp)
1802 1802 {
1803 1803 proc_t *pp = cp->p_parent;
1804 1804 k_siginfo_t info;
1805 1805
1806 1806 ASSERT(MUTEX_HELD(&pidlock));
1807 1807 mutex_enter(&pp->p_lock);
1808 1808
1809 1809 /*
1810 1810 * If a SIGCLD is pending, then just mark the child process
1811 1811 * so that its SIGCLD will be posted later, when the first
1812 1812 * SIGCLD is taken off the queue or when the parent is ready
1813 1813 * to receive it or accept it, if ever.
1814 1814 */
1815 1815 if (sigismember(&pp->p_sig, SIGCLD)) {
1816 1816 cp->p_pidflag |= CLDPEND;
1817 1817 } else {
1818 1818 cp->p_pidflag &= ~CLDPEND;
1819 1819 if (sqp == NULL) {
1820 1820 /*
1821 1821 * This can only happen when the parent is init.
1822 1822 * (See call to sigcld(q, NULL) in exit().)
1823 1823 * Use KM_NOSLEEP to avoid deadlock. The child proc's
1824 1824 * initpid can be 1 for zlogin.
1825 1825 */
1826 1826 ASSERT(pp->p_pidp->pid_id ==
1827 1827 cp->p_zone->zone_proc_initpid ||
1828 1828 pp->p_pidp->pid_id == 1);
1829 1829 winfo(cp, &info, 0);
1830 1830 sigaddq(pp, NULL, &info, KM_NOSLEEP);
1831 1831 } else {
1832 1832 winfo(cp, &sqp->sq_info, 0);
1833 1833 sigaddqa(pp, NULL, sqp);
1834 1834 sqp = NULL;
1835 1835 }
1836 1836 }
1837 1837
1838 1838 mutex_exit(&pp->p_lock);
1839 1839
1840 1840 if (sqp)
1841 1841 siginfofree(sqp);
1842 1842 }
1843 1843
1844 1844 /*
1845 1845 * Search for a child that has a pending SIGCLD for us, the parent.
1846 1846 * The queue of SIGCLD signals is implied by the list of children.
1847 1847 * We post the SIGCLD signals one at a time so they don't get lost.
1848 1848 * When one is dequeued, another is enqueued, until there are no more.
1849 1849 */
1850 1850 void
1851 1851 sigcld_repost()
1852 1852 {
1853 1853 proc_t *pp = curproc;
1854 1854 proc_t *cp;
1855 1855 sigqueue_t *sqp;
1856 1856
1857 1857 sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);
1858 1858 mutex_enter(&pidlock);
1859 1859 if (PROC_IS_BRANDED(pp) && BROP(pp)->b_sigcld_repost != NULL) {
1860 1860 /*
1861 1861 * Allow the brand to inject synthetic SIGCLD signals.
1862 1862 */
1863 1863 if (BROP(pp)->b_sigcld_repost(pp, sqp) == 0) {
1864 1864 mutex_exit(&pidlock);
1865 1865 return;
1866 1866 }
1867 1867 }
1868 1868 for (cp = pp->p_child; cp; cp = cp->p_sibling) {
1869 1869 if (cp->p_pidflag & CLDPEND) {
1870 1870 post_sigcld(cp, sqp);
1871 1871 mutex_exit(&pidlock);
1872 1872 return;
1873 1873 }
1874 1874 }
1875 1875 mutex_exit(&pidlock);
1876 1876 kmem_free(sqp, sizeof (sigqueue_t));
1877 1877 }
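Because the queue of SIGCLD signals is implied by the child list and reposted one at a time, userland cannot in general assume a one-to-one mapping between exited children and delivered signals; the portable reaping loop remains correct here. A hedged illustration (standard POSIX, not part of this file):

#include <sys/types.h>
#include <sys/wait.h>
#include <errno.h>
#include <signal.h>

/*
 * Reap every child that is ready. On illumos the repost logic
 * above re-delivers SIGCLD while unwaited-for children remain,
 * so one waitpid() per signal would also work, but looping
 * over waitpid(WNOHANG) is safe everywhere.
 */
static void
sigchld_handler(int sig)
{
	int save = errno;	/* waitpid() may clobber errno */

	while (waitpid(-1, NULL, WNOHANG) > 0)
		continue;
	errno = save;
}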
1878 1878
1879 1879 /*
1880 1880 * Count the number of sigqueue structures sent by sigaddqa().
1881 1881 */
1882 1882 void
1883 1883 sigqsend(int cmd, proc_t *p, kthread_t *t, sigqueue_t *sigqp)
1884 1884 {
1885 1885 sigqhdr_t *sqh;
1886 1886
1887 1887 sqh = (sigqhdr_t *)sigqp->sq_backptr;
1888 1888 ASSERT(sqh);
1889 1889
1890 1890 mutex_enter(&sqh->sqb_lock);
1891 1891 sqh->sqb_sent++;
1892 1892 mutex_exit(&sqh->sqb_lock);
1893 1893
1894 1894 if (cmd == SN_SEND)
1895 1895 sigaddqa(p, t, sigqp);
1896 1896 else
1897 1897 siginfofree(sigqp);
1898 1898 }
1899 1899
1900 1900 int
1901 1901 sigsendproc(proc_t *p, sigsend_t *pv)
1902 1902 {
1903 1903 struct cred *cr;
1904 1904 proc_t *myprocp = curproc;
1905 1905
1906 1906 ASSERT(MUTEX_HELD(&pidlock));
1907 1907
1908 1908 if (p->p_pid == 1 && pv->sig && sigismember(&cantmask, pv->sig))
1909 1909 return (EPERM);
1910 1910
1911 1911 cr = CRED();
1912 1912
1913 1913 if (pv->checkperm == 0 ||
1914 1914 (pv->sig == SIGCONT && p->p_sessp == myprocp->p_sessp) ||
1915 1915 prochasprocperm(p, myprocp, cr)) {
1916 1916 pv->perm++;
1917 1917 if (pv->sig) {
1918 1918 /* Make sure we should be setting si_pid and friends */
1919 1919 ASSERT(pv->sicode <= 0);
1920 1920 if (SI_CANQUEUE(pv->sicode)) {
1921 1921 sigqueue_t *sqp;
1922 1922
1923 1923 mutex_enter(&myprocp->p_lock);
1924 1924 sqp = sigqalloc(myprocp->p_sigqhdr);
1925 1925 mutex_exit(&myprocp->p_lock);
1926 1926 if (sqp == NULL)
1927 1927 return (EAGAIN);
1928 1928 sqp->sq_info.si_signo = pv->sig;
1929 1929 sqp->sq_info.si_code = pv->sicode;
1930 1930 sqp->sq_info.si_pid = myprocp->p_pid;
1931 1931 sqp->sq_info.si_ctid = PRCTID(myprocp);
1932 1932 sqp->sq_info.si_zoneid = getzoneid();
1933 1933 sqp->sq_info.si_uid = crgetruid(cr);
1934 1934 sqp->sq_info.si_value = pv->value;
1935 1935 mutex_enter(&p->p_lock);
1936 1936 sigqsend(SN_SEND, p, NULL, sqp);
1937 1937 mutex_exit(&p->p_lock);
1938 1938 } else {
1939 1939 k_siginfo_t info;
1940 1940 bzero(&info, sizeof (info));
1941 1941 info.si_signo = pv->sig;
1942 1942 info.si_code = pv->sicode;
1943 1943 info.si_pid = myprocp->p_pid;
1944 1944 info.si_ctid = PRCTID(myprocp);
1945 1945 info.si_zoneid = getzoneid();
1946 1946 info.si_uid = crgetruid(cr);
1947 1947 mutex_enter(&p->p_lock);
1948 1948 /*
1949 1949 * XXX: Should be KM_SLEEP but
1950 1950 * we have to avoid deadlock.
1951 1951 */
1952 1952 sigaddq(p, NULL, &info, KM_NOSLEEP);
1953 1953 mutex_exit(&p->p_lock);
1954 1954 }
1955 1955 }
1956 1956 }
1957 1957
1958 1958 return (0);
1959 1959 }
1960 1960
1961 1961 int
1962 1962 sigsendset(procset_t *psp, sigsend_t *pv)
1963 1963 {
1964 1964 int error;
1965 1965
1966 1966 error = dotoprocs(psp, sigsendproc, (char *)pv);
1967 1967 if (error == 0 && pv->perm == 0)
1968 1968 return (EPERM);
1969 1969
1970 1970 return (error);
1971 1971 }
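sigsendproc() is the kernel half of sigqueue(3C): a code that passes SI_CANQUEUE() carries the sender's value in sq_info.si_value rather than being collapsed to a single pending bit. A minimal, hypothetical sender sketch:

#include <sys/types.h>
#include <signal.h>

/*
 * Hypothetical helper: 'token' travels to the target in
 * sqp->sq_info.si_value via sigsendproc() above.
 */
int
queue_token(pid_t pid, int token)
{
	union sigval sv;

	sv.sival_int = token;
	return (sigqueue(pid, SIGRTMIN, sv));
}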
1972 1972
1973 1973 /*
1974 1974 * Dequeue a queued siginfo structure.
1975 1975 * If a non-null thread pointer is passed then dequeue from
1976 1976 * the thread queue, otherwise dequeue from the process queue.
1977 1977 */
1978 1978 void
1979 1979 sigdeq(proc_t *p, kthread_t *t, int sig, sigqueue_t **qpp)
1980 1980 {
1981 1981 sigqueue_t **psqp, *sqp;
1982 1982
1983 1983 ASSERT(MUTEX_HELD(&p->p_lock));
1984 1984
1985 1985 *qpp = NULL;
1986 1986
1987 1987 if (t != NULL) {
1988 1988 sigdelset(&t->t_sig, sig);
1989 1989 sigdelset(&t->t_extsig, sig);
1990 1990 psqp = &t->t_sigqueue;
1991 1991 } else {
1992 1992 sigdelset(&p->p_sig, sig);
1993 1993 sigdelset(&p->p_extsig, sig);
1994 1994 psqp = &p->p_sigqueue;
1995 1995 }
1996 1996
1997 1997 for (;;) {
1998 1998 if ((sqp = *psqp) == NULL)
1999 1999 return;
2000 2000 if (sqp->sq_info.si_signo == sig)
2001 2001 break;
2002 2002 else
2003 2003 psqp = &sqp->sq_next;
2004 2004 }
2005 2005 *qpp = sqp;
2006 2006 *psqp = sqp->sq_next;
2007 2007 for (sqp = *psqp; sqp; sqp = sqp->sq_next) {
2008 2008 if (sqp->sq_info.si_signo == sig) {
2009 2009 if (t != (kthread_t *)NULL) {
2010 2010 sigaddset(&t->t_sig, sig);
2011 2011 t->t_sig_check = 1;
2012 2012 } else {
2013 2013 sigaddset(&p->p_sig, sig);
2014 2014 set_proc_ast(p);
2015 2015 }
2016 2016 break;
2017 2017 }
2018 2018 }
2019 2019 }
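Note the second loop above: after unlinking one entry, sigdeq() re-raises the pending bit if another entry with the same signal number remains, which is what makes depth queueing visible from userland. A hedged demonstration (standard POSIX; names are illustrative):

#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static volatile sig_atomic_t deliveries;

static void
rt_handler(int sig, siginfo_t *sip, void *ucp)
{
	deliveries++;
}

int
main(void)
{
	struct sigaction sa;
	sigset_t set;
	union sigval sv;
	int i;

	(void) memset(&sa, 0, sizeof (sa));
	sa.sa_sigaction = rt_handler;
	sa.sa_flags = SA_SIGINFO;
	(void) sigemptyset(&sa.sa_mask);
	(void) sigaction(SIGRTMIN, &sa, NULL);

	(void) sigemptyset(&set);
	(void) sigaddset(&set, SIGRTMIN);
	(void) sigprocmask(SIG_BLOCK, &set, NULL);	/* hold them */
	for (i = 0; i < 3; i++) {
		sv.sival_int = i;
		(void) sigqueue(getpid(), SIGRTMIN, sv);
	}
	(void) sigprocmask(SIG_UNBLOCK, &set, NULL);

	/* all three queued instances are delivered, not just one */
	(void) printf("deliveries: %d\n", (int)deliveries);
	return (0);
}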
2020 2020
2021 2021 /*
2022 2022 * Delete a queued SIGCLD siginfo structure matching the k_siginfo_t argument.
2023 2023 */
2024 2024 void
2025 2025 sigcld_delete(k_siginfo_t *ip)
2026 2026 {
2027 2027 proc_t *p = curproc;
2028 2028 int another_sigcld = 0;
2029 2029 sigqueue_t **psqp, *sqp;
2030 2030
2031 2031 ASSERT(ip->si_signo == SIGCLD);
2032 2032
2033 2033 mutex_enter(&p->p_lock);
2034 2034
2035 2035 if (!sigismember(&p->p_sig, SIGCLD)) {
2036 2036 mutex_exit(&p->p_lock);
2037 2037 return;
2038 2038 }
2039 2039
2040 2040 psqp = &p->p_sigqueue;
2041 2041 for (;;) {
2042 2042 if ((sqp = *psqp) == NULL) {
2043 2043 mutex_exit(&p->p_lock);
2044 2044 return;
2045 2045 }
2046 2046 if (sqp->sq_info.si_signo == SIGCLD) {
2047 2047 if (sqp->sq_info.si_pid == ip->si_pid &&
2048 2048 sqp->sq_info.si_code == ip->si_code &&
2049 2049 sqp->sq_info.si_status == ip->si_status)
2050 2050 break;
2051 2051 another_sigcld = 1;
2052 2052 }
2053 2053 psqp = &sqp->sq_next;
2054 2054 }
2055 2055 *psqp = sqp->sq_next;
2056 2056
2057 2057 siginfofree(sqp);
2058 2058
2059 2059 for (sqp = *psqp; !another_sigcld && sqp; sqp = sqp->sq_next) {
2060 2060 if (sqp->sq_info.si_signo == SIGCLD)
2061 2061 another_sigcld = 1;
2062 2062 }
2063 2063
2064 2064 if (!another_sigcld) {
2065 2065 sigdelset(&p->p_sig, SIGCLD);
2066 2066 sigdelset(&p->p_extsig, SIGCLD);
2067 2067 }
2068 2068
2069 2069 mutex_exit(&p->p_lock);
2070 2070 }
2071 2071
2072 2072 /*
2073 2073 * Delete queued siginfo structures.
2074 2074 * If a non-null thread pointer is passed then delete from
2075 2075 * the thread queue, otherwise delete from the process queue.
2076 2076 */
2077 2077 void
2078 2078 sigdelq(proc_t *p, kthread_t *t, int sig)
2079 2079 {
2080 2080 sigqueue_t **psqp, *sqp;
2081 2081
2082 2082 /*
2083 2083 * We must be holding p->p_lock unless the process is
2084 2084 * being reaped or has failed to get started on fork.
2085 2085 */
2086 2086 ASSERT(MUTEX_HELD(&p->p_lock) ||
2087 2087 p->p_stat == SIDL || p->p_stat == SZOMB);
2088 2088
2089 2089 if (t != (kthread_t *)NULL)
2090 2090 psqp = &t->t_sigqueue;
2091 2091 else
2092 2092 psqp = &p->p_sigqueue;
2093 2093
2094 2094 while (*psqp) {
2095 2095 sqp = *psqp;
2096 2096 if (sig == 0 || sqp->sq_info.si_signo == sig) {
2097 2097 *psqp = sqp->sq_next;
2098 2098 siginfofree(sqp);
2099 2099 } else
2100 2100 psqp = &sqp->sq_next;
2101 2101 }
2102 2102 }
2103 2103
2104 2104 /*
2105 2105 * Insert a siginfo structure into a queue.
2106 2106 * If a non-null thread pointer is passed then add to the thread queue,
2107 2107 * otherwise add to the process queue.
2108 2108 *
2109 2109 * The function sigaddqins() is called with sigqueue already allocated.
2110 2110 * It is called from sigaddqa() and sigaddq() below.
2111 2111 *
2112 2112 * The value of si_code implicitly indicates whether sigqp is to be
2113 2113 * explicitly queued, or to be queued to depth one.
2114 2114 */
2115 2115 static void
2116 2116 sigaddqins(proc_t *p, kthread_t *t, sigqueue_t *sigqp)
2117 2117 {
2118 2118 sigqueue_t **psqp;
2119 2119 int sig = sigqp->sq_info.si_signo;
2120 2120
2121 2121 sigqp->sq_external = (curproc != &p0) &&
2122 2122 (curproc->p_ct_process != p->p_ct_process);
2123 2123
2124 2124 /*
2125 2125 * issig_forreal() doesn't bother dequeueing signals if SKILLED
2126 2126 * is set, and even if it did, we would want to avoid the situation
2127 2127 * (which would be unique to SIGKILL) where one thread dequeued
2128 2128 * the sigqueue_t and another executed psig(). So we create a
2129 2129 * separate stash for SIGKILL's sigqueue_t. Because a second
2130 2130 * SIGKILL can set SEXTKILLED, we overwrite the existing entry
2131 2131 * if (and only if) it was non-extracontractual.
2132 2132 */
2133 2133 if (sig == SIGKILL) {
2134 2134 if (p->p_killsqp == NULL || !p->p_killsqp->sq_external) {
2135 2135 if (p->p_killsqp != NULL)
2136 2136 siginfofree(p->p_killsqp);
2137 2137 p->p_killsqp = sigqp;
2138 2138 sigqp->sq_next = NULL;
2139 2139 } else {
2140 2140 siginfofree(sigqp);
2141 2141 }
2142 2142 return;
2143 2143 }
2144 2144
2145 2145 ASSERT(sig >= 1 && sig < NSIG);
2146 2146 if (t != NULL) /* directed to a thread */
2147 2147 psqp = &t->t_sigqueue;
2148 2148 else /* directed to a process */
2149 2149 psqp = &p->p_sigqueue;
2150 2150 if (SI_CANQUEUE(sigqp->sq_info.si_code) &&
2151 2151 sigismember(&p->p_siginfo, sig)) {
2152 2152 for (; *psqp != NULL; psqp = &(*psqp)->sq_next)
2153 2153 ;
2154 2154 } else {
2155 2155 for (; *psqp != NULL; psqp = &(*psqp)->sq_next) {
2156 2156 if ((*psqp)->sq_info.si_signo == sig) {
2157 2157 siginfofree(sigqp);
2158 2158 return;
2159 2159 }
2160 2160 }
2161 2161 }
2162 2162 *psqp = sigqp;
2163 2163 sigqp->sq_next = NULL;
2164 2164 }
2165 2165
2166 2166 /*
2167 2167 * The function sigaddqa() is called, from sigqueue() and elsewhere, with
2168 2168 * the sigqueue already allocated. If the signal is discardable the entry
2169 2169 * is freed, but sigtoproc() still runs to keep SIGKILL/generation semantics.
2170 2170 */
2171 2171 void
2172 2172 sigaddqa(proc_t *p, kthread_t *t, sigqueue_t *sigqp)
2173 2173 {
2174 2174 int sig = sigqp->sq_info.si_signo;
2175 2175
2176 2176 ASSERT(MUTEX_HELD(&p->p_lock));
2177 2177 ASSERT(sig >= 1 && sig < NSIG);
2178 2178
2179 2179 if (sig_discardable(p, t, sig))
2180 2180 siginfofree(sigqp);
2181 2181 else
2182 2182 sigaddqins(p, t, sigqp);
2183 2183
2184 2184 sigtoproc(p, t, sig);
2185 2185 }
2186 2186
2187 2187 /*
2188 2188 * Allocate the sigqueue_t structure and call sigaddqins().
2189 2189 */
2190 2190 void
2191 2191 sigaddq(proc_t *p, kthread_t *t, k_siginfo_t *infop, int km_flags)
2192 2192 {
2193 2193 sigqueue_t *sqp;
2194 2194 int sig = infop->si_signo;
2195 2195
2196 2196 ASSERT(MUTEX_HELD(&p->p_lock));
2197 2197 ASSERT(sig >= 1 && sig < NSIG);
2198 2198
2199 2199 /*
2200 2200 * If the signal will be discarded by sigtoproc() or
2201 2201 * if the process isn't requesting siginfo and it isn't
2202 2202 * blocking the signal (it *could* change its mind while
2203 2203 * the signal is pending) then don't bother creating one.
2204 2204 */
2205 2205 if (!sig_discardable(p, t, sig) &&
2206 2206 (sigismember(&p->p_siginfo, sig) ||
2207 2207 (curproc->p_ct_process != p->p_ct_process) ||
2208 2208 (sig == SIGCLD && SI_FROMKERNEL(infop))) &&
2209 2209 ((sqp = kmem_alloc(sizeof (sigqueue_t), km_flags)) != NULL)) {
2210 2210 bcopy(infop, &sqp->sq_info, sizeof (k_siginfo_t));
2211 2211 sqp->sq_func = NULL;
2212 2212 sqp->sq_next = NULL;
2213 2213 sigaddqins(p, t, sqp);
2214 2214 }
2215 2215 sigtoproc(p, t, sig);
2216 2216 }
2217 2217
2218 2218 /*
2219 2219 * Handle stop-on-fault processing for the debugger. Returns 0
2220 2220 * if the fault is cleared during the stop, nonzero if it isn't.
2221 2221 */
2222 2222 int
2223 2223 stop_on_fault(uint_t fault, k_siginfo_t *sip)
2224 2224 {
2225 2225 proc_t *p = ttoproc(curthread);
2226 2226 klwp_t *lwp = ttolwp(curthread);
2227 2227
2228 2228 ASSERT(prismember(&p->p_fltmask, fault));
2229 2229
2230 2230 /*
2231 2231 * Record current fault and siginfo structure so debugger can
2232 2232 * find it.
2233 2233 */
2234 2234 mutex_enter(&p->p_lock);
2235 2235 lwp->lwp_curflt = (uchar_t)fault;
2236 2236 lwp->lwp_siginfo = *sip;
2237 2237
2238 2238 stop(PR_FAULTED, fault);
2239 2239
2240 2240 fault = lwp->lwp_curflt;
2241 2241 lwp->lwp_curflt = 0;
2242 2242 mutex_exit(&p->p_lock);
2243 2243 return (fault);
2244 2244 }
2245 2245
2246 2246 void
2247 2247 sigorset(k_sigset_t *s1, const k_sigset_t *s2)
2248 2248 {
2249 2249 s1->__sigbits[0] |= s2->__sigbits[0];
2250 2250 s1->__sigbits[1] |= s2->__sigbits[1];
2251 2251 s1->__sigbits[2] |= s2->__sigbits[2];
2252 2252 }
2253 2253
2254 2254 void
2255 2255 sigandset(k_sigset_t *s1, const k_sigset_t *s2)
2256 2256 {
2257 2257 s1->__sigbits[0] &= s2->__sigbits[0];
2258 2258 s1->__sigbits[1] &= s2->__sigbits[1];
2259 2259 s1->__sigbits[2] &= s2->__sigbits[2];
2260 2260 }
2261 2261
2262 2262 void
2263 2263 sigdiffset(k_sigset_t *s1, const k_sigset_t *s2)
2264 2264 {
2265 2265 s1->__sigbits[0] &= ~(s2->__sigbits[0]);
2266 2266 s1->__sigbits[1] &= ~(s2->__sigbits[1]);
2267 2267 s1->__sigbits[2] &= ~(s2->__sigbits[2]);
2268 2268 }
2269 2269
2270 2270 /*
2271 2271 * Return non-zero if curthread->t_sig_check should be set to 1, that is,
2272 2272 * if there are any signals the thread might take on return from the kernel.
2273 2273 * If k_sigset_t's were a single word, we would do:
2274 2274 * return (((p->p_sig | t->t_sig) & ~t->t_hold) & fillset);
2275 2275 */
2276 2276 int
2277 2277 sigcheck(proc_t *p, kthread_t *t)
2278 2278 {
2279 2279 sc_shared_t *tdp = t->t_schedctl;
2280 2280
2281 2281 /*
2282 2282 * If signals are blocked via the schedctl interface
2283 2283 * then we only check for the unmaskable signals.
2284 2284 * The unmaskable signal numbers should all be contained
2285 2285 * in __sigbits[0] and we assume this for speed.
2286 2286 */
2287 2287 #if (CANTMASK1 == 0 && CANTMASK2 == 0)
2288 2288 if (tdp != NULL && tdp->sc_sigblock)
2289 2289 return ((p->p_sig.__sigbits[0] | t->t_sig.__sigbits[0]) &
2290 2290 CANTMASK0);
2291 2291 #else
2292 2292 #error "fix me: CANTMASK1 and CANTMASK2 are not zero"
2293 2293 #endif
2294 2294
2295 2295 /* see uts/common/sys/signal.h for why this must be true */
2296 2296 #if ((MAXSIG > (2 * 32)) && (MAXSIG <= (3 * 32)))
2297 2297 return (((p->p_sig.__sigbits[0] | t->t_sig.__sigbits[0]) &
2298 2298 ~t->t_hold.__sigbits[0]) |
2299 2299 ((p->p_sig.__sigbits[1] | t->t_sig.__sigbits[1]) &
2300 2300 ~t->t_hold.__sigbits[1]) |
2301 2301 (((p->p_sig.__sigbits[2] | t->t_sig.__sigbits[2]) &
2302 2302 ~t->t_hold.__sigbits[2]) & FILLSET2));
2303 2303 #else
2304 2304 #error "fix me: MAXSIG out of bounds"
2305 2305 #endif
2306 2306 }
2307 2307
2308 2308 void
2309 2309 sigintr(k_sigset_t *smask, int intable)
2310 2310 {
2311 2311 proc_t *p;
2312 2312 int owned;
2313 2313 k_sigset_t lmask; /* local copy of cantmask */
2314 2314 klwp_t *lwp = ttolwp(curthread);
2315 2315
2316 2316 /*
2317 2317 * Mask out all signals except SIGHUP, SIGINT, SIGQUIT
2318 2318 * and SIGTERM, preserving any signals already masked.
2319 2319 * This function supports the -intr nfs and ufs mount option.
2320 2320 */
2321 2321
2322 2322 /*
2323 2323 * don't do kernel threads
2324 2324 */
2325 2325 if (lwp == NULL)
2326 2326 return;
2327 2327
2328 2328 /*
2329 2329 * get access to signal mask
2330 2330 */
2331 2331 p = ttoproc(curthread);
2332 2332 owned = mutex_owned(&p->p_lock); /* this is filthy */
2333 2333 if (!owned)
2334 2334 mutex_enter(&p->p_lock);
2335 2335
2336 2336 /*
2337 2337 * remember the current mask
2338 2338 */
2339 2339 schedctl_finish_sigblock(curthread);
2340 2340 *smask = curthread->t_hold;
2341 2341
2342 2342 /*
2343 2343 * mask out all signals
2344 2344 */
2345 2345 sigfillset(&curthread->t_hold);
2346 2346
2347 2347 /*
2348 2348 * Unmask the non-maskable signals (e.g., KILL), as long as
2349 2349 * they aren't already masked (which could happen at exit).
2350 2350 * The first sigdiffset sets lmask to (cantmask & ~curhold). The
2351 2351 * second sets the current hold mask to (~0 & ~lmask), which reduces
2352 2352 * to (~cantmask | curhold).
2353 2353 */
2354 2354 lmask = cantmask;
2355 2355 sigdiffset(&lmask, smask);
2356 2356 sigdiffset(&curthread->t_hold, &lmask);
2357 2357
2358 2358 /*
2359 2359 * Re-enable HUP, QUIT, and TERM iff they were originally enabled.
2360 2360 * Re-enable INT if it was originally enabled and the NFS mount option
2361 2361 * nointr is not set.
2362 2362 */
2363 2363 if (!sigismember(smask, SIGHUP))
2364 2364 sigdelset(&curthread->t_hold, SIGHUP);
2365 2365 if (!sigismember(smask, SIGINT) && intable)
2366 2366 sigdelset(&curthread->t_hold, SIGINT);
2367 2367 if (!sigismember(smask, SIGQUIT))
2368 2368 sigdelset(&curthread->t_hold, SIGQUIT);
2369 2369 if (!sigismember(smask, SIGTERM))
2370 2370 sigdelset(&curthread->t_hold, SIGTERM);
2371 2371
2372 2372 /*
2373 2373 * release access to signal mask
2374 2374 */
2375 2375 if (!owned)
2376 2376 mutex_exit(&p->p_lock);
2377 2377
2378 2378 /*
2379 2379 * Indicate that this lwp is not to be stopped.
2380 2380 */
2381 2381 lwp->lwp_nostop++;
2382 2382
2383 2383 }
2384 2384
2385 2385 void
2386 2386 sigunintr(k_sigset_t *smask)
2387 2387 {
2388 2388 proc_t *p;
2389 2389 int owned;
2390 2390 klwp_t *lwp = ttolwp(curthread);
2391 2391
2392 2392 /*
2393 2393 * Reset previous mask (See sigintr() above)
2394 2394 */
2395 2395 if (lwp != NULL) {
2396 2396 lwp->lwp_nostop--; /* restore lwp stoppability */
2397 2397 p = ttoproc(curthread);
2398 2398 owned = mutex_owned(&p->p_lock); /* this is filthy */
2399 2399 if (!owned)
2400 2400 mutex_enter(&p->p_lock);
2401 2401 curthread->t_hold = *smask;
2402 2402 /* so unmasked signals will be seen */
2403 2403 curthread->t_sig_check = 1;
2404 2404 if (!owned)
2405 2405 mutex_exit(&p->p_lock);
2406 2406 }
2407 2407 }
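sigintr() and sigunintr() are intended to bracket an interruptible wait, as the nfs/ufs "intr" support does. A sketch of the pairing, assuming a hypothetical condvar/mutex pair owned by the caller:

/*
 * Illustrative sketch only; cv, mp, and 'interruptible' are
 * hypothetical caller state, not part of this file.
 */
static int
intr_wait(kcondvar_t *cv, kmutex_t *mp, int interruptible)
{
	k_sigset_t smask;
	int error = 0;

	sigintr(&smask, interruptible);	/* allow only HUP/INT/QUIT/TERM */
	if (cv_wait_sig(cv, mp) == 0)	/* zero => interrupted by a signal */
		error = EINTR;
	sigunintr(&smask);		/* restore the caller's hold mask */
	return (error);
}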
2408 2408
2409 2409 void
2410 2410 sigreplace(k_sigset_t *newmask, k_sigset_t *oldmask)
2411 2411 {
2412 2412 proc_t *p;
2413 2413 int owned;
2414 2414 /*
2415 2415 * Save current signal mask in oldmask, then
2416 2416 * set it to newmask.
2417 2417 */
2418 2418 if (ttolwp(curthread) != NULL) {
2419 2419 p = ttoproc(curthread);
2420 2420 owned = mutex_owned(&p->p_lock); /* this is filthy */
2421 2421 if (!owned)
2422 2422 mutex_enter(&p->p_lock);
2423 2423 schedctl_finish_sigblock(curthread);
2424 2424 if (oldmask != NULL)
2425 2425 *oldmask = curthread->t_hold;
2426 2426 curthread->t_hold = *newmask;
2427 2427 curthread->t_sig_check = 1;
2428 2428 if (!owned)
2429 2429 mutex_exit(&p->p_lock);
2430 2430 }
2431 2431 }
2432 2432
2433 2433 /*
2434 2434 * Return true if the signal number is in range
2435 2435 * and the signal code specifies signal queueing.
2436 2436 */
2437 2437 int
2438 2438 sigwillqueue(int sig, int code)
2439 2439 {
2440 2440 if (sig >= 0 && sig < NSIG) {
2441 2441 switch (code) {
2442 2442 case SI_QUEUE:
2443 2443 case SI_TIMER:
2444 2444 case SI_ASYNCIO:
2445 2445 case SI_MESGQ:
2446 2446 return (1);
2447 2447 }
2448 2448 }
2449 2449 return (0);
2450 2450 }
2451 2451
2452 2452 /*
2453 2453 * The pre-allocated pool (with _SIGQUEUE_PREALLOC entries) is
2454 2454 * allocated at the first sigqueue/signotify call.
2455 2455 */
2456 2456 sigqhdr_t *
2457 2457 sigqhdralloc(size_t size, uint_t maxcount)
2458 2458 {
2459 2459 size_t i;
2460 2460 sigqueue_t *sq, *next;
2461 2461 sigqhdr_t *sqh;
2462 2462
2463 2463 /*
2464 2464 * Before the introduction of process.max-sigqueue-size
2465 2465 * _SC_SIGQUEUE_MAX had this static value.
2466 2466 */
2467 2467 #define _SIGQUEUE_PREALLOC 32
2468 2468
2469 2469 i = (_SIGQUEUE_PREALLOC * size) + sizeof (sigqhdr_t);
2470 2470 ASSERT(maxcount <= INT_MAX);
2471 2471 sqh = kmem_alloc(i, KM_SLEEP);
2472 2472 sqh->sqb_count = maxcount;
2473 2473 sqh->sqb_maxcount = maxcount;
2474 2474 sqh->sqb_size = i;
2475 2475 sqh->sqb_pexited = 0;
2476 2476 sqh->sqb_sent = 0;
2477 2477 sqh->sqb_free = sq = (sigqueue_t *)(sqh + 1);
2478 2478 for (i = _SIGQUEUE_PREALLOC - 1; i != 0; i--) {
2479 2479 next = (sigqueue_t *)((uintptr_t)sq + size);
2480 2480 sq->sq_next = next;
2481 2481 sq = next;
2482 2482 }
2483 2483 sq->sq_next = NULL;
2484 2484 cv_init(&sqh->sqb_cv, NULL, CV_DEFAULT, NULL);
2485 2485 mutex_init(&sqh->sqb_lock, NULL, MUTEX_DEFAULT, NULL);
2486 2486 return (sqh);
2487 2487 }
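The result is a single allocation: the header immediately followed by _SIGQUEUE_PREALLOC fixed-size entries threaded onto sqb_free. Schematically (a sketch of the layout built above):

+-----------+-------+-------+-- ... --+--------+
| sigqhdr_t | sq[0] | sq[1] |         | sq[31] |
+-----------+-------+-------+-- ... --+--------+
sqb_free --> sq[0] --> sq[1] --> ... --> sq[31] --> NULL
(the sq_next pointers form the free list)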
2488 2488
2489 2489 static void sigqrel(sigqueue_t *);
2490 2490
2491 2491 /*
2492 2492 * Allocate a sigqueue/signotify structure from the per process
2493 2493 * pre-allocated pool or allocate a new sigqueue/signotify structure
2494 2494 * if the pre-allocated pool is exhausted.
2495 2495 */
2496 2496 sigqueue_t *
2497 2497 sigqalloc(sigqhdr_t *sqh)
2498 2498 {
2499 2499 sigqueue_t *sq = NULL;
2500 2500
2501 2501 ASSERT(MUTEX_HELD(&curproc->p_lock));
2502 2502
2503 2503 if (sqh != NULL) {
2504 2504 mutex_enter(&sqh->sqb_lock);
2505 2505 if (sqh->sqb_count > 0) {
2506 2506 sqh->sqb_count--;
2507 2507 if (sqh->sqb_free == NULL) {
2508 2508 /*
2509 2509 * The pre-allocated pool is exhausted.
2510 2510 */
2511 2511 sq = kmem_alloc(sizeof (sigqueue_t), KM_SLEEP);
2512 2512 sq->sq_func = NULL;
2513 2513 } else {
2514 2514 sq = sqh->sqb_free;
2515 2515 sq->sq_func = sigqrel;
2516 2516 sqh->sqb_free = sq->sq_next;
2517 2517 }
2518 2518 mutex_exit(&sqh->sqb_lock);
2519 2519 bzero(&sq->sq_info, sizeof (k_siginfo_t));
2520 2520 sq->sq_backptr = sqh;
2521 2521 sq->sq_next = NULL;
2522 2522 sq->sq_external = 0;
2523 2523 } else {
2524 2524 mutex_exit(&sqh->sqb_lock);
2525 2525 }
2526 2526 }
2527 2527 return (sq);
2528 2528 }
2529 2529
2530 2530 /*
2531 2531 * Return a sigqueue structure back to the pre-allocated pool.
2532 2532 */
2533 2533 static void
2534 2534 sigqrel(sigqueue_t *sq)
2535 2535 {
2536 2536 sigqhdr_t *sqh;
2537 2537
2538 2538 /* make sure that p_lock of the affected process is held */
2539 2539
2540 2540 sqh = (sigqhdr_t *)sq->sq_backptr;
2541 2541 mutex_enter(&sqh->sqb_lock);
2542 2542 if (sqh->sqb_pexited && sqh->sqb_sent == 1) {
2543 2543 mutex_exit(&sqh->sqb_lock);
2544 2544 cv_destroy(&sqh->sqb_cv);
2545 2545 mutex_destroy(&sqh->sqb_lock);
2546 2546 kmem_free(sqh, sqh->sqb_size);
2547 2547 } else {
2548 2548 sqh->sqb_count++;
2549 2549 sqh->sqb_sent--;
2550 2550 sq->sq_next = sqh->sqb_free;
2551 2551 sq->sq_backptr = NULL;
2552 2552 sqh->sqb_free = sq;
2553 2553 cv_signal(&sqh->sqb_cv);
2554 2554 mutex_exit(&sqh->sqb_lock);
2555 2555 }
2556 2556 }
2557 2557
2558 2558 /*
2559 2559 * Free up the pre-allocated sigqueue headers of sigqueue pool
2560 2560 * and signotify pool, if possible.
2561 2561 * Called only by the owning process during exec() and exit().
2562 2562 */
2563 2563 void
2564 2564 sigqfree(proc_t *p)
2565 2565 {
2566 2566 ASSERT(MUTEX_HELD(&p->p_lock));
2567 2567
2568 2568 if (p->p_sigqhdr != NULL) { /* sigqueue pool */
2569 2569 sigqhdrfree(p->p_sigqhdr);
2570 2570 p->p_sigqhdr = NULL;
2571 2571 }
2572 2572 if (p->p_signhdr != NULL) { /* signotify pool */
2573 2573 sigqhdrfree(p->p_signhdr);
2574 2574 p->p_signhdr = NULL;
2575 2575 }
2576 2576 }
2577 2577
2578 2578 /*
2579 2579 * Free up the pre-allocated header and sigq pool if possible.
2580 2580 */
2581 2581 void
2582 2582 sigqhdrfree(sigqhdr_t *sqh)
2583 2583 {
2584 2584 mutex_enter(&sqh->sqb_lock);
2585 2585 if (sqh->sqb_sent == 0) {
2586 2586 mutex_exit(&sqh->sqb_lock);
2587 2587 cv_destroy(&sqh->sqb_cv);
2588 2588 mutex_destroy(&sqh->sqb_lock);
2589 2589 kmem_free(sqh, sqh->sqb_size);
2590 2590 } else {
2591 2591 sqh->sqb_pexited = 1;
2592 2592 mutex_exit(&sqh->sqb_lock);
2593 2593 }
2594 2594 }
2595 2595
2596 2596 /*
2597 2597 * Free up a single sigqueue structure.
2598 2598 * No other code should free a sigqueue directly.
2599 2599 */
2600 2600 void
2601 2601 siginfofree(sigqueue_t *sqp)
2602 2602 {
2603 2603 if (sqp != NULL) {
2604 2604 if (sqp->sq_func != NULL)
2605 2605 (sqp->sq_func)(sqp);
2606 2606 else
2607 2607 kmem_free(sqp, sizeof (sigqueue_t));
2608 2608 }
2609 2609 }
2610 2610
2611 2611 /*
2612 2612 * Generate a synchronous signal caused by a hardware
2613 2613 * condition encountered by an lwp. Called from trap().
2614 2614 */
2615 2615 void
2616 2616 trapsig(k_siginfo_t *ip, int restartable)
2617 2617 {
2618 2618 proc_t *p = ttoproc(curthread);
2619 2619 int sig = ip->si_signo;
2620 2620 sigqueue_t *sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);
2621 2621
2622 2622 ASSERT(sig > 0 && sig < NSIG);
2623 2623
2624 2624 if (curthread->t_dtrace_on)
2625 2625 dtrace_safe_synchronous_signal();
2626 2626
2627 2627 mutex_enter(&p->p_lock);
2628 2628 schedctl_finish_sigblock(curthread);
2629 2629 /*
2630 2630 * Avoid a possible infinite loop if the lwp is holding the
2631 2631 * signal generated by a trap of a restartable instruction or
2632 2632 * if the signal so generated is being ignored by the process.
2633 2633 */
2634 2634 if (restartable &&
2635 2635 (sigismember(&curthread->t_hold, sig) ||
2636 2636 p->p_user.u_signal[sig-1] == SIG_IGN)) {
2637 2637 sigdelset(&curthread->t_hold, sig);
2638 2638 p->p_user.u_signal[sig-1] = SIG_DFL;
2639 2639 sigdelset(&p->p_ignore, sig);
2640 2640 }
2641 2641 bcopy(ip, &sqp->sq_info, sizeof (k_siginfo_t));
2642 2642 sigaddqa(p, curthread, sqp);
2643 2643 mutex_exit(&p->p_lock);
2644 2644 }
2645 2645
2646 2646 /*
2647 2647 * Dispatch the real time profiling signal in the traditional way,
2648 2648 * honoring all of the /proc tracing mechanism built into issig().
2649 2649 */
2650 2650 static void
2651 2651 realsigprof_slow(int sysnum, int nsysarg, int error)
2652 2652 {
2653 2653 kthread_t *t = curthread;
2654 2654 proc_t *p = ttoproc(t);
2655 2655 klwp_t *lwp = ttolwp(t);
2656 2656 k_siginfo_t *sip = &lwp->lwp_siginfo;
2657 2657 void (*func)();
2658 2658
2659 2659 mutex_enter(&p->p_lock);
2660 2660 func = PTOU(p)->u_signal[SIGPROF - 1];
2661 2661 if (p->p_rprof_cyclic == CYCLIC_NONE ||
2662 2662 func == SIG_DFL || func == SIG_IGN) {
2663 2663 bzero(t->t_rprof, sizeof (*t->t_rprof));
2664 2664 mutex_exit(&p->p_lock);
2665 2665 return;
2666 2666 }
2667 2667 if (sigismember(&t->t_hold, SIGPROF)) {
2668 2668 mutex_exit(&p->p_lock);
2669 2669 return;
2670 2670 }
2671 2671 sip->si_signo = SIGPROF;
2672 2672 sip->si_code = PROF_SIG;
2673 2673 sip->si_errno = error;
2674 2674 hrt2ts(gethrtime(), &sip->si_tstamp);
2675 2675 sip->si_syscall = sysnum;
2676 2676 sip->si_nsysarg = nsysarg;
2677 2677 sip->si_fault = lwp->lwp_lastfault;
2678 2678 sip->si_faddr = lwp->lwp_lastfaddr;
2679 2679 lwp->lwp_lastfault = 0;
2680 2680 lwp->lwp_lastfaddr = NULL;
2681 2681 sigtoproc(p, t, SIGPROF);
2682 2682 mutex_exit(&p->p_lock);
2683 2683 ASSERT(lwp->lwp_cursig == 0);
2684 2684 if (issig(FORREAL))
2685 2685 psig();
2686 2686 sip->si_signo = 0;
2687 2687 bzero(t->t_rprof, sizeof (*t->t_rprof));
2688 2688 }
2689 2689
2690 2690 /*
2691 2691 * We are not tracing the SIGPROF signal, or doing any other unnatural
2692 2692 * acts, like watchpoints, so dispatch the real time profiling signal
2693 2693 * directly, bypassing all of the overhead built into issig().
2694 2694 */
2695 2695 static void
2696 2696 realsigprof_fast(int sysnum, int nsysarg, int error)
2697 2697 {
2698 2698 kthread_t *t = curthread;
2699 2699 proc_t *p = ttoproc(t);
2700 2700 klwp_t *lwp = ttolwp(t);
2701 2701 k_siginfo_t *sip = &lwp->lwp_siginfo;
2702 2702 void (*func)();
2703 2703 int rc;
2704 2704 int code;
2705 2705
2706 2706 /*
2707 2707 * We don't need to acquire p->p_lock here;
2708 2708 * we are manipulating thread-private data.
2709 2709 */
2710 2710 func = PTOU(p)->u_signal[SIGPROF - 1];
2711 2711 if (p->p_rprof_cyclic == CYCLIC_NONE ||
2712 2712 func == SIG_DFL || func == SIG_IGN) {
2713 2713 bzero(t->t_rprof, sizeof (*t->t_rprof));
2714 2714 return;
2715 2715 }
2716 2716 if (lwp->lwp_cursig != 0 ||
2717 2717 lwp->lwp_curinfo != NULL ||
2718 2718 sigismember(&t->t_hold, SIGPROF)) {
2719 2719 return;
2720 2720 }
2721 2721 sip->si_signo = SIGPROF;
2722 2722 sip->si_code = PROF_SIG;
2723 2723 sip->si_errno = error;
2724 2724 hrt2ts(gethrtime(), &sip->si_tstamp);
2725 2725 sip->si_syscall = sysnum;
2726 2726 sip->si_nsysarg = nsysarg;
2727 2727 sip->si_fault = lwp->lwp_lastfault;
2728 2728 sip->si_faddr = lwp->lwp_lastfaddr;
2729 2729 lwp->lwp_lastfault = 0;
2730 2730 lwp->lwp_lastfaddr = NULL;
2731 2731 if (t->t_flag & T_TOMASK)
2732 2732 t->t_flag &= ~T_TOMASK;
2733 2733 else
2734 2734 lwp->lwp_sigoldmask = t->t_hold;
2735 2735 sigorset(&t->t_hold, &PTOU(p)->u_sigmask[SIGPROF - 1]);
2736 2736 if (!sigismember(&PTOU(p)->u_signodefer, SIGPROF))
2737 2737 sigaddset(&t->t_hold, SIGPROF);
2738 2738 lwp->lwp_extsig = 0;
2739 2739 lwp->lwp_ru.nsignals++;
2740 2740 if (p->p_model == DATAMODEL_NATIVE)
2741 2741 rc = sendsig(SIGPROF, sip, func);
2742 2742 #ifdef _SYSCALL32_IMPL
2743 2743 else
2744 2744 rc = sendsig32(SIGPROF, sip, func);
2745 2745 #endif /* _SYSCALL32_IMPL */
2746 2746 sip->si_signo = 0;
2747 2747 bzero(t->t_rprof, sizeof (*t->t_rprof));
2748 2748 if (rc == 0) {
2749 2749 /*
2750 2750 * sendsig() failed; we must dump core with a SIGSEGV.
2751 2751 * See psig(). This code is copied from there.
2752 2752 */
2753 2753 lwp->lwp_cursig = SIGSEGV;
2754 2754 code = CLD_KILLED;
2755 2755 proc_is_exiting(p);
2756 2756 if (exitlwps(1) != 0) {
2757 2757 mutex_enter(&p->p_lock);
2758 2758 lwp_exit();
2759 2759 }
2760 2760 if (audit_active == C2AUDIT_LOADED)
2761 2761 audit_core_start(SIGSEGV);
2762 2762 if (core(SIGSEGV, 0) == 0)
2763 2763 code = CLD_DUMPED;
2764 2764 if (audit_active == C2AUDIT_LOADED)
2765 2765 audit_core_finish(code);
2766 2766 exit(code, SIGSEGV);
2767 2767 }
2768 2768 }
2769 2769
2770 2770 /*
2771 2771 * Arrange for the real time profiling signal to be dispatched.
2772 2772 */
2773 2773 void
2774 2774 realsigprof(int sysnum, int nsysarg, int error)
2775 2775 {
2776 2776 kthread_t *t = curthread;
2777 2777 proc_t *p = ttoproc(t);
2778 2778
2779 2779 if (t->t_rprof->rp_anystate == 0)
2780 2780 return;
2781 2781
2782 2782 schedctl_finish_sigblock(t);
2783 2783
2784 2784 /* test for any activity that requires p->p_lock */
2785 2785 if (tracing(p, SIGPROF) || pr_watch_active(p) ||
2786 2786 sigismember(&PTOU(p)->u_sigresethand, SIGPROF)) {
2787 2787 /* do it the classic slow way */
2788 2788 realsigprof_slow(sysnum, nsysarg, error);
2789 2789 } else {
2790 2790 /* do it the cheating-a-little fast way */
2791 2791 realsigprof_fast(sysnum, nsysarg, error);
2792 2792 }
2793 2793 }
2794 2794
2795 2795 #ifdef _SYSCALL32_IMPL
2796 2796
2797 2797 /*
2798 2798 * It's tricky to transmit a sigval between 32-bit and 64-bit
2799 2799 * process, since in the 64-bit world, a pointer and an integer
2800 2800 * are different sizes. Since we're constrained by the standards
2801 2801 * world not to change the types, and it's unclear how useful it is
2802 2802 * to send pointers between address spaces this way, we preserve
2803 2803 * the 'int' interpretation for 32-bit processes interoperating
2804 2804 * with 64-bit processes. The full semantics (pointers or integers)
2805 2805 * are available for N-bit processes interoperating with N-bit
2806 2806 * processes.
2807 2807 */
2808 2808 void
2809 2809 siginfo_kto32(const k_siginfo_t *src, siginfo32_t *dest)
2810 2810 {
2811 2811 bzero(dest, sizeof (*dest));
2812 2812
2813 2813 /*
2814 2814 * The absolute minimum content is si_signo and si_code.
2815 2815 */
2816 2816 dest->si_signo = src->si_signo;
2817 2817 if ((dest->si_code = src->si_code) == SI_NOINFO)
2818 2818 return;
2819 2819
2820 2820 /*
2821 2821 * A siginfo generated by user level is structured
2822 2822 * differently from one generated by the kernel.
2823 2823 */
2824 2824 if (SI_FROMUSER(src)) {
2825 2825 dest->si_pid = src->si_pid;
2826 2826 dest->si_ctid = src->si_ctid;
2827 2827 dest->si_zoneid = src->si_zoneid;
2828 2828 dest->si_uid = src->si_uid;
2829 2829 if (SI_CANQUEUE(src->si_code))
2830 2830 dest->si_value.sival_int =
2831 2831 (int32_t)src->si_value.sival_int;
2832 2832 return;
2833 2833 }
2834 2834
2835 2835 dest->si_errno = src->si_errno;
2836 2836
2837 2837 switch (src->si_signo) {
2838 2838 default:
2839 2839 dest->si_pid = src->si_pid;
2840 2840 dest->si_ctid = src->si_ctid;
2841 2841 dest->si_zoneid = src->si_zoneid;
2842 2842 dest->si_uid = src->si_uid;
2843 2843 dest->si_value.sival_int = (int32_t)src->si_value.sival_int;
2844 2844 break;
2845 2845 case SIGCLD:
2846 2846 dest->si_pid = src->si_pid;
2847 2847 dest->si_ctid = src->si_ctid;
2848 2848 dest->si_zoneid = src->si_zoneid;
2849 2849 dest->si_status = src->si_status;
2850 2850 dest->si_stime = src->si_stime;
2851 2851 dest->si_utime = src->si_utime;
2852 2852 break;
2853 2853 case SIGSEGV:
2854 2854 case SIGBUS:
2855 2855 case SIGILL:
2856 2856 case SIGTRAP:
2857 2857 case SIGFPE:
2858 2858 case SIGEMT:
2859 2859 dest->si_addr = (caddr32_t)(uintptr_t)src->si_addr;
2860 2860 dest->si_trapno = src->si_trapno;
2861 2861 dest->si_pc = (caddr32_t)(uintptr_t)src->si_pc;
2862 2862 break;
2863 2863 case SIGPOLL:
2864 2864 case SIGXFSZ:
2865 2865 dest->si_fd = src->si_fd;
2866 2866 dest->si_band = src->si_band;
2867 2867 break;
2868 2868 case SIGPROF:
2869 2869 dest->si_faddr = (caddr32_t)(uintptr_t)src->si_faddr;
2870 2870 dest->si_tstamp.tv_sec = src->si_tstamp.tv_sec;
2871 2871 dest->si_tstamp.tv_nsec = src->si_tstamp.tv_nsec;
2872 2872 dest->si_syscall = src->si_syscall;
2873 2873 dest->si_nsysarg = src->si_nsysarg;
2874 2874 dest->si_fault = src->si_fault;
2875 2875 break;
2876 2876 }
2877 2877 }
2878 2878
2879 2879 void
2880 2880 siginfo_32tok(const siginfo32_t *src, k_siginfo_t *dest)
2881 2881 {
2882 2882 bzero(dest, sizeof (*dest));
2883 2883
2884 2884 /*
2885 2885 * The absolute minimum content is si_signo and si_code.
2886 2886 */
2887 2887 dest->si_signo = src->si_signo;
2888 2888 if ((dest->si_code = src->si_code) == SI_NOINFO)
2889 2889 return;
2890 2890
2891 2891 /*
2892 2892 * A siginfo generated by user level is structured
2893 2893 * differently from one generated by the kernel.
2894 2894 */
2895 2895 if (SI_FROMUSER(src)) {
2896 2896 dest->si_pid = src->si_pid;
2897 2897 dest->si_ctid = src->si_ctid;
2898 2898 dest->si_zoneid = src->si_zoneid;
2899 2899 dest->si_uid = src->si_uid;
2900 2900 if (SI_CANQUEUE(src->si_code))
2901 2901 dest->si_value.sival_int =
2902 2902 (int)src->si_value.sival_int;
2903 2903 return;
2904 2904 }
2905 2905
2906 2906 dest->si_errno = src->si_errno;
2907 2907
2908 2908 switch (src->si_signo) {
2909 2909 default:
2910 2910 dest->si_pid = src->si_pid;
2911 2911 dest->si_ctid = src->si_ctid;
2912 2912 dest->si_zoneid = src->si_zoneid;
2913 2913 dest->si_uid = src->si_uid;
2914 2914 dest->si_value.sival_int = (int)src->si_value.sival_int;
2915 2915 break;
2916 2916 case SIGCLD:
2917 2917 dest->si_pid = src->si_pid;
2918 2918 dest->si_ctid = src->si_ctid;
2919 2919 dest->si_zoneid = src->si_zoneid;
2920 2920 dest->si_status = src->si_status;
2921 2921 dest->si_stime = src->si_stime;
2922 2922 dest->si_utime = src->si_utime;
2923 2923 break;
2924 2924 case SIGSEGV:
2925 2925 case SIGBUS:
2926 2926 case SIGILL:
2927 2927 case SIGTRAP:
2928 2928 case SIGFPE:
2929 2929 case SIGEMT:
2930 2930 dest->si_addr = (void *)(uintptr_t)src->si_addr;
2931 2931 dest->si_trapno = src->si_trapno;
2932 2932 dest->si_pc = (void *)(uintptr_t)src->si_pc;
2933 2933 break;
2934 2934 case SIGPOLL:
2935 2935 case SIGXFSZ:
2936 2936 dest->si_fd = src->si_fd;
2937 2937 dest->si_band = src->si_band;
2938 2938 break;
2939 2939 case SIGPROF:
2940 2940 dest->si_faddr = (void *)(uintptr_t)src->si_faddr;
2941 2941 dest->si_tstamp.tv_sec = src->si_tstamp.tv_sec;
2942 2942 dest->si_tstamp.tv_nsec = src->si_tstamp.tv_nsec;
2943 2943 dest->si_syscall = src->si_syscall;
2944 2944 dest->si_nsysarg = src->si_nsysarg;
2945 2945 dest->si_fault = src->si_fault;
2946 2946 break;
2947 2947 }
2948 2948 }
2949 2949
2950 2950 #endif /* _SYSCALL32_IMPL */