OS-4437 lxbrand ptrace turns harmless signals deadly
Reviewed by: Joshua M. Clulow <jmc@joyent.com>
Reviewed by: Jerry Jelinek <jerry.jelinek@joyent.com>
OS-3742 lxbrand add support for signalfd
OS-4382 remove obsolete brand hooks added during lx development
OS-4306 lxbrand setsockopt(IP_MULTICAST_TTL) handles optlen poorly
OS-4303 lxbrand ltp ptrace05 fails
Reviewed by: Joshua M. Clulow <jmc@joyent.com>
OS-3820 lxbrand ptrace(2): the next generation
OS-3685 lxbrand PTRACE_O_TRACEFORK race condition
OS-3834 lxbrand 64-bit strace(1) reports 64-bit process as using x32 ABI
OS-3794 lxbrand panic on init signal death
Reviewed by: Robert Mustacchi <rm@joyent.com>
Reviewed by: Jerry Jelinek <jerry.jelinek@joyent.com>
Reviewed by: Bryan Cantrill <bryan@joyent.com>
OS-3642 failed assert when exiting zlogin due to OS-3140 fix
OS-3140 In LX zone 'ps fax' does not show all processes
OS-2844 lx brand should support 64-bit user-land
--- old/usr/src/uts/common/os/sig.c
+++ new/usr/src/uts/common/os/sig.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
24 24 * Use is subject to license terms.
25 - * Copyright (c) 2014, Joyent, Inc. All rights reserved.
25 + * Copyright 2015, Joyent, Inc.
26 26 */
27 27
28 28 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
29 29 /* All Rights Reserved */
30 30
31 31 #include <sys/param.h>
32 32 #include <sys/types.h>
33 33 #include <sys/bitmap.h>
34 34 #include <sys/sysmacros.h>
35 35 #include <sys/systm.h>
36 36 #include <sys/cred.h>
37 37 #include <sys/user.h>
38 38 #include <sys/errno.h>
39 39 #include <sys/proc.h>
40 40 #include <sys/poll_impl.h> /* only needed for kludge in sigwaiting_send() */
41 41 #include <sys/signal.h>
42 42 #include <sys/siginfo.h>
43 43 #include <sys/fault.h>
44 44 #include <sys/ucontext.h>
45 45 #include <sys/procfs.h>
46 46 #include <sys/wait.h>
47 47 #include <sys/class.h>
48 48 #include <sys/mman.h>
49 49 #include <sys/procset.h>
50 50 #include <sys/kmem.h>
51 51 #include <sys/cpuvar.h>
52 52 #include <sys/prsystm.h>
53 53 #include <sys/debug.h>
54 54 #include <vm/as.h>
55 55 #include <sys/bitmap.h>
56 56 #include <c2/audit.h>
57 57 #include <sys/core.h>
58 58 #include <sys/schedctl.h>
59 59 #include <sys/contract/process_impl.h>
60 60 #include <sys/cyclic.h>
61 61 #include <sys/dtrace.h>
62 62 #include <sys/sdt.h>
63 63 #include <sys/signalfd.h>
64 +#include <sys/brand.h>
64 65
65 66 const k_sigset_t nullsmask = {0, 0, 0};
66 67
67 68 const k_sigset_t fillset = /* MUST be contiguous */
68 69 {FILLSET0, FILLSET1, FILLSET2};
69 70
70 71 const k_sigset_t cantmask =
71 72 {CANTMASK0, CANTMASK1, CANTMASK2};
72 73
73 74 const k_sigset_t cantreset =
74 75 {(sigmask(SIGILL)|sigmask(SIGTRAP)|sigmask(SIGPWR)), 0, 0};
75 76
76 77 const k_sigset_t ignoredefault =
77 78 {(sigmask(SIGCONT)|sigmask(SIGCLD)|sigmask(SIGPWR)
78 79 |sigmask(SIGWINCH)|sigmask(SIGURG)|sigmask(SIGWAITING)),
79 80 (sigmask(SIGLWP)|sigmask(SIGCANCEL)|sigmask(SIGFREEZE)
80 81 |sigmask(SIGTHAW)|sigmask(SIGXRES)|sigmask(SIGJVM1)
81 82 |sigmask(SIGJVM2)|sigmask(SIGINFO)), 0};
82 83
83 84 const k_sigset_t stopdefault =
84 85 {(sigmask(SIGSTOP)|sigmask(SIGTSTP)|sigmask(SIGTTOU)|sigmask(SIGTTIN)),
85 86 0, 0};
86 87
87 88 const k_sigset_t coredefault =
88 89 {(sigmask(SIGQUIT)|sigmask(SIGILL)|sigmask(SIGTRAP)|sigmask(SIGIOT)
89 90 |sigmask(SIGEMT)|sigmask(SIGFPE)|sigmask(SIGBUS)|sigmask(SIGSEGV)
90 91 |sigmask(SIGSYS)|sigmask(SIGXCPU)|sigmask(SIGXFSZ)), 0, 0};
91 92
92 93 const k_sigset_t holdvfork =
93 94 {(sigmask(SIGTTOU)|sigmask(SIGTTIN)|sigmask(SIGTSTP)), 0, 0};
94 95
95 96 static int isjobstop(int);
96 97 static void post_sigcld(proc_t *, sigqueue_t *);
97 98
98 99
99 100 /*
100 101 * signalfd helper function which is set when the signalfd driver loads.
101 102 */
102 103 void (*sigfd_exit_helper)();
103 104
104 105 /*
 105  106  * Internal variables for counting the number of user thread stop requests
 106  107  * posted.  They may not be accurate in some special situations, such as
 107  108  * when a virtually stopped thread starts to run.
108 109 */
109 110 static int num_utstop;
110 111 /*
111 112 * Internal variables for broadcasting an event when all thread stop requests
112 113 * are processed.
113 114 */
114 115 static kcondvar_t utstop_cv;
115 116
116 117 static kmutex_t thread_stop_lock;
117 118 void del_one_utstop(void);
118 119
119 120 /*
120 121 * Send the specified signal to the specified process.
121 122 */
122 123 void
123 124 psignal(proc_t *p, int sig)
124 125 {
125 126 mutex_enter(&p->p_lock);
126 127 sigtoproc(p, NULL, sig);
127 128 mutex_exit(&p->p_lock);
128 129 }
129 130
130 131 /*
131 132 * Send the specified signal to the specified thread.
132 133 */
133 134 void
134 135 tsignal(kthread_t *t, int sig)
135 136 {
136 137 proc_t *p = ttoproc(t);
137 138
138 139 mutex_enter(&p->p_lock);
139 140 sigtoproc(p, t, sig);
140 141 mutex_exit(&p->p_lock);
141 142 }
142 143
143 144 int
144 145 signal_is_blocked(kthread_t *t, int sig)
145 146 {
146 147 return (sigismember(&t->t_hold, sig) ||
147 148 (schedctl_sigblock(t) && !sigismember(&cantmask, sig)));
148 149 }
149 150
150 151 /*
152 + * Return true if the signal can safely be ignored.
 153  + * That is, if the signal is in the p_ignore mask and the process's
 154  + * brand, if any, does not forbid ignoring it.
155 + */
156 +static int
157 +sig_ignorable(proc_t *p, klwp_t *lwp, int sig)
158 +{
159 + return (sigismember(&p->p_ignore, sig) && /* sig in ignore mask */
160 + !(PROC_IS_BRANDED(p) && /* allowed by brand */
161 + BROP(p)->b_sig_ignorable != NULL &&
162 + BROP(p)->b_sig_ignorable(p, lwp, sig) == B_FALSE));
163 +
164 +}
165 +
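[Editor's aside: a brand opts into this veto by supplying a b_sig_ignorable hook in its brand ops. The sketch below is hypothetical — the function name and the lx_is_traced() predicate are illustrative, not the actual lx implementation. Returning B_FALSE makes sig_ignorable() report the signal as not ignorable, so normal delivery proceeds and a tracer can observe it (cf. OS-4437).]

	static boolean_t
	lx_sig_ignorable(proc_t *p, klwp_t *lwp, int sig)
	{
		/*
		 * Hypothetical policy: while a tracer is attached to this
		 * lwp, no signal may be silently discarded as "ignored".
		 */
		if (lwp != NULL && lx_is_traced(lwp))	/* assumed predicate */
			return (B_FALSE);

		return (B_TRUE);
	}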
166 +/*
151 167 * Return true if the signal can safely be discarded on generation.
152 168 * That is, if there is no need for the signal on the receiving end.
153 169 * The answer is true if the process is a zombie or
154 170 * if all of these conditions are true:
155 171 * the signal is being ignored
156 172 * the process is single-threaded
157 173 * the signal is not being traced by /proc
158 174 * the signal is not blocked by the process
159 175 * the signal is not being accepted via sigwait()
160 176 */
161 177 static int
162 -sig_discardable(proc_t *p, int sig)
178 +sig_discardable(proc_t *p, kthread_t *tp, int sig)
163 179 {
164 180 kthread_t *t = p->p_tlist;
181 + klwp_t *lwp = (tp == NULL) ? NULL : tp->t_lwp;
165 182
166 183 return (t == NULL || /* if zombie or ... */
167 - (sigismember(&p->p_ignore, sig) && /* signal is ignored */
184 + (sig_ignorable(p, lwp, sig) && /* signal is ignored */
168 185 t->t_forw == t && /* and single-threaded */
169 186 !tracing(p, sig) && /* and no /proc tracing */
170 187 !signal_is_blocked(t, sig) && /* and signal not blocked */
171 188 !sigismember(&t->t_sigwait, sig))); /* and not being accepted */
172 189 }
173 190
174 191 /*
175 192 * Return true if this thread is going to eat this signal soon.
176 193 * Note that, if the signal is SIGKILL, we force stopped threads to be
177 194 * set running (to make SIGKILL be a sure kill), but only if the process
178 195 * is not currently locked by /proc (the P_PR_LOCK flag). Code in /proc
179 196 * relies on the fact that a process will not change shape while P_PR_LOCK
180 197 * is set (it drops and reacquires p->p_lock while leaving P_PR_LOCK set).
181 198 * We wish that we could simply call prbarrier() below, in sigtoproc(), to
182 199 * ensure that the process is not locked by /proc, but prbarrier() drops
183 200 * and reacquires p->p_lock and dropping p->p_lock here would be damaging.
184 201 */
185 202 int
186 203 eat_signal(kthread_t *t, int sig)
187 204 {
188 205 int rval = 0;
189 206 ASSERT(THREAD_LOCK_HELD(t));
190 207
191 208 /*
192 209 * Do not do anything if the target thread has the signal blocked.
193 210 */
194 211 if (!signal_is_blocked(t, sig)) {
195 212 t->t_sig_check = 1; /* have thread do an issig */
196 213 if (ISWAKEABLE(t) || ISWAITING(t)) {
197 214 setrun_locked(t);
198 215 rval = 1;
199 216 } else if (t->t_state == TS_STOPPED && sig == SIGKILL &&
200 217 !(ttoproc(t)->p_proc_flag & P_PR_LOCK)) {
201 218 ttoproc(t)->p_stopsig = 0;
202 219 t->t_dtrace_stop = 0;
203 - t->t_schedflag |= TS_XSTART | TS_PSTART;
220 + t->t_schedflag |= TS_XSTART | TS_PSTART | TS_BSTART;
204 221 setrun_locked(t);
205 222 } else if (t != curthread && t->t_state == TS_ONPROC) {
206 223 aston(t); /* make it do issig promptly */
207 224 if (t->t_cpu != CPU)
208 225 poke_cpu(t->t_cpu->cpu_id);
209 226 rval = 1;
210 227 } else if (t->t_state == TS_RUN) {
211 228 rval = 1;
212 229 }
213 230 }
214 231
215 232 return (rval);
216 233 }
217 234
218 235 /*
219 236 * Post a signal.
220 237 * If a non-null thread pointer is passed, then post the signal
221 238 * to the thread/lwp, otherwise post the signal to the process.
222 239 */
223 240 void
224 241 sigtoproc(proc_t *p, kthread_t *t, int sig)
225 242 {
226 243 kthread_t *tt;
227 244 int ext = !(curproc->p_flag & SSYS) &&
228 245 (curproc->p_ct_process != p->p_ct_process);
229 246
230 247 ASSERT(MUTEX_HELD(&p->p_lock));
231 248
232 249 /* System processes don't get signals */
233 250 if (sig <= 0 || sig >= NSIG || (p->p_flag & SSYS))
234 251 return;
235 252
236 253 /*
237 254 * Regardless of origin or directedness,
238 255 * SIGKILL kills all lwps in the process immediately
239 256 * and jobcontrol signals affect all lwps in the process.
240 257 */
241 258 if (sig == SIGKILL) {
242 259 p->p_flag |= SKILLED | (ext ? SEXTKILLED : 0);
243 260 t = NULL;
244 261 } else if (sig == SIGCONT) {
245 262 /*
246 263 * The SSCONT flag will remain set until a stopping
247 264 * signal comes in (below). This is harmless.
248 265 */
249 266 p->p_flag |= SSCONT;
250 267 sigdelq(p, NULL, SIGSTOP);
251 268 sigdelq(p, NULL, SIGTSTP);
252 269 sigdelq(p, NULL, SIGTTOU);
253 270 sigdelq(p, NULL, SIGTTIN);
254 271 sigdiffset(&p->p_sig, &stopdefault);
255 272 sigdiffset(&p->p_extsig, &stopdefault);
256 273 p->p_stopsig = 0;
257 274 if ((tt = p->p_tlist) != NULL) {
258 275 do {
259 276 sigdelq(p, tt, SIGSTOP);
260 277 sigdelq(p, tt, SIGTSTP);
261 278 sigdelq(p, tt, SIGTTOU);
262 279 sigdelq(p, tt, SIGTTIN);
263 280 sigdiffset(&tt->t_sig, &stopdefault);
264 281 sigdiffset(&tt->t_extsig, &stopdefault);
265 282 } while ((tt = tt->t_forw) != p->p_tlist);
266 283 }
267 284 if ((tt = p->p_tlist) != NULL) {
268 285 do {
269 286 thread_lock(tt);
270 287 if (tt->t_state == TS_STOPPED &&
271 288 tt->t_whystop == PR_JOBCONTROL) {
272 289 tt->t_schedflag |= TS_XSTART;
273 290 setrun_locked(tt);
274 291 }
275 292 thread_unlock(tt);
276 293 } while ((tt = tt->t_forw) != p->p_tlist);
277 294 }
278 295 } else if (sigismember(&stopdefault, sig)) {
279 296 /*
280 297 * This test has a race condition which we can't fix:
281 298 * By the time the stopping signal is received by
282 299 * the target process/thread, the signal handler
283 300 * and/or the detached state might have changed.
284 301 */
285 302 if (PTOU(p)->u_signal[sig-1] == SIG_DFL &&
286 303 (sig == SIGSTOP || !p->p_pgidp->pid_pgorphaned))
287 304 p->p_flag &= ~SSCONT;
288 305 sigdelq(p, NULL, SIGCONT);
289 306 sigdelset(&p->p_sig, SIGCONT);
290 307 sigdelset(&p->p_extsig, SIGCONT);
291 308 if ((tt = p->p_tlist) != NULL) {
292 309 do {
293 310 sigdelq(p, tt, SIGCONT);
294 311 sigdelset(&tt->t_sig, SIGCONT);
295 312 sigdelset(&tt->t_extsig, SIGCONT);
296 313 } while ((tt = tt->t_forw) != p->p_tlist);
297 314 }
298 315 }
299 316
300 - if (sig_discardable(p, sig)) {
317 + if (sig_discardable(p, t, sig)) {
301 318 DTRACE_PROC3(signal__discard, kthread_t *, p->p_tlist,
302 319 proc_t *, p, int, sig);
303 320 return;
304 321 }
305 322
306 323 if (t != NULL) {
307 324 /*
308 325 * This is a directed signal, wake up the lwp.
309 326 */
310 327 sigaddset(&t->t_sig, sig);
311 328 if (ext)
312 329 sigaddset(&t->t_extsig, sig);
313 330 thread_lock(t);
314 331 (void) eat_signal(t, sig);
315 332 thread_unlock(t);
316 333 DTRACE_PROC2(signal__send, kthread_t *, t, int, sig);
317 334 if (p->p_sigfd != NULL && ((sigfd_proc_state_t *)
318 335 (p->p_sigfd))->sigfd_pollwake_cb != NULL)
319 336 (*((sigfd_proc_state_t *)(p->p_sigfd))->
320 337 sigfd_pollwake_cb)(p, sig);
321 338
322 339 } else if ((tt = p->p_tlist) != NULL) {
323 340 /*
324 341 * Make sure that some lwp that already exists
325 342 * in the process fields the signal soon.
326 343 * Wake up an interruptibly sleeping lwp if necessary.
327 344 * For SIGKILL make all of the lwps see the signal;
328 345 * This is needed to guarantee a sure kill for processes
329 346 * with a mix of realtime and non-realtime threads.
330 347 */
331 348 int su = 0;
332 349
333 350 sigaddset(&p->p_sig, sig);
334 351 if (ext)
335 352 sigaddset(&p->p_extsig, sig);
336 353 do {
337 354 thread_lock(tt);
338 355 if (eat_signal(tt, sig) && sig != SIGKILL) {
339 356 thread_unlock(tt);
340 357 break;
341 358 }
342 359 if (SUSPENDED(tt))
343 360 su++;
344 361 thread_unlock(tt);
345 362 } while ((tt = tt->t_forw) != p->p_tlist);
346 363 /*
347 364 * If the process is deadlocked, make somebody run and die.
348 365 */
349 366 if (sig == SIGKILL && p->p_stat != SIDL &&
350 367 p->p_lwprcnt == 0 && p->p_lwpcnt == su &&
351 368 !(p->p_proc_flag & P_PR_LOCK)) {
352 369 thread_lock(tt);
353 370 p->p_lwprcnt++;
354 371 tt->t_schedflag |= TS_CSTART;
355 372 setrun_locked(tt);
356 373 thread_unlock(tt);
357 374 }
358 375
359 376 DTRACE_PROC2(signal__send, kthread_t *, tt, int, sig);
360 377 if (p->p_sigfd != NULL && ((sigfd_proc_state_t *)
361 378 (p->p_sigfd))->sigfd_pollwake_cb != NULL)
362 379 (*((sigfd_proc_state_t *)(p->p_sigfd))->
363 380 sigfd_pollwake_cb)(p, sig);
364 381 }
365 382 }
366 383
367 384 static int
368 385 isjobstop(int sig)
369 386 {
370 387 proc_t *p = ttoproc(curthread);
371 388
372 389 ASSERT(MUTEX_HELD(&p->p_lock));
373 390
374 391 if (PTOU(curproc)->u_signal[sig-1] == SIG_DFL &&
375 392 sigismember(&stopdefault, sig)) {
376 393 /*
377 394 * If SIGCONT has been posted since we promoted this signal
378 395 * from pending to current, then don't do a jobcontrol stop.
379 396 */
380 397 if (!(p->p_flag & SSCONT) &&
381 398 (sig == SIGSTOP || !p->p_pgidp->pid_pgorphaned) &&
382 399 curthread != p->p_agenttp) {
383 400 sigqueue_t *sqp;
384 401
385 402 stop(PR_JOBCONTROL, sig);
386 403 mutex_exit(&p->p_lock);
387 404 sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);
388 405 mutex_enter(&pidlock);
389 406 /*
390 407 * Only the first lwp to continue notifies the parent.
391 408 */
392 409 if (p->p_pidflag & CLDCONT)
393 410 siginfofree(sqp);
394 411 else {
395 412 p->p_pidflag |= CLDCONT;
396 413 p->p_wcode = CLD_CONTINUED;
397 414 p->p_wdata = SIGCONT;
398 415 sigcld(p, sqp);
399 416 }
400 417 mutex_exit(&pidlock);
401 418 mutex_enter(&p->p_lock);
402 419 }
403 420 return (1);
404 421 }
405 422 return (0);
406 423 }
407 424
408 425 /*
409 426 * Returns true if the current process has a signal to process, and
410 427 * the signal is not held. The signal to process is put in p_cursig.
411 428 * This is asked at least once each time a process enters the system
412 429 * (though this can usually be done without actually calling issig by
413 430 * checking the pending signal masks). A signal does not do anything
414 431 * directly to a process; it sets a flag that asks the process to do
415 432 * something to itself.
416 433 *
417 434 * The "why" argument indicates the allowable side-effects of the call:
418 435 *
419 436 * FORREAL: Extract the next pending signal from p_sig into p_cursig;
420 437 * stop the process if a stop has been requested or if a traced signal
421 438 * is pending.
422 439 *
423 440 * JUSTLOOKING: Don't stop the process, just indicate whether or not
424 441 * a signal might be pending (FORREAL is needed to tell for sure).
425 442 *
426 443 * XXX: Changes to the logic in these routines should be propagated
427 444 * to lm_sigispending(). See bug 1201594.
428 445 */
429 446
430 447 static int issig_forreal(void);
431 448 static int issig_justlooking(void);
432 449
433 450 int
434 451 issig(int why)
435 452 {
436 453 ASSERT(why == FORREAL || why == JUSTLOOKING);
437 454
438 455 return ((why == FORREAL)? issig_forreal() : issig_justlooking());
439 456 }
440 457
441 458
442 459 static int
443 460 issig_justlooking(void)
444 461 {
445 462 kthread_t *t = curthread;
446 463 klwp_t *lwp = ttolwp(t);
447 464 proc_t *p = ttoproc(t);
448 465 k_sigset_t set;
449 466
450 467 /*
451 468 * This function answers the question:
452 469 * "Is there any reason to call issig_forreal()?"
453 470 *
454 471 * We have to answer the question w/o grabbing any locks
455 472 * because we are (most likely) being called after we
456 473 * put ourselves on the sleep queue.
457 474 */
458 475
459 476 if (t->t_dtrace_stop | t->t_dtrace_sig)
460 477 return (1);
461 478
462 479 /*
463 480 * Another piece of complexity in this process. When single-stepping a
464 481 * process, we don't want an intervening signal or TP_PAUSE request to
465 482 * suspend the current thread. Otherwise, the controlling process will
 466  484  * hang because we will be stopped with TS_PSTART set in t_schedflag.
467 484 * We will trigger any remaining signals when we re-enter the kernel on
468 485 * the single step trap.
469 486 */
470 487 if (lwp->lwp_pcb.pcb_flags & NORMAL_STEP)
471 488 return (0);
472 489
473 490 if ((lwp->lwp_asleep && MUSTRETURN(p, t)) ||
474 491 (p->p_flag & (SEXITLWPS|SKILLED)) ||
475 492 (lwp->lwp_nostop == 0 &&
476 493 (p->p_stopsig | (p->p_flag & (SHOLDFORK1|SHOLDWATCH)) |
477 494 (t->t_proc_flag &
478 495 (TP_PRSTOP|TP_HOLDLWP|TP_CHKPT|TP_PAUSE)))) ||
479 496 lwp->lwp_cursig)
480 497 return (1);
481 498
482 499 if (p->p_flag & SVFWAIT)
483 500 return (0);
484 501 set = p->p_sig;
485 502 sigorset(&set, &t->t_sig);
486 503 if (schedctl_sigblock(t)) /* all blockable signals blocked */
487 504 sigandset(&set, &cantmask);
488 505 else
489 506 sigdiffset(&set, &t->t_hold);
490 507 if (p->p_flag & SVFORK)
491 508 sigdiffset(&set, &holdvfork);
492 509
493 510 if (!sigisempty(&set)) {
494 511 int sig;
495 512
496 513 for (sig = 1; sig < NSIG; sig++) {
497 514 if (sigismember(&set, sig) &&
498 515 (tracing(p, sig) ||
499 516 sigismember(&t->t_sigwait, sig) ||
500 - !sigismember(&p->p_ignore, sig))) {
517 + !sig_ignorable(p, lwp, sig))) {
501 518 /*
502 519 * Don't promote a signal that will stop
503 520 * the process when lwp_nostop is set.
504 521 */
505 522 if (!lwp->lwp_nostop ||
506 523 PTOU(p)->u_signal[sig-1] != SIG_DFL ||
507 524 !sigismember(&stopdefault, sig))
508 525 return (1);
509 526 }
510 527 }
511 528 }
512 529
513 530 return (0);
514 531 }
515 532
516 533 static int
517 534 issig_forreal(void)
518 535 {
519 536 int sig = 0, ext = 0;
520 537 kthread_t *t = curthread;
521 538 klwp_t *lwp = ttolwp(t);
522 539 proc_t *p = ttoproc(t);
523 540 int toproc = 0;
524 541 int sigcld_found = 0;
525 542 int nostop_break = 0;
526 543
527 544 ASSERT(t->t_state == TS_ONPROC);
528 545
529 546 mutex_enter(&p->p_lock);
530 547 schedctl_finish_sigblock(t);
531 548
532 549 if (t->t_dtrace_stop | t->t_dtrace_sig) {
533 550 if (t->t_dtrace_stop) {
534 551 /*
535 552 * If DTrace's "stop" action has been invoked on us,
536 553 * set TP_PRSTOP.
537 554 */
538 555 t->t_proc_flag |= TP_PRSTOP;
539 556 }
540 557
541 558 if (t->t_dtrace_sig != 0) {
542 559 k_siginfo_t info;
543 560
544 561 /*
545 562 * Post the signal generated as the result of
546 563 * DTrace's "raise" action as a normal signal before
547 564 * the full-fledged signal checking begins.
548 565 */
549 566 bzero(&info, sizeof (info));
550 567 info.si_signo = t->t_dtrace_sig;
551 568 info.si_code = SI_DTRACE;
552 569
553 570 sigaddq(p, NULL, &info, KM_NOSLEEP);
554 571
555 572 t->t_dtrace_sig = 0;
556 573 }
557 574 }
558 575
559 576 for (;;) {
560 577 if (p->p_flag & (SEXITLWPS|SKILLED)) {
561 578 lwp->lwp_cursig = sig = SIGKILL;
562 579 lwp->lwp_extsig = ext = (p->p_flag & SEXTKILLED) != 0;
563 580 t->t_sig_check = 1;
564 581 break;
565 582 }
566 583
567 584 /*
568 585 * Another piece of complexity in this process. When
569 586 * single-stepping a process, we don't want an intervening
570 587 * signal or TP_PAUSE request to suspend the current thread.
 571  588  * Otherwise, the controlling process will hang because we will
572 589 * be stopped with TS_PSTART set in t_schedflag. We will
573 590 * trigger any remaining signals when we re-enter the kernel on
574 591 * the single step trap.
575 592 */
576 593 if (lwp->lwp_pcb.pcb_flags & NORMAL_STEP) {
577 594 sig = 0;
578 595 break;
579 596 }
580 597
581 598 /*
582 599 * Hold the lwp here for watchpoint manipulation.
583 600 */
584 601 if ((t->t_proc_flag & TP_PAUSE) && !lwp->lwp_nostop) {
585 602 stop(PR_SUSPENDED, SUSPEND_PAUSE);
586 603 continue;
587 604 }
588 605
589 606 if (lwp->lwp_asleep && MUSTRETURN(p, t)) {
590 607 if ((sig = lwp->lwp_cursig) != 0) {
591 608 /*
592 609 * Make sure we call ISSIG() in post_syscall()
593 610 * to re-validate this current signal.
594 611 */
595 612 t->t_sig_check = 1;
596 613 }
597 614 break;
598 615 }
599 616
600 617 /*
601 618 * If the request is PR_CHECKPOINT, ignore the rest of signals
602 619 * or requests. Honor other stop requests or signals later.
603 620 * Go back to top of loop here to check if an exit or hold
604 621 * event has occurred while stopped.
605 622 */
606 623 if ((t->t_proc_flag & TP_CHKPT) && !lwp->lwp_nostop) {
607 624 stop(PR_CHECKPOINT, 0);
608 625 continue;
609 626 }
610 627
611 628 /*
612 629 * Honor SHOLDFORK1, SHOLDWATCH, and TP_HOLDLWP before dealing
613 630 * with signals or /proc. Another lwp is executing fork1(),
614 631 * or is undergoing watchpoint activity (remapping a page),
615 632 * or is executing lwp_suspend() on this lwp.
616 633 * Again, go back to top of loop to check if an exit
617 634 * or hold event has occurred while stopped.
618 635 */
619 636 if (((p->p_flag & (SHOLDFORK1|SHOLDWATCH)) ||
620 637 (t->t_proc_flag & TP_HOLDLWP)) && !lwp->lwp_nostop) {
621 638 stop(PR_SUSPENDED, SUSPEND_NORMAL);
622 639 continue;
623 640 }
624 641
625 642 /*
643 + * Allow the brand the chance to alter (or suppress) delivery
644 + * of this signal.
645 + */
646 + if (PROC_IS_BRANDED(p) && BROP(p)->b_issig_stop != NULL) {
647 + /*
648 + * The brand hook will return 0 if it would like
649 + * us to drive on, or -1 if we should restart
650 + * the loop to check other conditions.
651 + */
652 + if (BROP(p)->b_issig_stop(p, lwp) != 0) {
653 + continue;
654 + }
655 + }
656 +
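[Editor's aside: on the brand side, a hook that wants this lwp to park for a brand-private reason can stop it with the new PR_BRAND reason and return -1 so the loop above is restarted. A minimal sketch, with lx_stop_requested() assumed rather than taken from the actual lx code; p_lock is held here, matching the caller.]

	static int
	lx_issig_stop(proc_t *p, klwp_t *lwp)
	{
		ASSERT(MUTEX_HELD(&p->p_lock));

		if (lx_stop_requested(lwp)) {	/* assumed brand-private check */
			stop(PR_BRAND, 0);	/* parks this lwp; see stop() */
			return (-1);		/* restart the signal-check loop */
		}

		return (0);			/* drive on with normal delivery */
	}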
657 + /*
626 658 * Honor requested stop before dealing with the
627 659 * current signal; a debugger may change it.
628 660 * Do not want to go back to loop here since this is a special
629 661 * stop that means: make incremental progress before the next
630 662 * stop. The danger is that returning to top of loop would most
631 663 * likely drop the thread right back here to stop soon after it
632 664 * was continued, violating the incremental progress request.
633 665 */
634 666 if ((t->t_proc_flag & TP_PRSTOP) && !lwp->lwp_nostop)
635 667 stop(PR_REQUESTED, 0);
636 668
637 669 /*
638 670 * If a debugger wants us to take a signal it will have
639 671 * left it in lwp->lwp_cursig. If lwp_cursig has been cleared
640 672 * or if it's being ignored, we continue on looking for another
641 673 * signal. Otherwise we return the specified signal, provided
642 674 * it's not a signal that causes a job control stop.
643 675 *
644 676 * When stopped on PR_JOBCONTROL, there is no current
645 677 * signal; we cancel lwp->lwp_cursig temporarily before
646 678 * calling isjobstop(). The current signal may be reset
647 679 * by a debugger while we are stopped in isjobstop().
648 680 *
649 681 * If the current thread is accepting the signal
650 682 * (via sigwait(), sigwaitinfo(), or sigtimedwait()),
651 683 * we allow the signal to be accepted, even if it is
652 684 * being ignored, and without causing a job control stop.
653 685 */
654 686 if ((sig = lwp->lwp_cursig) != 0) {
655 687 ext = lwp->lwp_extsig;
656 688 lwp->lwp_cursig = 0;
657 689 lwp->lwp_extsig = 0;
658 690 if (sigismember(&t->t_sigwait, sig) ||
659 - (!sigismember(&p->p_ignore, sig) &&
691 + (!sig_ignorable(p, lwp, sig) &&
660 692 !isjobstop(sig))) {
661 693 if (p->p_flag & (SEXITLWPS|SKILLED)) {
662 694 sig = SIGKILL;
663 695 ext = (p->p_flag & SEXTKILLED) != 0;
664 696 }
665 697 lwp->lwp_cursig = (uchar_t)sig;
666 698 lwp->lwp_extsig = (uchar_t)ext;
667 699 break;
668 700 }
669 701 /*
670 702 * The signal is being ignored or it caused a
671 703 * job-control stop. If another current signal
672 704 * has not been established, return the current
673 705 * siginfo, if any, to the memory manager.
674 706 */
675 707 if (lwp->lwp_cursig == 0 && lwp->lwp_curinfo != NULL) {
676 708 siginfofree(lwp->lwp_curinfo);
677 709 lwp->lwp_curinfo = NULL;
678 710 }
679 711 /*
680 712 * Loop around again in case we were stopped
681 713 * on a job control signal and a /proc stop
682 714 * request was posted or another current signal
683 715 * was established while we were stopped.
684 716 */
685 717 continue;
686 718 }
687 719
688 720 if (p->p_stopsig && !lwp->lwp_nostop &&
689 721 curthread != p->p_agenttp) {
690 722 /*
691 723 * Some lwp in the process has already stopped
692 724 * showing PR_JOBCONTROL. This is a stop in
693 725 * sympathy with the other lwp, even if this
694 726 * lwp is blocking the stopping signal.
695 727 */
696 728 stop(PR_JOBCONTROL, p->p_stopsig);
697 729 continue;
698 730 }
699 731
700 732 /*
701 733 * Loop on the pending signals until we find a
702 734 * non-held signal that is traced or not ignored.
703 735 * First check the signals pending for the lwp,
704 736 * then the signals pending for the process as a whole.
705 737 */
706 738 for (;;) {
707 739 if ((sig = fsig(&t->t_sig, t)) != 0) {
708 740 toproc = 0;
709 741 if (tracing(p, sig) ||
710 742 sigismember(&t->t_sigwait, sig) ||
711 - !sigismember(&p->p_ignore, sig)) {
743 + !sig_ignorable(p, lwp, sig)) {
712 744 if (sigismember(&t->t_extsig, sig))
713 745 ext = 1;
714 746 break;
715 747 }
716 748 sigdelset(&t->t_sig, sig);
717 749 sigdelset(&t->t_extsig, sig);
718 750 sigdelq(p, t, sig);
719 751 } else if ((sig = fsig(&p->p_sig, t)) != 0) {
720 752 if (sig == SIGCLD)
721 753 sigcld_found = 1;
722 754 toproc = 1;
723 755 if (tracing(p, sig) ||
724 756 sigismember(&t->t_sigwait, sig) ||
725 - !sigismember(&p->p_ignore, sig)) {
757 + !sig_ignorable(p, lwp, sig)) {
726 758 if (sigismember(&p->p_extsig, sig))
727 759 ext = 1;
728 760 break;
729 761 }
730 762 sigdelset(&p->p_sig, sig);
731 763 sigdelset(&p->p_extsig, sig);
732 764 sigdelq(p, NULL, sig);
733 765 } else {
734 766 /* no signal was found */
735 767 break;
736 768 }
737 769 }
738 770
739 771 if (sig == 0) { /* no signal was found */
740 772 if (p->p_flag & (SEXITLWPS|SKILLED)) {
741 773 lwp->lwp_cursig = SIGKILL;
742 774 sig = SIGKILL;
743 775 ext = (p->p_flag & SEXTKILLED) != 0;
744 776 }
745 777 break;
746 778 }
747 779
748 780 /*
749 781 * If we have been informed not to stop (i.e., we are being
750 782 * called from within a network operation), then don't promote
751 783 * the signal at this time, just return the signal number.
752 784 * We will call issig() again later when it is safe.
753 785 *
754 786 * fsig() does not return a jobcontrol stopping signal
755 787 * with a default action of stopping the process if
756 788 * lwp_nostop is set, so we won't be causing a bogus
757 789 * EINTR by this action. (Such a signal is eaten by
758 790 * isjobstop() when we loop around to do final checks.)
759 791 */
760 792 if (lwp->lwp_nostop) {
761 793 nostop_break = 1;
762 794 break;
763 795 }
764 796
765 797 /*
766 798 * Promote the signal from pending to current.
767 799 *
768 800 * Note that sigdeq() will set lwp->lwp_curinfo to NULL
769 801 * if no siginfo_t exists for this signal.
770 802 */
771 803 lwp->lwp_cursig = (uchar_t)sig;
772 804 lwp->lwp_extsig = (uchar_t)ext;
773 805 t->t_sig_check = 1; /* so post_syscall will see signal */
774 806 ASSERT(lwp->lwp_curinfo == NULL);
775 807 sigdeq(p, toproc ? NULL : t, sig, &lwp->lwp_curinfo);
776 808
777 809 if (tracing(p, sig))
778 810 stop(PR_SIGNALLED, sig);
779 811
780 812 /*
781 813 * Loop around to check for requested stop before
782 814 * performing the usual current-signal actions.
783 815 */
784 816 }
785 817
786 818 mutex_exit(&p->p_lock);
787 819
788 820 /*
789 821 * If SIGCLD was dequeued from the process's signal queue,
790 822 * search for other pending SIGCLD's from the list of children.
791 823 */
792 824 if (sigcld_found)
793 825 sigcld_repost();
794 826
795 827 if (sig != 0)
796 828 (void) undo_watch_step(NULL);
797 829
798 830 /*
799 831 * If we have been blocked since the p_lock was dropped off
800 832 * above, then this promoted signal might have been handled
801 833 * already when we were on the way back from sleep queue, so
802 834 * just ignore it.
803 835 * If we have been informed not to stop, just return the signal
804 836 * number. Also see comments above.
805 837 */
806 838 if (!nostop_break) {
807 839 sig = lwp->lwp_cursig;
808 840 }
809 841
810 842 return (sig != 0);
811 843 }
812 844
813 845 /*
814 846 * Return true if the process is currently stopped showing PR_JOBCONTROL.
815 847 * This is true only if all of the process's lwp's are so stopped.
816 848 * If this is asked by one of the lwps in the process, exclude that lwp.
817 849 */
818 850 int
819 851 jobstopped(proc_t *p)
820 852 {
821 853 kthread_t *t;
822 854
823 855 ASSERT(MUTEX_HELD(&p->p_lock));
824 856
825 857 if ((t = p->p_tlist) == NULL)
826 858 return (0);
827 859
828 860 do {
829 861 thread_lock(t);
830 862 /* ignore current, zombie and suspended lwps in the test */
831 863 if (!(t == curthread || t->t_state == TS_ZOMB ||
832 864 SUSPENDED(t)) &&
833 865 (t->t_state != TS_STOPPED ||
834 866 t->t_whystop != PR_JOBCONTROL)) {
835 867 thread_unlock(t);
836 868 return (0);
837 869 }
838 870 thread_unlock(t);
839 871 } while ((t = t->t_forw) != p->p_tlist);
840 872
841 873 return (1);
842 874 }
843 875
844 876 /*
 845  877  * Put ourselves (curthread) into the stopped state and notify tracers.
846 878 */
847 879 void
848 880 stop(int why, int what)
849 881 {
850 882 kthread_t *t = curthread;
851 883 proc_t *p = ttoproc(t);
852 884 klwp_t *lwp = ttolwp(t);
853 885 kthread_t *tx;
854 886 lwpent_t *lep;
855 887 int procstop;
856 888 int flags = TS_ALLSTART;
857 889 hrtime_t stoptime;
858 890
859 891 /*
860 892 * Can't stop a system process.
861 893 */
862 894 if (p == NULL || lwp == NULL || (p->p_flag & SSYS) || p->p_as == &kas)
863 895 return;
864 896
865 897 ASSERT(MUTEX_HELD(&p->p_lock));
866 898
867 899 if (why != PR_SUSPENDED && why != PR_CHECKPOINT) {
868 900 /*
869 901 * Don't stop an lwp with SIGKILL pending.
870 902 * Don't stop if the process or lwp is exiting.
871 903 */
872 904 if (lwp->lwp_cursig == SIGKILL ||
873 905 sigismember(&t->t_sig, SIGKILL) ||
874 906 sigismember(&p->p_sig, SIGKILL) ||
875 907 (t->t_proc_flag & TP_LWPEXIT) ||
876 908 (p->p_flag & (SEXITLWPS|SKILLED))) {
877 909 p->p_stopsig = 0;
878 910 t->t_proc_flag &= ~(TP_PRSTOP|TP_PRVSTOP);
879 911 return;
880 912 }
881 913 }
882 914
883 915 /*
884 916 * Make sure we don't deadlock on a recursive call to prstop().
885 917 * prstop() sets the lwp_nostop flag.
886 918 */
887 919 if (lwp->lwp_nostop)
888 920 return;
889 921
890 922 /*
891 923 * Make sure the lwp is in an orderly state for inspection
892 924 * by a debugger through /proc or for dumping via core().
893 925 */
894 926 schedctl_finish_sigblock(t);
895 927 t->t_proc_flag |= TP_STOPPING; /* must set before dropping p_lock */
896 928 mutex_exit(&p->p_lock);
897 929 stoptime = gethrtime();
898 930 prstop(why, what);
899 931 (void) undo_watch_step(NULL);
900 932 mutex_enter(&p->p_lock);
901 933 ASSERT(t->t_state == TS_ONPROC);
902 934
903 935 switch (why) {
904 936 case PR_CHECKPOINT:
905 937 /*
906 938 * The situation may have changed since we dropped
907 939 * and reacquired p->p_lock. Double-check now
908 940 * whether we should stop or not.
909 941 */
910 942 if (!(t->t_proc_flag & TP_CHKPT)) {
911 943 t->t_proc_flag &= ~TP_STOPPING;
912 944 return;
913 945 }
914 946 t->t_proc_flag &= ~TP_CHKPT;
915 947 flags &= ~TS_RESUME;
916 948 break;
917 949
918 950 case PR_JOBCONTROL:
919 951 ASSERT(what == SIGSTOP || what == SIGTSTP ||
920 952 what == SIGTTIN || what == SIGTTOU);
921 953 flags &= ~TS_XSTART;
922 954 break;
923 955
924 956 case PR_SUSPENDED:
925 957 ASSERT(what == SUSPEND_NORMAL || what == SUSPEND_PAUSE);
926 958 /*
927 959 * The situation may have changed since we dropped
928 960 * and reacquired p->p_lock. Double-check now
929 961 * whether we should stop or not.
930 962 */
931 963 if (what == SUSPEND_PAUSE) {
932 964 if (!(t->t_proc_flag & TP_PAUSE)) {
933 965 t->t_proc_flag &= ~TP_STOPPING;
934 966 return;
935 967 }
936 968 flags &= ~TS_UNPAUSE;
937 969 } else {
938 970 if (!((t->t_proc_flag & TP_HOLDLWP) ||
939 971 (p->p_flag & (SHOLDFORK|SHOLDFORK1|SHOLDWATCH)))) {
940 972 t->t_proc_flag &= ~TP_STOPPING;
941 973 return;
942 974 }
943 975 /*
944 976 * If SHOLDFORK is in effect and we are stopping
945 977 * while asleep (not at the top of the stack),
946 978 * we return now to allow the hold to take effect
947 979 * when we reach the top of the kernel stack.
948 980 */
949 981 if (lwp->lwp_asleep && (p->p_flag & SHOLDFORK)) {
950 982 t->t_proc_flag &= ~TP_STOPPING;
951 983 return;
952 984 }
953 985 flags &= ~TS_CSTART;
954 986 }
955 987 break;
956 988
989 + case PR_BRAND:
990 + /*
991 + * We have been stopped by the brand code for a brand-private
992 + * reason. This is an asynchronous stop affecting only this
993 + * LWP.
994 + */
995 + VERIFY(PROC_IS_BRANDED(p));
996 + flags &= ~TS_BSTART;
997 + break;
998 +
957 999 default: /* /proc stop */
958 1000 flags &= ~TS_PSTART;
959 1001 /*
960 1002 * Do synchronous stop unless the async-stop flag is set.
961 1003 * If why is PR_REQUESTED and t->t_dtrace_stop flag is set,
962 1004 * then no debugger is present and we also do synchronous stop.
963 1005 */
964 1006 if ((why != PR_REQUESTED || t->t_dtrace_stop) &&
965 1007 !(p->p_proc_flag & P_PR_ASYNC)) {
966 1008 int notify;
967 1009
968 1010 for (tx = t->t_forw; tx != t; tx = tx->t_forw) {
969 1011 notify = 0;
970 1012 thread_lock(tx);
971 1013 if (ISTOPPED(tx) ||
972 1014 (tx->t_proc_flag & TP_PRSTOP)) {
973 1015 thread_unlock(tx);
974 1016 continue;
975 1017 }
976 1018 tx->t_proc_flag |= TP_PRSTOP;
977 1019 tx->t_sig_check = 1;
978 1020 if (tx->t_state == TS_SLEEP &&
979 1021 (tx->t_flag & T_WAKEABLE)) {
980 1022 /*
981 1023 * Don't actually wake it up if it's
982 1024 * in one of the lwp_*() syscalls.
983 1025 * Mark it virtually stopped and
984 1026 * notify /proc waiters (below).
985 1027 */
986 1028 if (tx->t_wchan0 == NULL)
987 1029 setrun_locked(tx);
988 1030 else {
989 1031 tx->t_proc_flag |= TP_PRVSTOP;
990 1032 tx->t_stoptime = stoptime;
991 1033 notify = 1;
992 1034 }
993 1035 }
994 1036
995 1037 /* Move waiting thread to run queue */
996 1038 if (ISWAITING(tx))
997 1039 setrun_locked(tx);
998 1040
999 1041 /*
1000 1042 * force the thread into the kernel
1001 1043 * if it is not already there.
1002 1044 */
1003 1045 if (tx->t_state == TS_ONPROC &&
1004 1046 tx->t_cpu != CPU)
1005 1047 poke_cpu(tx->t_cpu->cpu_id);
1006 1048 thread_unlock(tx);
1007 1049 lep = p->p_lwpdir[tx->t_dslot].ld_entry;
1008 1050 if (notify && lep->le_trace)
1009 1051 prnotify(lep->le_trace);
1010 1052 }
1011 1053 /*
1012 1054 * We do this just in case one of the threads we asked
1013 1055 * to stop is in holdlwps() (called from cfork()) or
1014 1056 * lwp_suspend().
1015 1057 */
1016 1058 cv_broadcast(&p->p_holdlwps);
1017 1059 }
1018 1060 break;
1019 1061 }
1020 1062
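[Editor's aside: returning to the PR_BRAND case above — stop(PR_BRAND, ...) clears only TS_BSTART out of TS_ALLSTART, so the brand's continue path must set that flag back before the stopped lwp can run again, mirroring the TS_XSTART handling for SIGCONT in sigtoproc(). A hypothetical resume helper (illustrative name, not the actual lx code):]

	static void
	lx_brand_restart(kthread_t *t)
	{
		thread_lock(t);
		if (t->t_state == TS_STOPPED && t->t_whystop == PR_BRAND) {
			t->t_schedflag |= TS_BSTART;	/* re-arm the start bit */
			setrun_locked(t);
		}
		thread_unlock(t);
	}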
1021 1063 t->t_stoptime = stoptime;
1022 1064
1023 1065 if (why == PR_JOBCONTROL || (why == PR_SUSPENDED && p->p_stopsig)) {
1024 1066 /*
1025 1067 * Determine if the whole process is jobstopped.
1026 1068 */
1027 1069 if (jobstopped(p)) {
1028 1070 sigqueue_t *sqp;
1029 1071 int sig;
1030 1072
1031 1073 if ((sig = p->p_stopsig) == 0)
1032 1074 p->p_stopsig = (uchar_t)(sig = what);
1033 1075 mutex_exit(&p->p_lock);
1034 1076 sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);
1035 1077 mutex_enter(&pidlock);
1036 1078 /*
1037 1079 * The last lwp to stop notifies the parent.
1038 1080 * Turn off the CLDCONT flag now so the first
1039 1081 * lwp to continue knows what to do.
1040 1082 */
1041 1083 p->p_pidflag &= ~CLDCONT;
1042 1084 p->p_wcode = CLD_STOPPED;
1043 1085 p->p_wdata = sig;
1044 1086 sigcld(p, sqp);
1045 1087 /*
1046 1088 * Grab p->p_lock before releasing pidlock so the
1047 1089 * parent and the child don't have a race condition.
1048 1090 */
1049 1091 mutex_enter(&p->p_lock);
1050 1092 mutex_exit(&pidlock);
1051 1093 p->p_stopsig = 0;
1052 1094 } else if (why == PR_JOBCONTROL && p->p_stopsig == 0) {
1053 1095 /*
1054 1096 * Set p->p_stopsig and wake up sleeping lwps
1055 1097 * so they will stop in sympathy with this lwp.
1056 1098 */
1057 1099 p->p_stopsig = (uchar_t)what;
1058 1100 pokelwps(p);
1059 1101 /*
1060 1102 * We do this just in case one of the threads we asked
1061 1103 * to stop is in holdlwps() (called from cfork()) or
1062 1104 * lwp_suspend().
1063 1105 */
1064 1106 cv_broadcast(&p->p_holdlwps);
1065 1107 }
1066 1108 }
1067 1109
1068 - if (why != PR_JOBCONTROL && why != PR_CHECKPOINT) {
1110 + if (why != PR_JOBCONTROL && why != PR_CHECKPOINT && why != PR_BRAND) {
1069 1111 /*
1070 1112 * Do process-level notification when all lwps are
1071 1113 * either stopped on events of interest to /proc
1072 1114 * or are stopped showing PR_SUSPENDED or are zombies.
1073 1115 */
1074 1116 procstop = 1;
1075 1117 for (tx = t->t_forw; procstop && tx != t; tx = tx->t_forw) {
1076 1118 if (VSTOPPED(tx))
1077 1119 continue;
1078 1120 thread_lock(tx);
1079 1121 switch (tx->t_state) {
1080 1122 case TS_ZOMB:
1081 1123 break;
1082 1124 case TS_STOPPED:
1083 1125 /* neither ISTOPPED nor SUSPENDED? */
1084 1126 if ((tx->t_schedflag &
1085 1127 (TS_CSTART | TS_UNPAUSE | TS_PSTART)) ==
1086 1128 (TS_CSTART | TS_UNPAUSE | TS_PSTART))
1087 1129 procstop = 0;
1088 1130 break;
1089 1131 case TS_SLEEP:
1090 1132 /* not paused for watchpoints? */
1091 1133 if (!(tx->t_flag & T_WAKEABLE) ||
1092 1134 tx->t_wchan0 == NULL ||
1093 1135 !(tx->t_proc_flag & TP_PAUSE))
1094 1136 procstop = 0;
1095 1137 break;
1096 1138 default:
1097 1139 procstop = 0;
1098 1140 break;
1099 1141 }
1100 1142 thread_unlock(tx);
1101 1143 }
1102 1144 if (procstop) {
1103 1145 /* there must not be any remapped watched pages now */
1104 1146 ASSERT(p->p_mapcnt == 0);
1105 1147 if (p->p_proc_flag & P_PR_PTRACE) {
1106 1148 /* ptrace() compatibility */
1107 1149 mutex_exit(&p->p_lock);
1108 1150 mutex_enter(&pidlock);
1109 1151 p->p_wcode = CLD_TRAPPED;
1110 1152 p->p_wdata = (why == PR_SIGNALLED)?
1111 1153 what : SIGTRAP;
1112 1154 cv_broadcast(&p->p_parent->p_cv);
1113 1155 /*
1114 1156 * Grab p->p_lock before releasing pidlock so
1115 1157 * parent and child don't have a race condition.
1116 1158 */
1117 1159 mutex_enter(&p->p_lock);
1118 1160 mutex_exit(&pidlock);
1119 1161 }
1120 1162 if (p->p_trace) /* /proc */
1121 1163 prnotify(p->p_trace);
1122 1164 cv_broadcast(&pr_pid_cv[p->p_slot]); /* pauselwps() */
1123 1165 cv_broadcast(&p->p_holdlwps); /* holdwatch() */
1124 1166 }
1125 1167 if (why != PR_SUSPENDED) {
1126 1168 lep = p->p_lwpdir[t->t_dslot].ld_entry;
1127 1169 if (lep->le_trace) /* /proc */
1128 1170 prnotify(lep->le_trace);
1129 1171 /*
1130 1172 * Special notification for creation of the agent lwp.
1131 1173 */
1132 1174 if (t == p->p_agenttp &&
1133 1175 (t->t_proc_flag & TP_PRSTOP) &&
1134 1176 p->p_trace)
1135 1177 prnotify(p->p_trace);
1136 1178 /*
1137 1179 * The situation may have changed since we dropped
1138 1180 * and reacquired p->p_lock. Double-check now
1139 1181 * whether we should stop or not.
1140 1182 */
1141 1183 if (!(t->t_proc_flag & TP_STOPPING)) {
1142 1184 if (t->t_proc_flag & TP_PRSTOP)
1143 1185 t->t_proc_flag |= TP_STOPPING;
1144 1186 }
1145 1187 t->t_proc_flag &= ~(TP_PRSTOP|TP_PRVSTOP);
1146 1188 prnostep(lwp);
1147 1189 }
1148 1190 }
1149 1191
1150 1192 if (why == PR_SUSPENDED) {
1151 1193
1152 1194 /*
1153 1195 * We always broadcast in the case of SUSPEND_PAUSE. This is
1154 1196 * because checks for TP_PAUSE take precedence over checks for
1155 1197 * SHOLDWATCH. If a thread is trying to stop because of
1156 1198 * SUSPEND_PAUSE and tries to do a holdwatch(), it will be
1157 1199 * waiting for the rest of the threads to enter a stopped state.
1158 1200 * If we are stopping for a SUSPEND_PAUSE, we may be the last
1159 1201 * lwp and not know it, so broadcast just in case.
1160 1202 */
1161 1203 if (what == SUSPEND_PAUSE ||
1162 1204 --p->p_lwprcnt == 0 || (t->t_proc_flag & TP_HOLDLWP))
1163 1205 cv_broadcast(&p->p_holdlwps);
1164 1206
1165 1207 }
1166 1208
1167 1209 /*
1168 1210 * Need to do this here (rather than after the thread is officially
1169 1211 * stopped) because we can't call mutex_enter from a stopped thread.
1170 1212 */
1171 1213 if (why == PR_CHECKPOINT)
1172 1214 del_one_utstop();
1173 1215
1216 + /*
1217 + * Allow the brand to post notification of this stop condition.
1218 + */
1219 + if (PROC_IS_BRANDED(p) && BROP(p)->b_stop_notify != NULL) {
1220 + BROP(p)->b_stop_notify(p, lwp, why, what);
1221 + }
1222 +
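[Editor's aside: this notification fires with p_lock held, just before the lwp actually parks in thread_stop(), so a b_stop_notify hook should only record state or wake a waiter, never block. A hedged sketch — the signature is inferred from the call site above, and the brand-private fields are assumptions, not the actual lx_lwp_data_t layout:]

	static void
	lx_stop_notify(proc_t *p, klwp_t *lwp, int why, int what)
	{
		lx_lwp_data_t *lwpd = lwp->lwp_brand;	/* brand-private data */

		if (why == PR_BRAND && lwpd != NULL) {
			lwpd->br_stop_what = what;		/* assumed field */
			cv_broadcast(&lwpd->br_stop_cv);	/* assumed tracer cv */
		}
	}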
1174 1223 thread_lock(t);
1175 1224 ASSERT((t->t_schedflag & TS_ALLSTART) == 0);
1176 1225 t->t_schedflag |= flags;
1177 1226 t->t_whystop = (short)why;
1178 1227 t->t_whatstop = (short)what;
1179 1228 CL_STOP(t, why, what);
1180 1229 (void) new_mstate(t, LMS_STOPPED);
1181 1230 thread_stop(t); /* set stop state and drop lock */
1182 1231
1183 1232 if (why != PR_SUSPENDED && why != PR_CHECKPOINT) {
1184 1233 /*
1185 1234 * We may have gotten a SIGKILL or a SIGCONT when
1186 1235 * we released p->p_lock; make one last check.
1187 1236 * Also check for a /proc run-on-last-close.
1188 1237 */
1189 1238 if (sigismember(&t->t_sig, SIGKILL) ||
1190 1239 sigismember(&p->p_sig, SIGKILL) ||
1191 1240 (t->t_proc_flag & TP_LWPEXIT) ||
1192 1241 (p->p_flag & (SEXITLWPS|SKILLED))) {
1193 1242 p->p_stopsig = 0;
1194 1243 thread_lock(t);
1195 - t->t_schedflag |= TS_XSTART | TS_PSTART;
1244 + t->t_schedflag |= TS_XSTART | TS_PSTART | TS_BSTART;
1196 1245 setrun_locked(t);
1197 1246 thread_unlock_nopreempt(t);
1198 1247 } else if (why == PR_JOBCONTROL) {
1199 1248 if (p->p_flag & SSCONT) {
1200 1249 /*
1201 1250 * This resulted from a SIGCONT posted
1202 1251 * while we were not holding p->p_lock.
1203 1252 */
1204 1253 p->p_stopsig = 0;
1205 1254 thread_lock(t);
1206 1255 t->t_schedflag |= TS_XSTART;
1207 1256 setrun_locked(t);
1208 1257 thread_unlock_nopreempt(t);
1209 1258 }
1210 1259 } else if (!(t->t_proc_flag & TP_STOPPING)) {
1211 1260 /*
1212 1261 * This resulted from a /proc run-on-last-close.
1213 1262 */
1214 1263 thread_lock(t);
1215 1264 t->t_schedflag |= TS_PSTART;
1216 1265 setrun_locked(t);
1217 1266 thread_unlock_nopreempt(t);
1218 1267 }
1219 1268 }
1220 1269
1221 1270 t->t_proc_flag &= ~TP_STOPPING;
1222 1271 mutex_exit(&p->p_lock);
1223 1272
1224 1273 swtch();
1225 1274 setallwatch(); /* reestablish any watchpoints set while stopped */
1226 1275 mutex_enter(&p->p_lock);
1227 1276 prbarrier(p); /* barrier against /proc locking */
1228 1277 }
1229 1278
1230 1279 /* Interface for resetting user thread stop count. */
1231 1280 void
1232 1281 utstop_init(void)
1233 1282 {
1234 1283 mutex_enter(&thread_stop_lock);
1235 1284 num_utstop = 0;
1236 1285 mutex_exit(&thread_stop_lock);
1237 1286 }
1238 1287
1239 1288 /* Interface for registering a user thread stop request. */
1240 1289 void
1241 1290 add_one_utstop(void)
1242 1291 {
1243 1292 mutex_enter(&thread_stop_lock);
1244 1293 num_utstop++;
1245 1294 mutex_exit(&thread_stop_lock);
1246 1295 }
1247 1296
1248 1297 /* Interface for cancelling a user thread stop request */
1249 1298 void
1250 1299 del_one_utstop(void)
1251 1300 {
1252 1301 mutex_enter(&thread_stop_lock);
1253 1302 num_utstop--;
1254 1303 if (num_utstop == 0)
1255 1304 cv_broadcast(&utstop_cv);
1256 1305 mutex_exit(&thread_stop_lock);
1257 1306 }
1258 1307
1259 1308 /* Interface to wait for all user threads to be stopped */
1260 1309 void
1261 1310 utstop_timedwait(clock_t ticks)
1262 1311 {
1263 1312 mutex_enter(&thread_stop_lock);
1264 1313 if (num_utstop > 0)
1265 1314 (void) cv_reltimedwait(&utstop_cv, &thread_stop_lock, ticks,
1266 1315 TR_CLOCK_TICK);
1267 1316 mutex_exit(&thread_stop_lock);
1268 1317 }
1269 1318
1270 1319 /*
1271 1320 * Perform the action specified by the current signal.
1272 1321 * The usual sequence is:
1273 1322 * if (issig())
1274 1323 * psig();
1275 1324 * The signal bit has already been cleared by issig(),
1276 1325 * the current signal number has been stored in lwp_cursig,
1277 1326 * and the current siginfo is now referenced by lwp_curinfo.
1278 1327 */
1279 1328 void
1280 1329 psig(void)
1281 1330 {
1282 1331 kthread_t *t = curthread;
1283 1332 proc_t *p = ttoproc(t);
1284 1333 klwp_t *lwp = ttolwp(t);
1285 1334 void (*func)();
1286 1335 int sig, rc, code, ext;
1287 1336 pid_t pid = -1;
1288 1337 id_t ctid = 0;
1289 1338 zoneid_t zoneid = -1;
1290 1339 sigqueue_t *sqp = NULL;
1291 1340 uint32_t auditing = AU_AUDITING();
1292 1341
1293 1342 mutex_enter(&p->p_lock);
1294 1343 schedctl_finish_sigblock(t);
1295 1344 code = CLD_KILLED;
1296 1345
1297 1346 if (p->p_flag & SEXITLWPS) {
1298 1347 lwp_exit();
1299 1348 return; /* not reached */
1300 1349 }
1301 1350 sig = lwp->lwp_cursig;
1302 1351 ext = lwp->lwp_extsig;
1303 1352
1304 1353 ASSERT(sig < NSIG);
1305 1354
1306 1355 /*
1307 1356 * Re-check lwp_cursig after we acquire p_lock. Since p_lock was
1308 1357 * dropped between issig() and psig(), a debugger may have cleared
1309 1358 * lwp_cursig via /proc in the intervening window.
1310 1359 */
1311 1360 if (sig == 0) {
1312 1361 if (lwp->lwp_curinfo) {
1313 1362 siginfofree(lwp->lwp_curinfo);
1314 1363 lwp->lwp_curinfo = NULL;
1315 1364 }
1316 1365 if (t->t_flag & T_TOMASK) { /* sigsuspend or pollsys */
1317 1366 t->t_flag &= ~T_TOMASK;
1318 1367 t->t_hold = lwp->lwp_sigoldmask;
1319 1368 }
1320 1369 mutex_exit(&p->p_lock);
1321 1370 return;
1322 1371 }
1323 1372 func = PTOU(curproc)->u_signal[sig-1];
1324 1373
1325 1374 /*
1326 1375 * The signal disposition could have changed since we promoted
1327 1376 * this signal from pending to current (we dropped p->p_lock).
1328 1377 * This can happen only in a multi-threaded process.
1329 1378 */
1330 - if (sigismember(&p->p_ignore, sig) ||
1379 + if (sig_ignorable(p, lwp, sig) ||
1331 1380 (func == SIG_DFL && sigismember(&stopdefault, sig))) {
1332 1381 lwp->lwp_cursig = 0;
1333 1382 lwp->lwp_extsig = 0;
1334 1383 if (lwp->lwp_curinfo) {
1335 1384 siginfofree(lwp->lwp_curinfo);
1336 1385 lwp->lwp_curinfo = NULL;
1337 1386 }
1338 1387 if (t->t_flag & T_TOMASK) { /* sigsuspend or pollsys */
1339 1388 t->t_flag &= ~T_TOMASK;
1340 1389 t->t_hold = lwp->lwp_sigoldmask;
1341 1390 }
1342 1391 mutex_exit(&p->p_lock);
1343 1392 return;
1344 1393 }
1345 1394
1346 1395 /*
1347 1396 * We check lwp_curinfo first since pr_setsig can actually
1348 1397 * stuff a sigqueue_t there for SIGKILL.
1349 1398 */
1350 1399 if (lwp->lwp_curinfo) {
1351 1400 sqp = lwp->lwp_curinfo;
1352 1401 } else if (sig == SIGKILL && p->p_killsqp) {
1353 1402 sqp = p->p_killsqp;
1354 1403 }
1355 1404
1356 1405 if (sqp != NULL) {
1357 1406 if (SI_FROMUSER(&sqp->sq_info)) {
1358 1407 pid = sqp->sq_info.si_pid;
1359 1408 ctid = sqp->sq_info.si_ctid;
1360 1409 zoneid = sqp->sq_info.si_zoneid;
1361 1410 }
1362 1411 /*
1363 1412 * If we have a sigqueue_t, its sq_external value
1364 1413 * trumps the lwp_extsig value. It is theoretically
1365 1414 * possible to make lwp_extsig reflect reality, but it
1366 1415 * would unnecessarily complicate things elsewhere.
1367 1416 */
1368 1417 ext = sqp->sq_external;
1369 1418 }
1370 1419
1371 1420 if (func == SIG_DFL) {
1372 1421 mutex_exit(&p->p_lock);
1373 1422 DTRACE_PROC3(signal__handle, int, sig, k_siginfo_t *,
1374 1423 NULL, void (*)(void), func);
1375 1424 } else {
1376 1425 k_siginfo_t *sip = NULL;
1377 1426
1378 1427 /*
1379 1428 * If DTrace user-land tracing is active, give DTrace a
1380 1429 * chance to defer the signal until after tracing is
1381 1430 * complete.
1382 1431 */
1383 1432 if (t->t_dtrace_on && dtrace_safe_defer_signal()) {
1384 1433 mutex_exit(&p->p_lock);
1385 1434 return;
1386 1435 }
1387 1436
1388 1437 /*
1389 1438 * save siginfo pointer here, in case the
 1390 1439 	 * signal's reset bit is on
1391 1440 *
1392 1441 * The presence of a current signal prevents paging
1393 1442 * from succeeding over a network. We copy the current
1394 1443 * signal information to the side and cancel the current
1395 1444 * signal so that sendsig() will succeed.
1396 1445 */
1397 1446 if (sigismember(&p->p_siginfo, sig)) {
1398 1447 sip = &lwp->lwp_siginfo;
1399 1448 if (sqp) {
1400 1449 bcopy(&sqp->sq_info, sip, sizeof (*sip));
1401 1450 /*
1402 1451 * If we were interrupted out of a system call
1403 1452 * due to pthread_cancel(), inform libc.
1404 1453 */
1405 1454 if (sig == SIGCANCEL &&
1406 1455 sip->si_code == SI_LWP &&
1407 1456 t->t_sysnum != 0)
1408 1457 schedctl_cancel_eintr();
1409 1458 } else if (sig == SIGPROF && sip->si_signo == SIGPROF &&
1410 1459 t->t_rprof != NULL && t->t_rprof->rp_anystate) {
1411 1460 /* EMPTY */;
1412 1461 } else {
1413 1462 bzero(sip, sizeof (*sip));
1414 1463 sip->si_signo = sig;
1415 1464 sip->si_code = SI_NOINFO;
1416 1465 }
1417 1466 }
1418 1467
1419 1468 if (t->t_flag & T_TOMASK)
1420 1469 t->t_flag &= ~T_TOMASK;
1421 1470 else
1422 1471 lwp->lwp_sigoldmask = t->t_hold;
1423 1472 sigorset(&t->t_hold, &PTOU(curproc)->u_sigmask[sig-1]);
1424 1473 if (!sigismember(&PTOU(curproc)->u_signodefer, sig))
1425 1474 sigaddset(&t->t_hold, sig);
1426 1475 if (sigismember(&PTOU(curproc)->u_sigresethand, sig))
1427 1476 setsigact(sig, SIG_DFL, &nullsmask, 0);
1428 1477
1429 1478 DTRACE_PROC3(signal__handle, int, sig, k_siginfo_t *,
1430 1479 sip, void (*)(void), func);
1431 1480
1432 1481 lwp->lwp_cursig = 0;
1433 1482 lwp->lwp_extsig = 0;
1434 1483 if (lwp->lwp_curinfo) {
1435 1484 /* p->p_killsqp is freed by freeproc */
1436 1485 siginfofree(lwp->lwp_curinfo);
1437 1486 lwp->lwp_curinfo = NULL;
1438 1487 }
1439 1488 mutex_exit(&p->p_lock);
1440 1489 lwp->lwp_ru.nsignals++;
1441 1490
1442 1491 if (p->p_model == DATAMODEL_NATIVE)
1443 1492 rc = sendsig(sig, sip, func);
1444 1493 #ifdef _SYSCALL32_IMPL
1445 1494 else
1446 1495 rc = sendsig32(sig, sip, func);
1447 1496 #endif /* _SYSCALL32_IMPL */
1448 1497 if (rc)
1449 1498 return;
1450 1499 sig = lwp->lwp_cursig = SIGSEGV;
1451 1500 ext = 0; /* lwp_extsig was set above */
1452 1501 pid = -1;
1453 1502 ctid = 0;
1454 1503 }
1455 1504
1456 1505 if (sigismember(&coredefault, sig)) {
1457 1506 /*
1458 1507 * Terminate all LWPs but don't discard them.
1459 1508 * If another lwp beat us to the punch by calling exit(),
1460 1509 * evaporate now.
1461 1510 */
1462 1511 proc_is_exiting(p);
1463 1512 if (exitlwps(1) != 0) {
1464 1513 mutex_enter(&p->p_lock);
1465 1514 lwp_exit();
1466 1515 }
1467 1516 /* if we got a SIGKILL from anywhere, no core dump */
1468 1517 if (p->p_flag & SKILLED) {
1469 1518 sig = SIGKILL;
1470 1519 ext = (p->p_flag & SEXTKILLED) != 0;
1471 1520 } else {
1472 1521 if (auditing) /* audit core dump */
1473 1522 audit_core_start(sig);
1474 1523 if (core(sig, ext) == 0)
1475 1524 code = CLD_DUMPED;
1476 1525 if (auditing) /* audit core dump */
1477 1526 audit_core_finish(code);
1478 1527 }
1479 1528 }
1480 1529
1481 1530 /*
1482 1531 * Generate a contract event once if the process is killed
1483 1532 * by a signal.
1484 1533 */
1485 1534 if (ext) {
1486 1535 proc_is_exiting(p);
1487 1536 if (exitlwps(0) != 0) {
1488 1537 mutex_enter(&p->p_lock);
1489 1538 lwp_exit();
1490 1539 }
1491 1540 contract_process_sig(p->p_ct_process, p, sig, pid, ctid,
1492 1541 zoneid);
1493 1542 }
1494 1543
1495 1544 exit(code, sig);
1496 1545 }
1497 1546
1498 1547 /*
1499 1548 * Find next unheld signal in ssp for thread t.
1500 1549 */
1501 1550 int
1502 1551 fsig(k_sigset_t *ssp, kthread_t *t)
1503 1552 {
1504 1553 proc_t *p = ttoproc(t);
1505 1554 user_t *up = PTOU(p);
1506 1555 int i;
1507 1556 k_sigset_t temp;
1508 1557
1509 1558 ASSERT(MUTEX_HELD(&p->p_lock));
1510 1559
1511 1560 /*
1512 1561 * Don't promote any signals for the parent of a vfork()d
1513 1562 * child that hasn't yet released the parent's memory.
1514 1563 */
1515 1564 if (p->p_flag & SVFWAIT)
1516 1565 return (0);
1517 1566
1518 1567 temp = *ssp;
1519 1568 sigdiffset(&temp, &t->t_hold);
1520 1569
1521 1570 /*
1522 1571 * Don't promote stopping signals (except SIGSTOP) for a child
1523 1572 * of vfork() that hasn't yet released the parent's memory.
1524 1573 */
1525 1574 if (p->p_flag & SVFORK)
1526 1575 sigdiffset(&temp, &holdvfork);
1527 1576
1528 1577 /*
1529 1578 * Don't promote a signal that will stop
1530 1579 * the process when lwp_nostop is set.
1531 1580 */
1532 1581 if (ttolwp(t)->lwp_nostop) {
1533 1582 sigdelset(&temp, SIGSTOP);
1534 1583 if (!p->p_pgidp->pid_pgorphaned) {
1535 1584 if (up->u_signal[SIGTSTP-1] == SIG_DFL)
1536 1585 sigdelset(&temp, SIGTSTP);
1537 1586 if (up->u_signal[SIGTTIN-1] == SIG_DFL)
1538 1587 sigdelset(&temp, SIGTTIN);
1539 1588 if (up->u_signal[SIGTTOU-1] == SIG_DFL)
1540 1589 sigdelset(&temp, SIGTTOU);
1541 1590 }
1542 1591 }
1543 1592
1544 1593 /*
1545 1594 * Choose SIGKILL and SIGPROF before all other pending signals.
1546 1595 * The rest are promoted in signal number order.
1547 1596 */
1548 1597 if (sigismember(&temp, SIGKILL))
1549 1598 return (SIGKILL);
1550 1599 if (sigismember(&temp, SIGPROF))
1551 1600 return (SIGPROF);
1552 1601
1553 1602 for (i = 0; i < sizeof (temp) / sizeof (temp.__sigbits[0]); i++) {
1554 1603 if (temp.__sigbits[i])
1555 1604 return ((i * NBBY * sizeof (temp.__sigbits[0])) +
1556 1605 lowbit(temp.__sigbits[i]));
1557 1606 }
1558 1607
1559 1608 return (0);
1560 1609 }
1561 1610
1562 1611 void
1563 1612 setsigact(int sig, void (*disp)(), const k_sigset_t *mask, int flags)
1564 1613 {
1565 1614 proc_t *p = ttoproc(curthread);
1566 1615 kthread_t *t;
1567 1616
1568 1617 ASSERT(MUTEX_HELD(&p->p_lock));
1569 1618
1570 1619 PTOU(curproc)->u_signal[sig - 1] = disp;
1571 1620
1572 1621 /*
1573 1622 * Honor the SA_SIGINFO flag if the signal is being caught.
1574 1623 * Force the SA_SIGINFO flag if the signal is not being caught.
1575 1624 * This is necessary to make sigqueue() and sigwaitinfo() work
1576 1625 * properly together when the signal is set to default or is
1577 1626 * being temporarily ignored.
1578 1627 */
1579 1628 if ((flags & SA_SIGINFO) || disp == SIG_DFL || disp == SIG_IGN)
1580 1629 sigaddset(&p->p_siginfo, sig);
1581 1630 else
1582 1631 sigdelset(&p->p_siginfo, sig);
1583 1632
1584 1633 if (disp != SIG_DFL && disp != SIG_IGN) {
1585 1634 sigdelset(&p->p_ignore, sig);
1586 1635 PTOU(curproc)->u_sigmask[sig - 1] = *mask;
1587 1636 if (!sigismember(&cantreset, sig)) {
1588 1637 if (flags & SA_RESETHAND)
1589 1638 sigaddset(&PTOU(curproc)->u_sigresethand, sig);
1590 1639 else
1591 1640 sigdelset(&PTOU(curproc)->u_sigresethand, sig);
1592 1641 }
1593 1642 if (flags & SA_NODEFER)
1594 1643 sigaddset(&PTOU(curproc)->u_signodefer, sig);
1595 1644 else
1596 1645 sigdelset(&PTOU(curproc)->u_signodefer, sig);
1597 1646 if (flags & SA_RESTART)
1598 1647 sigaddset(&PTOU(curproc)->u_sigrestart, sig);
1599 1648 else
1600 1649 sigdelset(&PTOU(curproc)->u_sigrestart, sig);
1601 1650 if (flags & SA_ONSTACK)
1602 1651 sigaddset(&PTOU(curproc)->u_sigonstack, sig);
1603 1652 else
1604 1653 sigdelset(&PTOU(curproc)->u_sigonstack, sig);
1605 1654 } else if (disp == SIG_IGN ||
1606 1655 (disp == SIG_DFL && sigismember(&ignoredefault, sig))) {
1607 1656 /*
1608 1657 * Setting the signal action to SIG_IGN results in the
1609 1658 * discarding of all pending signals of that signal number.
1610 1659 * Setting the signal action to SIG_DFL does the same *only*
1611 1660 * if the signal's default behavior is to be ignored.
1612 1661 */
1613 1662 sigaddset(&p->p_ignore, sig);
1614 1663 sigdelset(&p->p_sig, sig);
1615 1664 sigdelset(&p->p_extsig, sig);
1616 1665 sigdelq(p, NULL, sig);
1617 1666 t = p->p_tlist;
1618 1667 do {
1619 1668 sigdelset(&t->t_sig, sig);
1620 1669 sigdelset(&t->t_extsig, sig);
1621 1670 sigdelq(p, t, sig);
1622 1671 } while ((t = t->t_forw) != p->p_tlist);
1623 1672 } else {
1624 1673 /*
1625 1674 * The signal action is being set to SIG_DFL and the default
1626 1675 * behavior is to do something: make sure it is not ignored.
1627 1676 */
1628 1677 sigdelset(&p->p_ignore, sig);
1629 1678 }
1630 1679
1631 1680 if (sig == SIGCLD) {
1632 1681 if (flags & SA_NOCLDWAIT)
1633 1682 p->p_flag |= SNOWAIT;
1634 1683 else
1635 1684 p->p_flag &= ~SNOWAIT;
1636 1685
1637 1686 if (flags & SA_NOCLDSTOP)
1638 1687 p->p_flag &= ~SJCTL;
1639 1688 else
1640 1689 p->p_flag |= SJCTL;
1641 1690
1642 1691 if ((p->p_flag & SNOWAIT) || disp == SIG_IGN) {
1643 1692 proc_t *cp, *tp;
1644 1693
1645 1694 mutex_exit(&p->p_lock);
1646 1695 mutex_enter(&pidlock);
1647 1696 for (cp = p->p_child; cp != NULL; cp = tp) {
1648 1697 tp = cp->p_sibling;
1649 1698 if (cp->p_stat == SZOMB &&
1650 1699 !(cp->p_pidflag & CLDWAITPID))
1651 1700 freeproc(cp);
1652 1701 }
1653 1702 mutex_exit(&pidlock);
1654 1703 mutex_enter(&p->p_lock);
1655 1704 }
1656 1705 }
1657 1706 }
1658 1707
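The discard-on-SIG_IGN semantics described in the block comment above are observable from user-land with standard POSIX calls. A small sketch (none of this is kernel API): block SIGUSR1, make it pending, then watch sigaction() with SIG_IGN flush it from the pending set.

#include <signal.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	sigset_t set, pend;
	struct sigaction sa;

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	(void) sigprocmask(SIG_BLOCK, &set, NULL);
	(void) kill(getpid(), SIGUSR1);		/* pending, not delivered */

	(void) sigpending(&pend);
	(void) printf("pending before SIG_IGN: %d\n",
	    sigismember(&pend, SIGUSR1));	/* prints 1 */

	sa.sa_handler = SIG_IGN;		/* discards the pending SIGUSR1 */
	sa.sa_flags = 0;
	sigemptyset(&sa.sa_mask);
	(void) sigaction(SIGUSR1, &sa, NULL);

	(void) sigpending(&pend);
	(void) printf("pending after SIG_IGN:  %d\n",
	    sigismember(&pend, SIGUSR1));	/* prints 0 */
	return (0);
}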
1659 1708 /*
1660 1709 * Set all signal actions not already set to SIG_DFL or SIG_IGN to SIG_DFL.
1661 1710 * Called from exec_common() for a process undergoing execve()
1662 1711 * and from cfork() for a newly-created child of vfork().
1663 1712 * In the vfork() case, 'p' is not the current process.
1664 1713 * In both cases, there is only one thread in the process.
1665 1714 */
1666 1715 void
1667 1716 sigdefault(proc_t *p)
1668 1717 {
1669 1718 kthread_t *t = p->p_tlist;
1670 1719 struct user *up = PTOU(p);
1671 1720 int sig;
1672 1721
1673 1722 ASSERT(MUTEX_HELD(&p->p_lock));
1674 1723
1675 1724 for (sig = 1; sig < NSIG; sig++) {
1676 1725 if (up->u_signal[sig - 1] != SIG_DFL &&
1677 1726 up->u_signal[sig - 1] != SIG_IGN) {
1678 1727 up->u_signal[sig - 1] = SIG_DFL;
1679 1728 sigemptyset(&up->u_sigmask[sig - 1]);
1680 1729 if (sigismember(&ignoredefault, sig)) {
1681 1730 sigdelq(p, NULL, sig);
1682 1731 sigdelq(p, t, sig);
1683 1732 }
1684 1733 if (sig == SIGCLD)
1685 1734 p->p_flag &= ~(SNOWAIT|SJCTL);
1686 1735 }
1687 1736 }
1688 1737 sigorset(&p->p_ignore, &ignoredefault);
1689 1738 sigfillset(&p->p_siginfo);
1690 1739 sigdiffset(&p->p_siginfo, &cantmask);
1691 1740 sigdiffset(&p->p_sig, &ignoredefault);
1692 1741 sigdiffset(&p->p_extsig, &ignoredefault);
1693 1742 sigdiffset(&t->t_sig, &ignoredefault);
1694 1743 sigdiffset(&t->t_extsig, &ignoredefault);
1695 1744 }
1696 1745
1697 1746 void
1698 1747 sigcld(proc_t *cp, sigqueue_t *sqp)
1699 1748 {
1700 1749 proc_t *pp = cp->p_parent;
1701 1750
1702 1751 ASSERT(MUTEX_HELD(&pidlock));
1703 1752
1704 1753 switch (cp->p_wcode) {
1705 1754 case CLD_EXITED:
1706 1755 case CLD_DUMPED:
1707 1756 case CLD_KILLED:
1708 1757 ASSERT(cp->p_stat == SZOMB);
1709 1758 /*
1710 1759 * The broadcast on p_srwchan_cv is a kludge to
1711 1760 * wakeup a possible thread in uadmin(A_SHUTDOWN).
1712 1761 */
1713 1762 cv_broadcast(&cp->p_srwchan_cv);
1714 1763
1715 1764 /*
1716 1765 * Add to newstate list of the parent
1717 1766 */
1718 1767 add_ns(pp, cp);
1719 1768
1720 1769 cv_broadcast(&pp->p_cv);
1721 1770 if ((pp->p_flag & SNOWAIT) ||
1722 1771 PTOU(pp)->u_signal[SIGCLD - 1] == SIG_IGN) {
1723 1772 if (!(cp->p_pidflag & CLDWAITPID))
1724 1773 freeproc(cp);
1725 1774 } else if (!(cp->p_pidflag & CLDNOSIGCHLD)) {
1726 1775 post_sigcld(cp, sqp);
1727 1776 sqp = NULL;
1728 1777 }
1729 1778 break;
1730 1779
1731 1780 case CLD_STOPPED:
1732 1781 case CLD_CONTINUED:
1733 1782 cv_broadcast(&pp->p_cv);
1734 1783 if (pp->p_flag & SJCTL) {
1735 1784 post_sigcld(cp, sqp);
1736 1785 sqp = NULL;
1737 1786 }
1738 1787 break;
1739 1788 }
1740 1789
1741 1790 if (sqp)
1742 1791 siginfofree(sqp);
1743 1792 }
1744 1793
1745 1794 /*
1746 1795 * Common code called from sigcld() and from
1747 1796 * waitid() and issig_forreal() via sigcld_repost().
1748 1797 * Give the parent process a SIGCLD if it does not have one pending,
1749 1798 * else mark the child process so a SIGCLD can be posted later.
1750 1799 */
1751 1800 static void
1752 1801 post_sigcld(proc_t *cp, sigqueue_t *sqp)
1753 1802 {
1754 1803 proc_t *pp = cp->p_parent;
1755 1804 k_siginfo_t info;
1756 1805
1757 1806 ASSERT(MUTEX_HELD(&pidlock));
1758 1807 mutex_enter(&pp->p_lock);
1759 1808
1760 1809 /*
1761 1810 * If a SIGCLD is pending, then just mark the child process
1762 1811 * so that its SIGCLD will be posted later, when the first
1763 1812 * SIGCLD is taken off the queue or when the parent is ready
1764 1813 * to receive it or accept it, if ever.
1765 1814 */
1766 1815 if (sigismember(&pp->p_sig, SIGCLD)) {
1767 1816 cp->p_pidflag |= CLDPEND;
1768 1817 } else {
1769 1818 cp->p_pidflag &= ~CLDPEND;
1770 1819 if (sqp == NULL) {
1771 1820 /*
1772 1821 * This can only happen when the parent is init.
1773 1822 * (See call to sigcld(q, NULL) in exit().)
1774 - * Use KM_NOSLEEP to avoid deadlock.
1823 + * Use KM_NOSLEEP to avoid deadlock. The child proc's
1824 + * initpid can be 1 for zlogin.
1775 1825 */
1776 - ASSERT(pp == proc_init);
1826 + ASSERT(pp->p_pidp->pid_id ==
1827 + cp->p_zone->zone_proc_initpid ||
1828 + pp->p_pidp->pid_id == 1);
1777 1829 winfo(cp, &info, 0);
1778 1830 sigaddq(pp, NULL, &info, KM_NOSLEEP);
1779 1831 } else {
1780 1832 winfo(cp, &sqp->sq_info, 0);
1781 1833 sigaddqa(pp, NULL, sqp);
1782 1834 sqp = NULL;
1783 1835 }
1784 1836 }
1785 1837
1786 1838 mutex_exit(&pp->p_lock);
1787 1839
1788 1840 if (sqp)
1789 1841 siginfofree(sqp);
1790 1842 }
1791 1843
1792 1844 /*
1793 1845 * Search for a child that has a pending SIGCLD for us, the parent.
1794 1846 * The queue of SIGCLD signals is implied by the list of children.
1795 1847 * We post the SIGCLD signals one at a time so they don't get lost.
1796 1848 * When one is dequeued, another is enqueued, until there are no more.
1797 1849 */
1798 1850 void
1799 1851 sigcld_repost()
1800 1852 {
1801 1853 proc_t *pp = curproc;
1802 1854 proc_t *cp;
1803 1855 sigqueue_t *sqp;
1804 1856
1805 1857 sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);
1806 1858 mutex_enter(&pidlock);
1859 + if (PROC_IS_BRANDED(pp) && BROP(pp)->b_sigcld_repost != NULL) {
1860 + /*
1861 + * Allow the brand to inject synthetic SIGCLD signals.
1862 + */
1863 + if (BROP(pp)->b_sigcld_repost(pp, sqp) == 0) {
1864 + mutex_exit(&pidlock);
1865 + return;
1866 + }
1867 + }
1807 1868 for (cp = pp->p_child; cp; cp = cp->p_sibling) {
1808 1869 if (cp->p_pidflag & CLDPEND) {
1809 1870 post_sigcld(cp, sqp);
1810 1871 mutex_exit(&pidlock);
1811 1872 return;
1812 1873 }
1813 1874 }
1814 1875 mutex_exit(&pidlock);
1815 1876 kmem_free(sqp, sizeof (sigqueue_t));
1816 1877 }
1817 1878
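The b_sigcld_repost hook added above follows a consume-or-fall-through protocol: a return of 0 means the brand took ownership of the preallocated sigqueue_t, so sigcld_repost() returns immediately; otherwise the normal CLDPEND scan runs and the buffer is freed if unused. A user-land model of that ownership protocol, with illustrative names throughout (struct buf, repost_hook, repost()):

#include <stdio.h>
#include <stdlib.h>

struct buf { int posted; };

/* Optional hook; returning 0 means it consumed 'b' (as b_sigcld_repost). */
static int (*repost_hook)(struct buf *b);

static void
repost(void)
{
	struct buf *b = calloc(1, sizeof (*b));	/* preallocated up front */
	int used = 0;

	if (b == NULL)
		return;
	if (repost_hook != NULL && repost_hook(b) == 0)
		return;			/* the hook owns b now */
	/* ... the normal scan would run here, setting used if b is posted ... */
	if (!used)
		free(b);		/* nobody took it: release */
}

int
main(void)
{
	repost();			/* no hook registered: falls through */
	(void) printf("buffer released on the fall-through path\n");
	return (0);
}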
1818 1879 /*
1819 1880 * count the number of sigqueue structures sent by sigaddqa()
1820 1881 */
1821 1882 void
1822 1883 sigqsend(int cmd, proc_t *p, kthread_t *t, sigqueue_t *sigqp)
1823 1884 {
1824 1885 sigqhdr_t *sqh;
1825 1886
1826 1887 sqh = (sigqhdr_t *)sigqp->sq_backptr;
1827 1888 ASSERT(sqh);
1828 1889
1829 1890 mutex_enter(&sqh->sqb_lock);
1830 1891 sqh->sqb_sent++;
1831 1892 mutex_exit(&sqh->sqb_lock);
1832 1893
1833 1894 if (cmd == SN_SEND)
1834 1895 sigaddqa(p, t, sigqp);
1835 1896 else
1836 1897 siginfofree(sigqp);
1837 1898 }
1838 1899
1839 1900 int
1840 1901 sigsendproc(proc_t *p, sigsend_t *pv)
1841 1902 {
1842 1903 struct cred *cr;
1843 1904 proc_t *myprocp = curproc;
1844 1905
1845 1906 ASSERT(MUTEX_HELD(&pidlock));
1846 1907
1847 1908 if (p->p_pid == 1 && pv->sig && sigismember(&cantmask, pv->sig))
1848 1909 return (EPERM);
1849 1910
1850 1911 cr = CRED();
1851 1912
1852 1913 if (pv->checkperm == 0 ||
1853 1914 (pv->sig == SIGCONT && p->p_sessp == myprocp->p_sessp) ||
1854 1915 prochasprocperm(p, myprocp, cr)) {
1855 1916 pv->perm++;
1856 1917 if (pv->sig) {
1857 1918 /* Make sure we should be setting si_pid and friends */
1858 1919 ASSERT(pv->sicode <= 0);
1859 1920 if (SI_CANQUEUE(pv->sicode)) {
1860 1921 sigqueue_t *sqp;
1861 1922
1862 1923 mutex_enter(&myprocp->p_lock);
1863 1924 sqp = sigqalloc(myprocp->p_sigqhdr);
1864 1925 mutex_exit(&myprocp->p_lock);
1865 1926 if (sqp == NULL)
1866 1927 return (EAGAIN);
1867 1928 sqp->sq_info.si_signo = pv->sig;
1868 1929 sqp->sq_info.si_code = pv->sicode;
1869 1930 sqp->sq_info.si_pid = myprocp->p_pid;
1870 1931 sqp->sq_info.si_ctid = PRCTID(myprocp);
1871 1932 sqp->sq_info.si_zoneid = getzoneid();
1872 1933 sqp->sq_info.si_uid = crgetruid(cr);
1873 1934 sqp->sq_info.si_value = pv->value;
1874 1935 mutex_enter(&p->p_lock);
1875 1936 sigqsend(SN_SEND, p, NULL, sqp);
1876 1937 mutex_exit(&p->p_lock);
1877 1938 } else {
1878 1939 k_siginfo_t info;
1879 1940 bzero(&info, sizeof (info));
1880 1941 info.si_signo = pv->sig;
1881 1942 info.si_code = pv->sicode;
1882 1943 info.si_pid = myprocp->p_pid;
1883 1944 info.si_ctid = PRCTID(myprocp);
1884 1945 info.si_zoneid = getzoneid();
1885 1946 info.si_uid = crgetruid(cr);
1886 1947 mutex_enter(&p->p_lock);
1887 1948 /*
1888 1949 * XXX: Should be KM_SLEEP but
1889 1950 * we have to avoid deadlock.
1890 1951 */
1891 1952 sigaddq(p, NULL, &info, KM_NOSLEEP);
1892 1953 mutex_exit(&p->p_lock);
1893 1954 }
1894 1955 }
1895 1956 }
1896 1957
1897 1958 return (0);
1898 1959 }
1899 1960
1900 1961 int
1901 1962 sigsendset(procset_t *psp, sigsend_t *pv)
1902 1963 {
1903 1964 int error;
1904 1965
1905 1966 error = dotoprocs(psp, sigsendproc, (char *)pv);
1906 1967 if (error == 0 && pv->perm == 0)
1907 1968 return (EPERM);
1908 1969
1909 1970 return (error);
1910 1971 }
1911 1972
1912 1973 /*
1913 1974 * Dequeue a queued siginfo structure.
1914 1975 * If a non-null thread pointer is passed then dequeue from
1915 1976 * the thread queue, otherwise dequeue from the process queue.
1916 1977 */
1917 1978 void
1918 1979 sigdeq(proc_t *p, kthread_t *t, int sig, sigqueue_t **qpp)
1919 1980 {
1920 1981 sigqueue_t **psqp, *sqp;
1921 1982
1922 1983 ASSERT(MUTEX_HELD(&p->p_lock));
1923 1984
1924 1985 *qpp = NULL;
1925 1986
1926 1987 if (t != NULL) {
1927 1988 sigdelset(&t->t_sig, sig);
1928 1989 sigdelset(&t->t_extsig, sig);
1929 1990 psqp = &t->t_sigqueue;
1930 1991 } else {
1931 1992 sigdelset(&p->p_sig, sig);
1932 1993 sigdelset(&p->p_extsig, sig);
1933 1994 psqp = &p->p_sigqueue;
1934 1995 }
1935 1996
1936 1997 for (;;) {
1937 1998 if ((sqp = *psqp) == NULL)
1938 1999 return;
1939 2000 if (sqp->sq_info.si_signo == sig)
1940 2001 break;
1941 2002 else
1942 2003 psqp = &sqp->sq_next;
1943 2004 }
1944 2005 *qpp = sqp;
1945 2006 *psqp = sqp->sq_next;
1946 2007 for (sqp = *psqp; sqp; sqp = sqp->sq_next) {
1947 2008 if (sqp->sq_info.si_signo == sig) {
1948 2009 if (t != (kthread_t *)NULL) {
1949 2010 sigaddset(&t->t_sig, sig);
1950 2011 t->t_sig_check = 1;
1951 2012 } else {
1952 2013 sigaddset(&p->p_sig, sig);
1953 2014 set_proc_ast(p);
1954 2015 }
1955 2016 break;
1956 2017 }
1957 2018 }
1958 2019 }
1959 2020
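Note how sigdeq() re-asserts the pending bit after unlinking: the trailing loop sets it again only if another queued entry carries the same signal number, which is what lets SIGCLDs be posted one at a time without loss. A user-land model of the unlink-and-rescan step, with illustrative types (SIGCLD is 18 on illumos):

#include <stdio.h>

struct sq {
	int signo;
	struct sq *next;
};

static struct sq *
deq(struct sq **head, int sig, int *still_pending)
{
	struct sq **pp, *q, *r;

	*still_pending = 0;
	for (pp = head; (q = *pp) != NULL; pp = &q->next)
		if (q->signo == sig)
			break;
	if (q == NULL)
		return (NULL);
	*pp = q->next;				/* unlink the first match */
	for (r = *pp; r != NULL; r = r->next)
		if (r->signo == sig) {
			*still_pending = 1;	/* another one is queued */
			break;
		}
	return (q);
}

int
main(void)
{
	struct sq b = { 18, NULL }, a = { 18, &b };	/* two queued SIGCLDs */
	struct sq *head = &a;
	int more;

	(void) deq(&head, 18, &more);
	(void) printf("still pending: %d\n", more);	/* 1 */
	(void) deq(&head, 18, &more);
	(void) printf("still pending: %d\n", more);	/* 0 */
	return (0);
}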
1960 2021 /*
1961 2022 * Delete a queued SIGCLD siginfo structure matching the k_siginfo_t argument.
1962 2023 */
1963 2024 void
1964 2025 sigcld_delete(k_siginfo_t *ip)
1965 2026 {
1966 2027 proc_t *p = curproc;
1967 2028 int another_sigcld = 0;
1968 2029 sigqueue_t **psqp, *sqp;
1969 2030
1970 2031 ASSERT(ip->si_signo == SIGCLD);
1971 2032
1972 2033 mutex_enter(&p->p_lock);
1973 2034
1974 2035 if (!sigismember(&p->p_sig, SIGCLD)) {
1975 2036 mutex_exit(&p->p_lock);
1976 2037 return;
1977 2038 }
1978 2039
1979 2040 psqp = &p->p_sigqueue;
1980 2041 for (;;) {
1981 2042 if ((sqp = *psqp) == NULL) {
1982 2043 mutex_exit(&p->p_lock);
1983 2044 return;
1984 2045 }
1985 2046 if (sqp->sq_info.si_signo == SIGCLD) {
1986 2047 if (sqp->sq_info.si_pid == ip->si_pid &&
1987 2048 sqp->sq_info.si_code == ip->si_code &&
1988 2049 sqp->sq_info.si_status == ip->si_status)
1989 2050 break;
1990 2051 another_sigcld = 1;
1991 2052 }
1992 2053 psqp = &sqp->sq_next;
1993 2054 }
1994 2055 *psqp = sqp->sq_next;
1995 2056
1996 2057 siginfofree(sqp);
1997 2058
1998 2059 for (sqp = *psqp; !another_sigcld && sqp; sqp = sqp->sq_next) {
1999 2060 if (sqp->sq_info.si_signo == SIGCLD)
2000 2061 another_sigcld = 1;
2001 2062 }
2002 2063
2003 2064 if (!another_sigcld) {
2004 2065 sigdelset(&p->p_sig, SIGCLD);
2005 2066 sigdelset(&p->p_extsig, SIGCLD);
2006 2067 }
2007 2068
2008 2069 mutex_exit(&p->p_lock);
2009 2070 }
2010 2071
2011 2072 /*
2012 2073 * Delete queued siginfo structures.
2013 2074 * If a non-null thread pointer is passed then delete from
2014 2075 * the thread queue, otherwise delete from the process queue.
2015 2076 */
2016 2077 void
2017 2078 sigdelq(proc_t *p, kthread_t *t, int sig)
2018 2079 {
2019 2080 sigqueue_t **psqp, *sqp;
2020 2081
2021 2082 /*
2022 2083 * We must be holding p->p_lock unless the process is
2023 2084 * being reaped or has failed to get started on fork.
2024 2085 */
2025 2086 ASSERT(MUTEX_HELD(&p->p_lock) ||
2026 2087 p->p_stat == SIDL || p->p_stat == SZOMB);
2027 2088
2028 2089 if (t != (kthread_t *)NULL)
2029 2090 psqp = &t->t_sigqueue;
2030 2091 else
2031 2092 psqp = &p->p_sigqueue;
2032 2093
2033 2094 while (*psqp) {
2034 2095 sqp = *psqp;
2035 2096 if (sig == 0 || sqp->sq_info.si_signo == sig) {
2036 2097 *psqp = sqp->sq_next;
2037 2098 siginfofree(sqp);
2038 2099 } else
2039 2100 psqp = &sqp->sq_next;
2040 2101 }
2041 2102 }
2042 2103
2043 2104 /*
2044 2105 * Insert a siginfo structure into a queue.
2045 2106 * If a non-null thread pointer is passed then add to the thread queue,
2046 2107 * otherwise add to the process queue.
2047 2108 *
2048 2109 * The function sigaddqins() is called with sigqueue already allocated.
2049 2110 * It is called from sigaddqa() and sigaddq() below.
2050 2111 *
2051 2112 * The value of si_code implicitly indicates whether sigp is to be
2052 2113 * explicitly queued, or to be queued to depth one.
2053 2114 */
2054 2115 static void
2055 2116 sigaddqins(proc_t *p, kthread_t *t, sigqueue_t *sigqp)
2056 2117 {
2057 2118 sigqueue_t **psqp;
2058 2119 int sig = sigqp->sq_info.si_signo;
2059 2120
2060 2121 sigqp->sq_external = (curproc != &p0) &&
2061 2122 (curproc->p_ct_process != p->p_ct_process);
2062 2123
2063 2124 /*
2064 2125 * issig_forreal() doesn't bother dequeueing signals if SKILLED
2065 2126 * is set, and even if it did, we would want to avoid the situation
2066 2127 * (which would be unique to SIGKILL) where one thread dequeued
2067 2128 * the sigqueue_t and another executed psig(). So we create a
2068 2129 * separate stash for SIGKILL's sigqueue_t. Because a second
2069 2130 * SIGKILL can set SEXTKILLED, we overwrite the existing entry
2070 2131 * if (and only if) it was non-extracontractual.
2071 2132 */
2072 2133 if (sig == SIGKILL) {
2073 2134 if (p->p_killsqp == NULL || !p->p_killsqp->sq_external) {
2074 2135 if (p->p_killsqp != NULL)
2075 2136 siginfofree(p->p_killsqp);
2076 2137 p->p_killsqp = sigqp;
2077 2138 sigqp->sq_next = NULL;
2078 2139 } else {
2079 2140 siginfofree(sigqp);
2080 2141 }
2081 2142 return;
2082 2143 }
2083 2144
2084 2145 ASSERT(sig >= 1 && sig < NSIG);
2085 2146 if (t != NULL) /* directed to a thread */
2086 2147 psqp = &t->t_sigqueue;
2087 2148 else /* directed to a process */
2088 2149 psqp = &p->p_sigqueue;
2089 2150 if (SI_CANQUEUE(sigqp->sq_info.si_code) &&
2090 2151 sigismember(&p->p_siginfo, sig)) {
2091 2152 for (; *psqp != NULL; psqp = &(*psqp)->sq_next)
2092 2153 ;
2093 2154 } else {
2094 2155 for (; *psqp != NULL; psqp = &(*psqp)->sq_next) {
2095 2156 if ((*psqp)->sq_info.si_signo == sig) {
2096 2157 siginfofree(sigqp);
2097 2158 return;
2098 2159 }
2099 2160 }
2100 2161 }
2101 2162 *psqp = sigqp;
2102 2163 sigqp->sq_next = NULL;
2103 2164 }
2104 2165
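In sigaddqins() above, "queued to depth one" means that for non-queueable si_code values a second pending instance of the same signal is dropped rather than appended. A sketch of just those two policies (the model folds the SI_CANQUEUE() and p_siginfo membership tests into a single canqueue flag; all names are illustrative):

#include <stdio.h>
#include <stdlib.h>

struct sq {
	int signo;
	int canqueue;		/* stands in for SI_CANQUEUE() && p_siginfo */
	struct sq *next;
};

static void
addqins(struct sq **head, struct sq *q)
{
	struct sq **pp = head;

	if (q->canqueue) {
		while (*pp != NULL)	/* queueable: append unconditionally */
			pp = &(*pp)->next;
	} else {
		for (; *pp != NULL; pp = &(*pp)->next) {
			if ((*pp)->signo == q->signo) {
				free(q);	/* depth one: drop duplicate */
				return;
			}
		}
	}
	*pp = q;
	q->next = NULL;
}

int
main(void)
{
	struct sq *head = NULL, *q;
	int i, n = 0;

	for (i = 0; i < 3; i++) {	/* three copies of signal 1 */
		q = calloc(1, sizeof (*q));
		q->signo = 1;
		addqins(&head, q);
	}
	for (q = head; q != NULL; q = q->next)
		n++;
	(void) printf("queued: %d\n", n);	/* 1: collapsed to depth one */
	return (0);
}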
2105 2166 /*
2106 2167 * The function sigaddqa() is called with sigqueue already allocated.
2107 2168 * If the signal is ignored, discard it but guarantee KILL and generation semantics.
2108 2169 * It is called from sigqueue() and other places.
2109 2170 */
2110 2171 void
2111 2172 sigaddqa(proc_t *p, kthread_t *t, sigqueue_t *sigqp)
2112 2173 {
2113 2174 int sig = sigqp->sq_info.si_signo;
2114 2175
2115 2176 ASSERT(MUTEX_HELD(&p->p_lock));
2116 2177 ASSERT(sig >= 1 && sig < NSIG);
2117 2178
2118 - if (sig_discardable(p, sig))
2179 + if (sig_discardable(p, t, sig))
2119 2180 siginfofree(sigqp);
2120 2181 else
2121 2182 sigaddqins(p, t, sigqp);
2122 2183
2123 2184 sigtoproc(p, t, sig);
2124 2185 }
2125 2186
2126 2187 /*
2127 2188 * Allocate the sigqueue_t structure and call sigaddqins().
2128 2189 */
2129 2190 void
2130 2191 sigaddq(proc_t *p, kthread_t *t, k_siginfo_t *infop, int km_flags)
2131 2192 {
2132 2193 sigqueue_t *sqp;
2133 2194 int sig = infop->si_signo;
2134 2195
2135 2196 ASSERT(MUTEX_HELD(&p->p_lock));
2136 2197 ASSERT(sig >= 1 && sig < NSIG);
2137 2198
2138 2199 /*
2139 2200 * If the signal will be discarded by sigtoproc() or
2140 2201 * if the process isn't requesting siginfo and it isn't
2141 2202 * blocking the signal (it *could* change its mind while
2142 2203 * the signal is pending) then don't bother creating one.
2143 2204 */
2144 - if (!sig_discardable(p, sig) &&
2205 + if (!sig_discardable(p, t, sig) &&
2145 2206 (sigismember(&p->p_siginfo, sig) ||
2146 2207 (curproc->p_ct_process != p->p_ct_process) ||
2147 2208 (sig == SIGCLD && SI_FROMKERNEL(infop))) &&
2148 2209 ((sqp = kmem_alloc(sizeof (sigqueue_t), km_flags)) != NULL)) {
2149 2210 bcopy(infop, &sqp->sq_info, sizeof (k_siginfo_t));
2150 2211 sqp->sq_func = NULL;
2151 2212 sqp->sq_next = NULL;
2152 2213 sigaddqins(p, t, sqp);
2153 2214 }
2154 2215 sigtoproc(p, t, sig);
2155 2216 }
2156 2217
2157 2218 /*
2158 2219 * Handle stop-on-fault processing for the debugger. Returns 0
2159 2220 * if the fault is cleared during the stop, nonzero if it isn't.
2160 2221 */
2161 2222 int
2162 2223 stop_on_fault(uint_t fault, k_siginfo_t *sip)
2163 2224 {
2164 2225 proc_t *p = ttoproc(curthread);
2165 2226 klwp_t *lwp = ttolwp(curthread);
2166 2227
2167 2228 ASSERT(prismember(&p->p_fltmask, fault));
2168 2229
2169 2230 /*
2170 2231 * Record current fault and siginfo structure so debugger can
2171 2232 * find it.
2172 2233 */
2173 2234 mutex_enter(&p->p_lock);
2174 2235 lwp->lwp_curflt = (uchar_t)fault;
2175 2236 lwp->lwp_siginfo = *sip;
2176 2237
2177 2238 stop(PR_FAULTED, fault);
2178 2239
2179 2240 fault = lwp->lwp_curflt;
2180 2241 lwp->lwp_curflt = 0;
2181 2242 mutex_exit(&p->p_lock);
2182 2243 return (fault);
2183 2244 }
2184 2245
2185 2246 void
2186 2247 sigorset(k_sigset_t *s1, const k_sigset_t *s2)
2187 2248 {
2188 2249 s1->__sigbits[0] |= s2->__sigbits[0];
2189 2250 s1->__sigbits[1] |= s2->__sigbits[1];
2190 2251 s1->__sigbits[2] |= s2->__sigbits[2];
2191 2252 }
2192 2253
2193 2254 void
2194 2255 sigandset(k_sigset_t *s1, const k_sigset_t *s2)
2195 2256 {
2196 2257 s1->__sigbits[0] &= s2->__sigbits[0];
2197 2258 s1->__sigbits[1] &= s2->__sigbits[1];
2198 2259 s1->__sigbits[2] &= s2->__sigbits[2];
2199 2260 }
2200 2261
2201 2262 void
2202 2263 sigdiffset(k_sigset_t *s1, const k_sigset_t *s2)
2203 2264 {
2204 2265 s1->__sigbits[0] &= ~(s2->__sigbits[0]);
2205 2266 s1->__sigbits[1] &= ~(s2->__sigbits[1]);
2206 2267 s1->__sigbits[2] &= ~(s2->__sigbits[2]);
2207 2268 }
2208 2269
2209 2270 /*
2210 2271 * Return non-zero if curthread->t_sig_check should be set to 1, that is,
2211 2272 * if there are any signals the thread might take on return from the kernel.
2212 2273 * If k_sigset_t's were a single word, we would do:
2213 2274 * return (((p->p_sig | t->t_sig) & ~t->t_hold) & fillset);
2214 2275 */
2215 2276 int
2216 2277 sigcheck(proc_t *p, kthread_t *t)
2217 2278 {
2218 2279 sc_shared_t *tdp = t->t_schedctl;
2219 2280
2220 2281 /*
2221 2282 * If signals are blocked via the schedctl interface
2222 2283 * then we only check for the unmaskable signals.
2223 2284 * The unmaskable signal numbers should all be contained
2224 2285 * in __sigbits[0] and we assume this for speed.
2225 2286 */
2226 2287 #if (CANTMASK1 == 0 && CANTMASK2 == 0)
2227 2288 if (tdp != NULL && tdp->sc_sigblock)
2228 2289 return ((p->p_sig.__sigbits[0] | t->t_sig.__sigbits[0]) &
2229 2290 CANTMASK0);
2230 2291 #else
2231 2292 #error "fix me: CANTMASK1 and CANTMASK2 are not zero"
2232 2293 #endif
2233 2294
2234 2295 /* see uts/common/sys/signal.h for why this must be true */
2235 2296 #if ((MAXSIG > (2 * 32)) && (MAXSIG <= (3 * 32)))
2236 2297 return (((p->p_sig.__sigbits[0] | t->t_sig.__sigbits[0]) &
2237 2298 ~t->t_hold.__sigbits[0]) |
2238 2299 ((p->p_sig.__sigbits[1] | t->t_sig.__sigbits[1]) &
2239 2300 ~t->t_hold.__sigbits[1]) |
2240 2301 (((p->p_sig.__sigbits[2] | t->t_sig.__sigbits[2]) &
2241 2302 ~t->t_hold.__sigbits[2]) & FILLSET2));
2242 2303 #else
2243 2304 #error "fix me: MAXSIG out of bounds"
2244 2305 #endif
2245 2306 }
2246 2307
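The conditional block above is the single-word test ((p_sig | t_sig) & ~t_hold) unrolled across three 32-bit words, with the third word clipped to the valid signals by FILLSET2. A compilable restatement of that arithmetic with made-up sample values (SIGTERM is 15, hence bit 14):

#include <stdint.h>
#include <stdio.h>

static int
model_sigcheck(const uint32_t psig[3], const uint32_t tsig[3],
    const uint32_t thold[3], uint32_t fillset2)
{
	return ((((psig[0] | tsig[0]) & ~thold[0]) |
	    ((psig[1] | tsig[1]) & ~thold[1]) |
	    (((psig[2] | tsig[2]) & ~thold[2]) & fillset2)) != 0);
}

int
main(void)
{
	uint32_t psig[3] = { 0, 0, 0 };
	uint32_t tsig[3] = { 1u << 14, 0, 0 };	/* SIGTERM pending... */
	uint32_t hold[3] = { 1u << 14, 0, 0 };	/* ...but held */

	(void) printf("deliverable: %d\n",
	    model_sigcheck(psig, tsig, hold, 0xffffffffu));	/* 0 */
	hold[0] = 0;
	(void) printf("deliverable: %d\n",
	    model_sigcheck(psig, tsig, hold, 0xffffffffu));	/* 1 */
	return (0);
}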
2247 2308 void
2248 2309 sigintr(k_sigset_t *smask, int intable)
2249 2310 {
2250 2311 proc_t *p;
2251 2312 int owned;
2252 2313 k_sigset_t lmask; /* local copy of cantmask */
2253 2314 klwp_t *lwp = ttolwp(curthread);
2254 2315
2255 2316 /*
2256 2317 * Mask out all signals except SIGHUP, SIGINT, SIGQUIT
2257 2318 * and SIGTERM. (Preserving the existing masks).
2258 2319 * This function supports the -intr nfs and ufs mount option.
2259 2320 */
2260 2321
2261 2322 /*
2262 2323 * don't do kernel threads
2263 2324 */
2264 2325 if (lwp == NULL)
2265 2326 return;
2266 2327
2267 2328 /*
2268 2329 * get access to signal mask
2269 2330 */
2270 2331 p = ttoproc(curthread);
2271 2332 owned = mutex_owned(&p->p_lock); /* this is filthy */
2272 2333 if (!owned)
2273 2334 mutex_enter(&p->p_lock);
2274 2335
2275 2336 /*
2276 2337 * remember the current mask
2277 2338 */
2278 2339 schedctl_finish_sigblock(curthread);
2279 2340 *smask = curthread->t_hold;
2280 2341
2281 2342 /*
2282 2343 * mask out all signals
2283 2344 */
2284 2345 sigfillset(&curthread->t_hold);
2285 2346
2286 2347 /*
2287 2348 * Unmask the non-maskable signals (e.g., KILL), as long as
2288 2349 * they aren't already masked (which could happen at exit).
2289 2350 * The first sigdiffset sets lmask to (cantmask & ~curhold). The
2290 2351 * second sets the current hold mask to (~0 & ~lmask), which reduces
2291 2352 * to (~cantmask | curhold).
2292 2353 */
2293 2354 lmask = cantmask;
2294 2355 sigdiffset(&lmask, smask);
2295 2356 sigdiffset(&curthread->t_hold, &lmask);
2296 2357
2297 2358 /*
2298 2359 * Re-enable HUP, QUIT, and TERM iff they were originally enabled
2299 2360 * Re-enable INT if it was originally enabled and the NFS mount option
2300 2361 * nointr is not set.
2301 2362 */
2302 2363 if (!sigismember(smask, SIGHUP))
2303 2364 sigdelset(&curthread->t_hold, SIGHUP);
2304 2365 if (!sigismember(smask, SIGINT) && intable)
2305 2366 sigdelset(&curthread->t_hold, SIGINT);
2306 2367 if (!sigismember(smask, SIGQUIT))
2307 2368 sigdelset(&curthread->t_hold, SIGQUIT);
2308 2369 if (!sigismember(smask, SIGTERM))
2309 2370 sigdelset(&curthread->t_hold, SIGTERM);
2310 2371
2311 2372 /*
2312 2373 * release access to signal mask
2313 2374 */
2314 2375 if (!owned)
2315 2376 mutex_exit(&p->p_lock);
2316 2377
2317 2378 /*
2318 2379 * Indicate that this lwp is not to be stopped.
2319 2380 */
2320 2381 lwp->lwp_nostop++;
2321 2382
2322 2383 }
2323 2384
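The set algebra in sigintr()'s comment can be spot-checked mechanically: with fill = ~0, the new hold mask fill & ~(cant & ~cur) reduces to ~cant | cur. A one-word sketch where cant and cur are arbitrary stand-ins for cantmask and the saved t_hold:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint32_t cant = 0x00000300;	/* stand-in for cantmask */
	uint32_t cur = 0x0000f00f;	/* stand-in for the saved t_hold */
	uint32_t lmask = cant & ~cur;	/* the first sigdiffset */
	uint32_t hold = ~0u & ~lmask;	/* sigfillset + second sigdiffset */

	assert(hold == (~cant | cur));	/* the reduction in the comment */
	(void) printf("identity holds: hold = 0x%08x\n", hold);
	return (0);
}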
2324 2385 void
2325 2386 sigunintr(k_sigset_t *smask)
2326 2387 {
2327 2388 proc_t *p;
2328 2389 int owned;
2329 2390 klwp_t *lwp = ttolwp(curthread);
2330 2391
2331 2392 /*
2332 2393 * Reset previous mask (See sigintr() above)
2333 2394 */
2334 2395 if (lwp != NULL) {
2335 2396 lwp->lwp_nostop--; /* restore lwp stoppability */
2336 2397 p = ttoproc(curthread);
2337 2398 owned = mutex_owned(&p->p_lock); /* this is filthy */
2338 2399 if (!owned)
2339 2400 mutex_enter(&p->p_lock);
2340 2401 curthread->t_hold = *smask;
2341 2402 /* so unmasked signals will be seen */
2342 2403 curthread->t_sig_check = 1;
2343 2404 if (!owned)
2344 2405 mutex_exit(&p->p_lock);
2345 2406 }
2346 2407 }
2347 2408
2348 2409 void
2349 2410 sigreplace(k_sigset_t *newmask, k_sigset_t *oldmask)
2350 2411 {
2351 2412 proc_t *p;
2352 2413 int owned;
2353 2414 /*
2354 2415 * Save current signal mask in oldmask, then
2355 2416 * set it to newmask.
2356 2417 */
2357 2418 if (ttolwp(curthread) != NULL) {
2358 2419 p = ttoproc(curthread);
2359 2420 owned = mutex_owned(&p->p_lock); /* this is filthy */
2360 2421 if (!owned)
2361 2422 mutex_enter(&p->p_lock);
2362 2423 schedctl_finish_sigblock(curthread);
2363 2424 if (oldmask != NULL)
2364 2425 *oldmask = curthread->t_hold;
2365 2426 curthread->t_hold = *newmask;
2366 2427 curthread->t_sig_check = 1;
2367 2428 if (!owned)
2368 2429 mutex_exit(&p->p_lock);
2369 2430 }
2370 2431 }
2371 2432
2372 2433 /*
2373 2434 * Return true if the signal number is in range
2374 2435 * and the signal code specifies signal queueing.
2375 2436 */
2376 2437 int
2377 2438 sigwillqueue(int sig, int code)
2378 2439 {
2379 2440 if (sig >= 0 && sig < NSIG) {
2380 2441 switch (code) {
2381 2442 case SI_QUEUE:
2382 2443 case SI_TIMER:
2383 2444 case SI_ASYNCIO:
2384 2445 case SI_MESGQ:
2385 2446 return (1);
2386 2447 }
2387 2448 }
2388 2449 return (0);
2389 2450 }
2390 2451
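From user-land, the codes accepted by sigwillqueue() are what sigqueue(3C) generates: a signal sent that way arrives with si_code == SI_QUEUE and carries its sigval, exactly the case sigsendproc() routes through sigqalloc(). A minimal self-send sketch:

#include <signal.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	sigset_t set;
	siginfo_t si;
	union sigval v;

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	(void) sigprocmask(SIG_BLOCK, &set, NULL);

	v.sival_int = 42;
	(void) sigqueue(getpid(), SIGUSR1, v);	/* queued with SI_QUEUE */

	if (sigwaitinfo(&set, &si) == SIGUSR1)
		(void) printf("si_code=%d sival_int=%d\n",
		    si.si_code, si.si_value.sival_int);
	return (0);
}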
2391 2452 /*
2392 2453 * The pre-allocated pool (with _SIGQUEUE_PREALLOC entries) is
2393 2454 * allocated at the first sigqueue/signotify call.
2394 2455 */
2395 2456 sigqhdr_t *
2396 2457 sigqhdralloc(size_t size, uint_t maxcount)
2397 2458 {
2398 2459 size_t i;
2399 2460 sigqueue_t *sq, *next;
2400 2461 sigqhdr_t *sqh;
2401 2462
2402 2463 /*
2403 2464 * Before the introduction of process.max-sigqueue-size
2404 2465 * _SC_SIGQUEUE_MAX had this static value.
2405 2466 */
2406 2467 #define _SIGQUEUE_PREALLOC 32
2407 2468
2408 2469 i = (_SIGQUEUE_PREALLOC * size) + sizeof (sigqhdr_t);
2409 2470 ASSERT(maxcount <= INT_MAX);
2410 2471 sqh = kmem_alloc(i, KM_SLEEP);
2411 2472 sqh->sqb_count = maxcount;
2412 2473 sqh->sqb_maxcount = maxcount;
2413 2474 sqh->sqb_size = i;
2414 2475 sqh->sqb_pexited = 0;
2415 2476 sqh->sqb_sent = 0;
2416 2477 sqh->sqb_free = sq = (sigqueue_t *)(sqh + 1);
2417 2478 for (i = _SIGQUEUE_PREALLOC - 1; i != 0; i--) {
2418 2479 next = (sigqueue_t *)((uintptr_t)sq + size);
2419 2480 sq->sq_next = next;
2420 2481 sq = next;
2421 2482 }
2422 2483 sq->sq_next = NULL;
2423 2484 cv_init(&sqh->sqb_cv, NULL, CV_DEFAULT, NULL);
2424 2485 mutex_init(&sqh->sqb_lock, NULL, MUTEX_DEFAULT, NULL);
2425 2486 return (sqh);
2426 2487 }
2427 2488
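sigqhdralloc() carves a single allocation into a header followed by _SIGQUEUE_PREALLOC fixed-size entries threaded onto sqb_free. A user-land sketch of the same carving, with illustrative names (the entry size must be at least a pointer wide):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define	PREALLOC	32

struct entry { struct entry *next; };

struct pool {
	struct entry *free;	/* head of the carved free list */
	size_t total;
};

static struct pool *
pool_alloc(size_t esize)
{
	struct pool *p;
	struct entry *e;
	int i;

	p = malloc(sizeof (*p) + PREALLOC * esize);
	if (p == NULL)
		return (NULL);
	p->total = sizeof (*p) + PREALLOC * esize;
	p->free = e = (struct entry *)(p + 1);	/* entries follow header */
	for (i = 0; i < PREALLOC - 1; i++) {	/* thread the free list */
		e->next = (struct entry *)((uintptr_t)e + esize);
		e = e->next;
	}
	e->next = NULL;
	return (p);
}

int
main(void)
{
	struct pool *p = pool_alloc(64);
	struct entry *e;
	int n = 0;

	for (e = p->free; e != NULL; e = e->next)
		n++;
	(void) printf("%d entries carved\n", n);	/* 32 */
	free(p);
	return (0);
}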
2428 2489 static void sigqrel(sigqueue_t *);
2429 2490
2430 2491 /*
2431 2492 * Allocate a sigqueue/signotify structure from the per process
2432 2493 * pre-allocated pool or allocate a new sigqueue/signotify structure
2433 2494 * if the pre-allocated pool is exhausted.
2434 2495 */
2435 2496 sigqueue_t *
2436 2497 sigqalloc(sigqhdr_t *sqh)
2437 2498 {
2438 2499 sigqueue_t *sq = NULL;
2439 2500
2440 2501 ASSERT(MUTEX_HELD(&curproc->p_lock));
2441 2502
2442 2503 if (sqh != NULL) {
2443 2504 mutex_enter(&sqh->sqb_lock);
2444 2505 if (sqh->sqb_count > 0) {
2445 2506 sqh->sqb_count--;
2446 2507 if (sqh->sqb_free == NULL) {
2447 2508 /*
2448 2509 * The pre-allocated pool is exhausted.
2449 2510 */
2450 2511 sq = kmem_alloc(sizeof (sigqueue_t), KM_SLEEP);
2451 2512 sq->sq_func = NULL;
2452 2513 } else {
2453 2514 sq = sqh->sqb_free;
2454 2515 sq->sq_func = sigqrel;
2455 2516 sqh->sqb_free = sq->sq_next;
2456 2517 }
2457 2518 mutex_exit(&sqh->sqb_lock);
2458 2519 bzero(&sq->sq_info, sizeof (k_siginfo_t));
2459 2520 sq->sq_backptr = sqh;
2460 2521 sq->sq_next = NULL;
2461 2522 sq->sq_external = 0;
2462 2523 } else {
2463 2524 mutex_exit(&sqh->sqb_lock);
2464 2525 }
2465 2526 }
2466 2527 return (sq);
2467 2528 }
2468 2529
2469 2530 /*
2470 2531 * Return a sigqueue structure back to the pre-allocated pool.
2471 2532 */
2472 2533 static void
2473 2534 sigqrel(sigqueue_t *sq)
2474 2535 {
2475 2536 sigqhdr_t *sqh;
2476 2537
2477 2538 /* make sure that p_lock of the affected process is held */
2478 2539
2479 2540 sqh = (sigqhdr_t *)sq->sq_backptr;
2480 2541 mutex_enter(&sqh->sqb_lock);
2481 2542 if (sqh->sqb_pexited && sqh->sqb_sent == 1) {
2482 2543 mutex_exit(&sqh->sqb_lock);
2483 2544 cv_destroy(&sqh->sqb_cv);
2484 2545 mutex_destroy(&sqh->sqb_lock);
2485 2546 kmem_free(sqh, sqh->sqb_size);
2486 2547 } else {
2487 2548 sqh->sqb_count++;
2488 2549 sqh->sqb_sent--;
2489 2550 sq->sq_next = sqh->sqb_free;
2490 2551 sq->sq_backptr = NULL;
2491 2552 sqh->sqb_free = sq;
2492 2553 cv_signal(&sqh->sqb_cv);
2493 2554 mutex_exit(&sqh->sqb_lock);
2494 2555 }
2495 2556 }
2496 2557
2497 2558 /*
2498 2559 * Free up the pre-allocated sigqueue headers of sigqueue pool
2499 2560 * and signotify pool, if possible.
2500 2561 * Called only by the owning process during exec() and exit().
2501 2562 */
2502 2563 void
2503 2564 sigqfree(proc_t *p)
2504 2565 {
2505 2566 ASSERT(MUTEX_HELD(&p->p_lock));
2506 2567
2507 2568 if (p->p_sigqhdr != NULL) { /* sigqueue pool */
2508 2569 sigqhdrfree(p->p_sigqhdr);
2509 2570 p->p_sigqhdr = NULL;
2510 2571 }
2511 2572 if (p->p_signhdr != NULL) { /* signotify pool */
2512 2573 sigqhdrfree(p->p_signhdr);
2513 2574 p->p_signhdr = NULL;
2514 2575 }
2515 2576 }
2516 2577
2517 2578 /*
2518 2579 * Free up the pre-allocated header and sigq pool if possible.
2519 2580 */
2520 2581 void
2521 2582 sigqhdrfree(sigqhdr_t *sqh)
2522 2583 {
2523 2584 mutex_enter(&sqh->sqb_lock);
2524 2585 if (sqh->sqb_sent == 0) {
2525 2586 mutex_exit(&sqh->sqb_lock);
2526 2587 cv_destroy(&sqh->sqb_cv);
2527 2588 mutex_destroy(&sqh->sqb_lock);
2528 2589 kmem_free(sqh, sqh->sqb_size);
2529 2590 } else {
2530 2591 sqh->sqb_pexited = 1;
2531 2592 mutex_exit(&sqh->sqb_lock);
2532 2593 }
2533 2594 }
2534 2595
2535 2596 /*
2536 2597 * Free up a single sigqueue structure.
2537 2598 * No other code should free a sigqueue directly.
2538 2599 */
2539 2600 void
2540 2601 siginfofree(sigqueue_t *sqp)
2541 2602 {
2542 2603 if (sqp != NULL) {
2543 2604 if (sqp->sq_func != NULL)
2544 2605 (sqp->sq_func)(sqp);
2545 2606 else
2546 2607 kmem_free(sqp, sizeof (sigqueue_t));
2547 2608 }
2548 2609 }
2549 2610
2550 2611 /*
2551 2612 * Generate a synchronous signal caused by a hardware
2552 2613 * condition encountered by an lwp. Called from trap().
2553 2614 */
2554 2615 void
2555 2616 trapsig(k_siginfo_t *ip, int restartable)
2556 2617 {
2557 2618 proc_t *p = ttoproc(curthread);
2558 2619 int sig = ip->si_signo;
2559 2620 sigqueue_t *sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);
2560 2621
2561 2622 ASSERT(sig > 0 && sig < NSIG);
2562 2623
2563 2624 if (curthread->t_dtrace_on)
2564 2625 dtrace_safe_synchronous_signal();
2565 2626
2566 2627 mutex_enter(&p->p_lock);
2567 2628 schedctl_finish_sigblock(curthread);
2568 2629 /*
2569 2630 * Avoid a possible infinite loop if the lwp is holding the
2570 2631 * signal generated by a trap of a restartable instruction or
2571 2632 * if the signal so generated is being ignored by the process.
2572 2633 */
2573 2634 if (restartable &&
2574 2635 (sigismember(&curthread->t_hold, sig) ||
2575 2636 p->p_user.u_signal[sig-1] == SIG_IGN)) {
2576 2637 sigdelset(&curthread->t_hold, sig);
2577 2638 p->p_user.u_signal[sig-1] = SIG_DFL;
2578 2639 sigdelset(&p->p_ignore, sig);
2579 2640 }
2580 2641 bcopy(ip, &sqp->sq_info, sizeof (k_siginfo_t));
2581 2642 sigaddqa(p, curthread, sqp);
2582 2643 mutex_exit(&p->p_lock);
2583 2644 }
2584 2645
2585 2646 /*
2586 2647 * Dispatch the real time profiling signal in the traditional way,
2587 2648 * honoring all of the /proc tracing mechanism built into issig().
2588 2649 */
2589 2650 static void
2590 2651 realsigprof_slow(int sysnum, int nsysarg, int error)
2591 2652 {
2592 2653 kthread_t *t = curthread;
2593 2654 proc_t *p = ttoproc(t);
2594 2655 klwp_t *lwp = ttolwp(t);
2595 2656 k_siginfo_t *sip = &lwp->lwp_siginfo;
2596 2657 void (*func)();
2597 2658
2598 2659 mutex_enter(&p->p_lock);
2599 2660 func = PTOU(p)->u_signal[SIGPROF - 1];
2600 2661 if (p->p_rprof_cyclic == CYCLIC_NONE ||
2601 2662 func == SIG_DFL || func == SIG_IGN) {
2602 2663 bzero(t->t_rprof, sizeof (*t->t_rprof));
2603 2664 mutex_exit(&p->p_lock);
2604 2665 return;
2605 2666 }
2606 2667 if (sigismember(&t->t_hold, SIGPROF)) {
2607 2668 mutex_exit(&p->p_lock);
2608 2669 return;
2609 2670 }
2610 2671 sip->si_signo = SIGPROF;
2611 2672 sip->si_code = PROF_SIG;
2612 2673 sip->si_errno = error;
2613 2674 hrt2ts(gethrtime(), &sip->si_tstamp);
2614 2675 sip->si_syscall = sysnum;
2615 2676 sip->si_nsysarg = nsysarg;
2616 2677 sip->si_fault = lwp->lwp_lastfault;
2617 2678 sip->si_faddr = lwp->lwp_lastfaddr;
2618 2679 lwp->lwp_lastfault = 0;
2619 2680 lwp->lwp_lastfaddr = NULL;
2620 2681 sigtoproc(p, t, SIGPROF);
2621 2682 mutex_exit(&p->p_lock);
2622 2683 ASSERT(lwp->lwp_cursig == 0);
2623 2684 if (issig(FORREAL))
2624 2685 psig();
2625 2686 sip->si_signo = 0;
2626 2687 bzero(t->t_rprof, sizeof (*t->t_rprof));
2627 2688 }
2628 2689
2629 2690 /*
2630 2691 * We are not tracing the SIGPROF signal, or doing any other unnatural
2631 2692 * acts, like watchpoints, so dispatch the real time profiling signal
2632 2693 * directly, bypassing all of the overhead built into issig().
2633 2694 */
2634 2695 static void
2635 2696 realsigprof_fast(int sysnum, int nsysarg, int error)
2636 2697 {
2637 2698 kthread_t *t = curthread;
2638 2699 proc_t *p = ttoproc(t);
2639 2700 klwp_t *lwp = ttolwp(t);
2640 2701 k_siginfo_t *sip = &lwp->lwp_siginfo;
2641 2702 void (*func)();
2642 2703 int rc;
2643 2704 int code;
2644 2705
2645 2706 /*
2646 2707 * We don't need to acquire p->p_lock here;
2647 2708 * we are manipulating thread-private data.
2648 2709 */
2649 2710 func = PTOU(p)->u_signal[SIGPROF - 1];
2650 2711 if (p->p_rprof_cyclic == CYCLIC_NONE ||
2651 2712 func == SIG_DFL || func == SIG_IGN) {
2652 2713 bzero(t->t_rprof, sizeof (*t->t_rprof));
2653 2714 return;
2654 2715 }
2655 2716 if (lwp->lwp_cursig != 0 ||
2656 2717 lwp->lwp_curinfo != NULL ||
2657 2718 sigismember(&t->t_hold, SIGPROF)) {
2658 2719 return;
2659 2720 }
2660 2721 sip->si_signo = SIGPROF;
2661 2722 sip->si_code = PROF_SIG;
2662 2723 sip->si_errno = error;
2663 2724 hrt2ts(gethrtime(), &sip->si_tstamp);
2664 2725 sip->si_syscall = sysnum;
2665 2726 sip->si_nsysarg = nsysarg;
2666 2727 sip->si_fault = lwp->lwp_lastfault;
2667 2728 sip->si_faddr = lwp->lwp_lastfaddr;
2668 2729 lwp->lwp_lastfault = 0;
2669 2730 lwp->lwp_lastfaddr = NULL;
2670 2731 if (t->t_flag & T_TOMASK)
2671 2732 t->t_flag &= ~T_TOMASK;
2672 2733 else
2673 2734 lwp->lwp_sigoldmask = t->t_hold;
2674 2735 sigorset(&t->t_hold, &PTOU(p)->u_sigmask[SIGPROF - 1]);
2675 2736 if (!sigismember(&PTOU(p)->u_signodefer, SIGPROF))
2676 2737 sigaddset(&t->t_hold, SIGPROF);
2677 2738 lwp->lwp_extsig = 0;
2678 2739 lwp->lwp_ru.nsignals++;
2679 2740 if (p->p_model == DATAMODEL_NATIVE)
2680 2741 rc = sendsig(SIGPROF, sip, func);
2681 2742 #ifdef _SYSCALL32_IMPL
2682 2743 else
2683 2744 rc = sendsig32(SIGPROF, sip, func);
2684 2745 #endif /* _SYSCALL32_IMPL */
2685 2746 sip->si_signo = 0;
2686 2747 bzero(t->t_rprof, sizeof (*t->t_rprof));
2687 2748 if (rc == 0) {
2688 2749 /*
2689 2750 * sendsig() failed; we must dump core with a SIGSEGV.
2690 2751 * See psig(). This code is copied from there.
2691 2752 */
2692 2753 lwp->lwp_cursig = SIGSEGV;
2693 2754 code = CLD_KILLED;
2694 2755 proc_is_exiting(p);
2695 2756 if (exitlwps(1) != 0) {
2696 2757 mutex_enter(&p->p_lock);
2697 2758 lwp_exit();
2698 2759 }
2699 2760 if (audit_active == C2AUDIT_LOADED)
2700 2761 audit_core_start(SIGSEGV);
2701 2762 if (core(SIGSEGV, 0) == 0)
2702 2763 code = CLD_DUMPED;
2703 2764 if (audit_active == C2AUDIT_LOADED)
2704 2765 audit_core_finish(code);
2705 2766 exit(code, SIGSEGV);
2706 2767 }
2707 2768 }
2708 2769
2709 2770 /*
2710 2771 * Arrange for the real time profiling signal to be dispatched.
2711 2772 */
2712 2773 void
2713 2774 realsigprof(int sysnum, int nsysarg, int error)
2714 2775 {
2715 2776 kthread_t *t = curthread;
2716 2777 proc_t *p = ttoproc(t);
2717 2778
2718 2779 if (t->t_rprof->rp_anystate == 0)
2719 2780 return;
2720 2781
2721 2782 schedctl_finish_sigblock(t);
2722 2783
2723 2784 /* test for any activity that requires p->p_lock */
2724 2785 if (tracing(p, SIGPROF) || pr_watch_active(p) ||
2725 2786 sigismember(&PTOU(p)->u_sigresethand, SIGPROF)) {
2726 2787 /* do it the classic slow way */
2727 2788 realsigprof_slow(sysnum, nsysarg, error);
2728 2789 } else {
2729 2790 /* do it the cheating-a-little fast way */
2730 2791 realsigprof_fast(sysnum, nsysarg, error);
2731 2792 }
2732 2793 }
2733 2794
2734 2795 #ifdef _SYSCALL32_IMPL
2735 2796
2736 2797 /*
2737 2798 * It's tricky to transmit a sigval between 32-bit and 64-bit
2738 2799 * process, since in the 64-bit world, a pointer and an integer
2739 2800 * are different sizes. Since we're constrained by the standards
2740 2801 * world not to change the types, and it's unclear how useful it is
2741 2802 * to send pointers between address spaces this way, we preserve
2742 2803 * the 'int' interpretation for 32-bit processes interoperating
2743 2804 * with 64-bit processes. The full semantics (pointers or integers)
2744 2805 * are available for N-bit processes interoperating with N-bit
2745 2806 * processes.
2746 2807 */
2747 2808 void
2748 2809 siginfo_kto32(const k_siginfo_t *src, siginfo32_t *dest)
2749 2810 {
2750 2811 bzero(dest, sizeof (*dest));
2751 2812
2752 2813 /*
2753 2814 * The absolute minimum content is si_signo and si_code.
2754 2815 */
2755 2816 dest->si_signo = src->si_signo;
2756 2817 if ((dest->si_code = src->si_code) == SI_NOINFO)
2757 2818 return;
2758 2819
2759 2820 /*
2760 2821 * A siginfo generated by user level is structured
2761 2822 * differently from one generated by the kernel.
2762 2823 */
2763 2824 if (SI_FROMUSER(src)) {
2764 2825 dest->si_pid = src->si_pid;
2765 2826 dest->si_ctid = src->si_ctid;
2766 2827 dest->si_zoneid = src->si_zoneid;
2767 2828 dest->si_uid = src->si_uid;
2768 2829 if (SI_CANQUEUE(src->si_code))
2769 2830 dest->si_value.sival_int =
2770 2831 (int32_t)src->si_value.sival_int;
2771 2832 return;
2772 2833 }
2773 2834
2774 2835 dest->si_errno = src->si_errno;
2775 2836
2776 2837 switch (src->si_signo) {
2777 2838 default:
2778 2839 dest->si_pid = src->si_pid;
2779 2840 dest->si_ctid = src->si_ctid;
2780 2841 dest->si_zoneid = src->si_zoneid;
2781 2842 dest->si_uid = src->si_uid;
2782 2843 dest->si_value.sival_int = (int32_t)src->si_value.sival_int;
2783 2844 break;
2784 2845 case SIGCLD:
2785 2846 dest->si_pid = src->si_pid;
2786 2847 dest->si_ctid = src->si_ctid;
2787 2848 dest->si_zoneid = src->si_zoneid;
2788 2849 dest->si_status = src->si_status;
2789 2850 dest->si_stime = src->si_stime;
2790 2851 dest->si_utime = src->si_utime;
2791 2852 break;
2792 2853 case SIGSEGV:
2793 2854 case SIGBUS:
2794 2855 case SIGILL:
2795 2856 case SIGTRAP:
2796 2857 case SIGFPE:
2797 2858 case SIGEMT:
2798 2859 dest->si_addr = (caddr32_t)(uintptr_t)src->si_addr;
2799 2860 dest->si_trapno = src->si_trapno;
2800 2861 dest->si_pc = (caddr32_t)(uintptr_t)src->si_pc;
2801 2862 break;
2802 2863 case SIGPOLL:
2803 2864 case SIGXFSZ:
2804 2865 dest->si_fd = src->si_fd;
2805 2866 dest->si_band = src->si_band;
2806 2867 break;
2807 2868 case SIGPROF:
2808 2869 dest->si_faddr = (caddr32_t)(uintptr_t)src->si_faddr;
2809 2870 dest->si_tstamp.tv_sec = src->si_tstamp.tv_sec;
2810 2871 dest->si_tstamp.tv_nsec = src->si_tstamp.tv_nsec;
2811 2872 dest->si_syscall = src->si_syscall;
2812 2873 dest->si_nsysarg = src->si_nsysarg;
2813 2874 dest->si_fault = src->si_fault;
2814 2875 break;
2815 2876 }
2816 2877 }
2817 2878
2818 2879 void
2819 2880 siginfo_32tok(const siginfo32_t *src, k_siginfo_t *dest)
2820 2881 {
2821 2882 bzero(dest, sizeof (*dest));
2822 2883
2823 2884 /*
2824 2885 * The absolute minimum content is si_signo and si_code.
2825 2886 */
2826 2887 dest->si_signo = src->si_signo;
2827 2888 if ((dest->si_code = src->si_code) == SI_NOINFO)
2828 2889 return;
2829 2890
2830 2891 /*
2831 2892 * A siginfo generated by user level is structured
2832 2893 * differently from one generated by the kernel.
2833 2894 */
2834 2895 if (SI_FROMUSER(src)) {
2835 2896 dest->si_pid = src->si_pid;
2836 2897 dest->si_ctid = src->si_ctid;
2837 2898 dest->si_zoneid = src->si_zoneid;
2838 2899 dest->si_uid = src->si_uid;
2839 2900 if (SI_CANQUEUE(src->si_code))
2840 2901 dest->si_value.sival_int =
2841 2902 (int)src->si_value.sival_int;
2842 2903 return;
2843 2904 }
2844 2905
2845 2906 dest->si_errno = src->si_errno;
2846 2907
2847 2908 switch (src->si_signo) {
2848 2909 default:
2849 2910 dest->si_pid = src->si_pid;
2850 2911 dest->si_ctid = src->si_ctid;
2851 2912 dest->si_zoneid = src->si_zoneid;
2852 2913 dest->si_uid = src->si_uid;
2853 2914 dest->si_value.sival_int = (int)src->si_value.sival_int;
2854 2915 break;
2855 2916 case SIGCLD:
2856 2917 dest->si_pid = src->si_pid;
2857 2918 dest->si_ctid = src->si_ctid;
2858 2919 dest->si_zoneid = src->si_zoneid;
2859 2920 dest->si_status = src->si_status;
2860 2921 dest->si_stime = src->si_stime;
2861 2922 dest->si_utime = src->si_utime;
2862 2923 break;
2863 2924 case SIGSEGV:
2864 2925 case SIGBUS:
2865 2926 case SIGILL:
2866 2927 case SIGTRAP:
2867 2928 case SIGFPE:
2868 2929 case SIGEMT:
2869 2930 dest->si_addr = (void *)(uintptr_t)src->si_addr;
2870 2931 dest->si_trapno = src->si_trapno;
2871 2932 dest->si_pc = (void *)(uintptr_t)src->si_pc;
2872 2933 break;
2873 2934 case SIGPOLL:
2874 2935 case SIGXFSZ:
2875 2936 dest->si_fd = src->si_fd;
2876 2937 dest->si_band = src->si_band;
2877 2938 break;
2878 2939 case SIGPROF:
2879 2940 dest->si_faddr = (void *)(uintptr_t)src->si_faddr;
2880 2941 dest->si_tstamp.tv_sec = src->si_tstamp.tv_sec;
2881 2942 dest->si_tstamp.tv_nsec = src->si_tstamp.tv_nsec;
2882 2943 dest->si_syscall = src->si_syscall;
2883 2944 dest->si_nsysarg = src->si_nsysarg;
2884 2945 dest->si_fault = src->si_fault;
2885 2946 break;
2886 2947 }
2887 2948 }
2888 2949
2889 2950 #endif /* _SYSCALL32_IMPL */
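The "int interpretation only" rule above exists because sigval is a union of an int and a pointer, and only the int view survives a 64-to-32-bit round trip. A tiny user-land illustration with hypothetical values:

#include <inttypes.h>
#include <stdio.h>

int
main(void)
{
	union { void *sival_ptr; int sival_int; } v;

	v.sival_int = 1234;
	(void) printf("int view survives: %" PRId32 "\n",
	    (int32_t)v.sival_int);

	v.sival_ptr = &v;
	(void) printf("pointer %p would truncate to 0x%" PRIx32 "\n",
	    v.sival_ptr, (uint32_t)(uintptr_t)v.sival_ptr);
	return (0);
}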