5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
25 * Copyright (c) 2014, Joyent, Inc. All rights reserved.
26 */
27
28 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
29 /* All Rights Reserved */
30
31 #include <sys/param.h>
32 #include <sys/types.h>
33 #include <sys/bitmap.h>
34 #include <sys/sysmacros.h>
35 #include <sys/systm.h>
36 #include <sys/cred.h>
37 #include <sys/user.h>
38 #include <sys/errno.h>
39 #include <sys/proc.h>
40 #include <sys/poll_impl.h> /* only needed for kludge in sigwaiting_send() */
41 #include <sys/signal.h>
42 #include <sys/siginfo.h>
43 #include <sys/fault.h>
44 #include <sys/ucontext.h>
45 #include <sys/procfs.h>
46 #include <sys/wait.h>
47 #include <sys/class.h>
48 #include <sys/mman.h>
49 #include <sys/procset.h>
50 #include <sys/kmem.h>
51 #include <sys/cpuvar.h>
52 #include <sys/prsystm.h>
53 #include <sys/debug.h>
54 #include <vm/as.h>
55 #include <sys/bitmap.h>
56 #include <c2/audit.h>
57 #include <sys/core.h>
58 #include <sys/schedctl.h>
59 #include <sys/contract/process_impl.h>
60 #include <sys/cyclic.h>
61 #include <sys/dtrace.h>
62 #include <sys/sdt.h>
63 #include <sys/signalfd.h>
64
65 const k_sigset_t nullsmask = {0, 0, 0};
66
67 const k_sigset_t fillset = /* MUST be contiguous */
68 {FILLSET0, FILLSET1, FILLSET2};
69
70 const k_sigset_t cantmask =
71 {CANTMASK0, CANTMASK1, CANTMASK2};
72
73 const k_sigset_t cantreset =
74 {(sigmask(SIGILL)|sigmask(SIGTRAP)|sigmask(SIGPWR)), 0, 0};
75
76 const k_sigset_t ignoredefault =
77 {(sigmask(SIGCONT)|sigmask(SIGCLD)|sigmask(SIGPWR)
78 |sigmask(SIGWINCH)|sigmask(SIGURG)|sigmask(SIGWAITING)),
79 (sigmask(SIGLWP)|sigmask(SIGCANCEL)|sigmask(SIGFREEZE)
80 |sigmask(SIGTHAW)|sigmask(SIGXRES)|sigmask(SIGJVM1)
81 |sigmask(SIGJVM2)|sigmask(SIGINFO)), 0};
82
83 const k_sigset_t stopdefault =
131 * Send the specified signal to the specified thread.
132 */
133 void
134 tsignal(kthread_t *t, int sig)
135 {
136 proc_t *p = ttoproc(t);
137
138 mutex_enter(&p->p_lock);
139 sigtoproc(p, t, sig);
140 mutex_exit(&p->p_lock);
141 }
142
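/*
 * Return true if the thread has the signal blocked, either in its
 * t_hold mask or via the schedctl sigblock mechanism (which cannot
 * block the signals in cantmask).
 */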
143 int
144 signal_is_blocked(kthread_t *t, int sig)
145 {
146 return (sigismember(&t->t_hold, sig) ||
147 (schedctl_sigblock(t) && !sigismember(&cantmask, sig)));
148 }
149
150 /*
151 * Return true if the signal can safely be discarded on generation.
152 * That is, if there is no need for the signal on the receiving end.
153 * The answer is true if the process is a zombie or
154 * if all of these conditions are true:
155 * the signal is being ignored
156 * the process is single-threaded
157 * the signal is not being traced by /proc
158 * the signal is not blocked by the process
159 * the signal is not being accepted via sigwait()
160 */
161 static int
162 sig_discardable(proc_t *p, int sig)
163 {
164 kthread_t *t = p->p_tlist;
165
166 return (t == NULL || /* if zombie or ... */
167 (sigismember(&p->p_ignore, sig) && /* signal is ignored */
168 t->t_forw == t && /* and single-threaded */
169 !tracing(p, sig) && /* and no /proc tracing */
170 !signal_is_blocked(t, sig) && /* and signal not blocked */
171 !sigismember(&t->t_sigwait, sig))); /* and not being accepted */
172 }
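
/*
 * sig_discardable() is consulted at signal-generation time, e.g. by
 * sigtoproc(), sigaddqa() and sigaddq() below, so that signals which
 * can have no effect are never queued.
 */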
173
174 /*
175 * Return true if this thread is going to eat this signal soon.
176 * Note that, if the signal is SIGKILL, we force stopped threads to be
177 * set running (to make SIGKILL be a sure kill), but only if the process
178 * is not currently locked by /proc (the P_PR_LOCK flag). Code in /proc
179 * relies on the fact that a process will not change shape while P_PR_LOCK
180 * is set (it drops and reacquires p->p_lock while leaving P_PR_LOCK set).
181 * We wish that we could simply call prbarrier() below, in sigtoproc(), to
182 * ensure that the process is not locked by /proc, but prbarrier() drops
183 * and reacquires p->p_lock and dropping p->p_lock here would be damaging.
184 */
185 int
186 eat_signal(kthread_t *t, int sig)
187 {
188 int rval = 0;
189 ASSERT(THREAD_LOCK_HELD(t));
190
191 /*
192 * Do not do anything if the target thread has the signal blocked.
193 */
194 if (!signal_is_blocked(t, sig)) {
195 t->t_sig_check = 1; /* have thread do an issig */
196 if (ISWAKEABLE(t) || ISWAITING(t)) {
197 setrun_locked(t);
198 rval = 1;
199 } else if (t->t_state == TS_STOPPED && sig == SIGKILL &&
200 !(ttoproc(t)->p_proc_flag & P_PR_LOCK)) {
201 ttoproc(t)->p_stopsig = 0;
202 t->t_dtrace_stop = 0;
203 t->t_schedflag |= TS_XSTART | TS_PSTART;
204 setrun_locked(t);
205 } else if (t != curthread && t->t_state == TS_ONPROC) {
206 aston(t); /* make it do issig promptly */
207 if (t->t_cpu != CPU)
208 poke_cpu(t->t_cpu->cpu_id);
209 rval = 1;
210 } else if (t->t_state == TS_RUN) {
211 rval = 1;
212 }
213 }
214
215 return (rval);
216 }
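
/*
 * Callers of eat_signal() must hold the target thread's thread lock,
 * as asserted above; the typical pattern (see sigtoproc() below) is:
 *
 *	thread_lock(t);
 *	(void) eat_signal(t, sig);
 *	thread_unlock(t);
 */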
217
218 /*
219 * Post a signal.
220 * If a non-null thread pointer is passed, then post the signal
221 * to the thread/lwp, otherwise post the signal to the process.
222 */
223 void
280 * This test has a race condition which we can't fix:
281 * By the time the stopping signal is received by
282 * the target process/thread, the signal handler
283 * and/or the detached state might have changed.
284 */
285 if (PTOU(p)->u_signal[sig-1] == SIG_DFL &&
286 (sig == SIGSTOP || !p->p_pgidp->pid_pgorphaned))
287 p->p_flag &= ~SSCONT;
288 sigdelq(p, NULL, SIGCONT);
289 sigdelset(&p->p_sig, SIGCONT);
290 sigdelset(&p->p_extsig, SIGCONT);
291 if ((tt = p->p_tlist) != NULL) {
292 do {
293 sigdelq(p, tt, SIGCONT);
294 sigdelset(&tt->t_sig, SIGCONT);
295 sigdelset(&tt->t_extsig, SIGCONT);
296 } while ((tt = tt->t_forw) != p->p_tlist);
297 }
298 }
299
300 if (sig_discardable(p, sig)) {
301 DTRACE_PROC3(signal__discard, kthread_t *, p->p_tlist,
302 proc_t *, p, int, sig);
303 return;
304 }
305
306 if (t != NULL) {
307 /*
308 * This is a directed signal, wake up the lwp.
309 */
310 sigaddset(&t->t_sig, sig);
311 if (ext)
312 sigaddset(&t->t_extsig, sig);
313 thread_lock(t);
314 (void) eat_signal(t, sig);
315 thread_unlock(t);
316 DTRACE_PROC2(signal__send, kthread_t *, t, int, sig);
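/* Notify the signalfd poll-wakeup callback, if one is registered. */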
317 if (p->p_sigfd != NULL && ((sigfd_proc_state_t *)
318 (p->p_sigfd))->sigfd_pollwake_cb != NULL)
319 (*((sigfd_proc_state_t *)(p->p_sigfd))->
320 sigfd_pollwake_cb)(p, sig);
480 return (1);
481
482 if (p->p_flag & SVFWAIT)
483 return (0);
484 set = p->p_sig;
485 sigorset(&set, &t->t_sig);
486 if (schedctl_sigblock(t)) /* all blockable signals blocked */
487 sigandset(&set, &cantmask);
488 else
489 sigdiffset(&set, &t->t_hold);
490 if (p->p_flag & SVFORK)
491 sigdiffset(&set, &holdvfork);
492
493 if (!sigisempty(&set)) {
494 int sig;
495
496 for (sig = 1; sig < NSIG; sig++) {
497 if (sigismember(&set, sig) &&
498 (tracing(p, sig) ||
499 sigismember(&t->t_sigwait, sig) ||
500 !sigismember(&p->p_ignore, sig))) {
501 /*
502 * Don't promote a signal that will stop
503 * the process when lwp_nostop is set.
504 */
505 if (!lwp->lwp_nostop ||
506 PTOU(p)->u_signal[sig-1] != SIG_DFL ||
507 !sigismember(&stopdefault, sig))
508 return (1);
509 }
510 }
511 }
512
513 return (0);
514 }
515
516 static int
517 issig_forreal(void)
518 {
519 int sig = 0, ext = 0;
520 kthread_t *t = curthread;
606 if ((t->t_proc_flag & TP_CHKPT) && !lwp->lwp_nostop) {
607 stop(PR_CHECKPOINT, 0);
608 continue;
609 }
610
611 /*
612 * Honor SHOLDFORK1, SHOLDWATCH, and TP_HOLDLWP before dealing
613 * with signals or /proc. Another lwp is executing fork1(),
614 * or is undergoing watchpoint activity (remapping a page),
615 * or is executing lwp_suspend() on this lwp.
616 * Again, go back to top of loop to check if an exit
617 * or hold event has occurred while stopped.
618 */
619 if (((p->p_flag & (SHOLDFORK1|SHOLDWATCH)) ||
620 (t->t_proc_flag & TP_HOLDLWP)) && !lwp->lwp_nostop) {
621 stop(PR_SUSPENDED, SUSPEND_NORMAL);
622 continue;
623 }
624
625 /*
626 * Honor requested stop before dealing with the
627 * current signal; a debugger may change it.
628 * We do not want to go back to the top of the loop here, since this
629 * is a special stop that means: make incremental progress before the
630 * next stop. The danger is that returning to the top of the loop
631 * would most likely drop the thread right back here to stop soon
632 * after it was continued, violating the incremental-progress request.
633 */
634 if ((t->t_proc_flag & TP_PRSTOP) && !lwp->lwp_nostop)
635 stop(PR_REQUESTED, 0);
636
637 /*
638 * If a debugger wants us to take a signal it will have
639 * left it in lwp->lwp_cursig. If lwp_cursig has been cleared
640 * or if it's being ignored, we continue on looking for another
641 * signal. Otherwise we return the specified signal, provided
642 * it's not a signal that causes a job control stop.
643 *
644 * When stopped on PR_JOBCONTROL, there is no current
645 * signal; we cancel lwp->lwp_cursig temporarily before
646 * calling isjobstop(). The current signal may be reset
647 * by a debugger while we are stopped in isjobstop().
648 *
649 * If the current thread is accepting the signal
650 * (via sigwait(), sigwaitinfo(), or sigtimedwait()),
651 * we allow the signal to be accepted, even if it is
652 * being ignored, and without causing a job control stop.
653 */
654 if ((sig = lwp->lwp_cursig) != 0) {
655 ext = lwp->lwp_extsig;
656 lwp->lwp_cursig = 0;
657 lwp->lwp_extsig = 0;
658 if (sigismember(&t->t_sigwait, sig) ||
659 (!sigismember(&p->p_ignore, sig) &&
660 !isjobstop(sig))) {
661 if (p->p_flag & (SEXITLWPS|SKILLED)) {
662 sig = SIGKILL;
663 ext = (p->p_flag & SEXTKILLED) != 0;
664 }
665 lwp->lwp_cursig = (uchar_t)sig;
666 lwp->lwp_extsig = (uchar_t)ext;
667 break;
668 }
669 /*
670 * The signal is being ignored or it caused a
671 * job-control stop. If another current signal
672 * has not been established, return the current
673 * siginfo, if any, to the memory manager.
674 */
675 if (lwp->lwp_cursig == 0 && lwp->lwp_curinfo != NULL) {
676 siginfofree(lwp->lwp_curinfo);
677 lwp->lwp_curinfo = NULL;
678 }
679 /*
691 * Some lwp in the process has already stopped
692 * showing PR_JOBCONTROL. This is a stop in
693 * sympathy with the other lwp, even if this
694 * lwp is blocking the stopping signal.
695 */
696 stop(PR_JOBCONTROL, p->p_stopsig);
697 continue;
698 }
699
700 /*
701 * Loop on the pending signals until we find a
702 * non-held signal that is traced or not ignored.
703 * First check the signals pending for the lwp,
704 * then the signals pending for the process as a whole.
705 */
706 for (;;) {
707 if ((sig = fsig(&t->t_sig, t)) != 0) {
708 toproc = 0;
709 if (tracing(p, sig) ||
710 sigismember(&t->t_sigwait, sig) ||
711 !sigismember(&p->p_ignore, sig)) {
712 if (sigismember(&t->t_extsig, sig))
713 ext = 1;
714 break;
715 }
716 sigdelset(&t->t_sig, sig);
717 sigdelset(&t->t_extsig, sig);
718 sigdelq(p, t, sig);
719 } else if ((sig = fsig(&p->p_sig, t)) != 0) {
720 if (sig == SIGCLD)
721 sigcld_found = 1;
722 toproc = 1;
723 if (tracing(p, sig) ||
724 sigismember(&t->t_sigwait, sig) ||
725 !sigismember(&p->p_ignore, sig)) {
726 if (sigismember(&p->p_extsig, sig))
727 ext = 1;
728 break;
729 }
730 sigdelset(&p->p_sig, sig);
731 sigdelset(&p->p_extsig, sig);
732 sigdelq(p, NULL, sig);
733 } else {
734 /* no signal was found */
735 break;
736 }
737 }
738
739 if (sig == 0) { /* no signal was found */
740 if (p->p_flag & (SEXITLWPS|SKILLED)) {
741 lwp->lwp_cursig = SIGKILL;
742 sig = SIGKILL;
743 ext = (p->p_flag & SEXTKILLED) != 0;
744 }
745 break;
937 } else {
938 if (!((t->t_proc_flag & TP_HOLDLWP) ||
939 (p->p_flag & (SHOLDFORK|SHOLDFORK1|SHOLDWATCH)))) {
940 t->t_proc_flag &= ~TP_STOPPING;
941 return;
942 }
943 /*
944 * If SHOLDFORK is in effect and we are stopping
945 * while asleep (not at the top of the stack),
946 * we return now to allow the hold to take effect
947 * when we reach the top of the kernel stack.
948 */
949 if (lwp->lwp_asleep && (p->p_flag & SHOLDFORK)) {
950 t->t_proc_flag &= ~TP_STOPPING;
951 return;
952 }
953 flags &= ~TS_CSTART;
954 }
955 break;
956
957 default: /* /proc stop */
958 flags &= ~TS_PSTART;
959 /*
960 * Do synchronous stop unless the async-stop flag is set.
961 * If why is PR_REQUESTED and t->t_dtrace_stop flag is set,
962 * then no debugger is present and we also do synchronous stop.
963 */
964 if ((why != PR_REQUESTED || t->t_dtrace_stop) &&
965 !(p->p_proc_flag & P_PR_ASYNC)) {
966 int notify;
967
968 for (tx = t->t_forw; tx != t; tx = tx->t_forw) {
969 notify = 0;
970 thread_lock(tx);
971 if (ISTOPPED(tx) ||
972 (tx->t_proc_flag & TP_PRSTOP)) {
973 thread_unlock(tx);
974 continue;
975 }
976 tx->t_proc_flag |= TP_PRSTOP;
1048 */
1049 mutex_enter(&p->p_lock);
1050 mutex_exit(&pidlock);
1051 p->p_stopsig = 0;
1052 } else if (why == PR_JOBCONTROL && p->p_stopsig == 0) {
1053 /*
1054 * Set p->p_stopsig and wake up sleeping lwps
1055 * so they will stop in sympathy with this lwp.
1056 */
1057 p->p_stopsig = (uchar_t)what;
1058 pokelwps(p);
1059 /*
1060 * We do this just in case one of the threads we asked
1061 * to stop is in holdlwps() (called from cfork()) or
1062 * lwp_suspend().
1063 */
1064 cv_broadcast(&p->p_holdlwps);
1065 }
1066 }
1067
1068 if (why != PR_JOBCONTROL && why != PR_CHECKPOINT) {
1069 /*
1070 * Do process-level notification when all lwps are
1071 * either stopped on events of interest to /proc
1072 * or are stopped showing PR_SUSPENDED or are zombies.
1073 */
1074 procstop = 1;
1075 for (tx = t->t_forw; procstop && tx != t; tx = tx->t_forw) {
1076 if (VSTOPPED(tx))
1077 continue;
1078 thread_lock(tx);
1079 switch (tx->t_state) {
1080 case TS_ZOMB:
1081 break;
1082 case TS_STOPPED:
1083 /* neither ISTOPPED nor SUSPENDED? */
1084 if ((tx->t_schedflag &
1085 (TS_CSTART | TS_UNPAUSE | TS_PSTART)) ==
1086 (TS_CSTART | TS_UNPAUSE | TS_PSTART))
1087 procstop = 0;
1088 break;
1154 * because checks for TP_PAUSE take precedence over checks for
1155 * SHOLDWATCH. If a thread is trying to stop because of
1156 * SUSPEND_PAUSE and tries to do a holdwatch(), it will be
1157 * waiting for the rest of the threads to enter a stopped state.
1158 * If we are stopping for a SUSPEND_PAUSE, we may be the last
1159 * lwp and not know it, so broadcast just in case.
1160 */
1161 if (what == SUSPEND_PAUSE ||
1162 --p->p_lwprcnt == 0 || (t->t_proc_flag & TP_HOLDLWP))
1163 cv_broadcast(&p->p_holdlwps);
1164
1165 }
1166
1167 /*
1168 * Need to do this here (rather than after the thread is officially
1169 * stopped) because we can't call mutex_enter from a stopped thread.
1170 */
1171 if (why == PR_CHECKPOINT)
1172 del_one_utstop();
1173
1174 thread_lock(t);
1175 ASSERT((t->t_schedflag & TS_ALLSTART) == 0);
1176 t->t_schedflag |= flags;
1177 t->t_whystop = (short)why;
1178 t->t_whatstop = (short)what;
1179 CL_STOP(t, why, what);
1180 (void) new_mstate(t, LMS_STOPPED);
1181 thread_stop(t); /* set stop state and drop lock */
1182
1183 if (why != PR_SUSPENDED && why != PR_CHECKPOINT) {
1184 /*
1185 * We may have gotten a SIGKILL or a SIGCONT when
1186 * we released p->p_lock; make one last check.
1187 * Also check for a /proc run-on-last-close.
1188 */
1189 if (sigismember(&t->t_sig, SIGKILL) ||
1190 sigismember(&p->p_sig, SIGKILL) ||
1191 (t->t_proc_flag & TP_LWPEXIT) ||
1192 (p->p_flag & (SEXITLWPS|SKILLED))) {
1193 p->p_stopsig = 0;
1194 thread_lock(t);
1195 t->t_schedflag |= TS_XSTART | TS_PSTART;
1196 setrun_locked(t);
1197 thread_unlock_nopreempt(t);
1198 } else if (why == PR_JOBCONTROL) {
1199 if (p->p_flag & SSCONT) {
1200 /*
1201 * This resulted from a SIGCONT posted
1202 * while we were not holding p->p_lock.
1203 */
1204 p->p_stopsig = 0;
1205 thread_lock(t);
1206 t->t_schedflag |= TS_XSTART;
1207 setrun_locked(t);
1208 thread_unlock_nopreempt(t);
1209 }
1210 } else if (!(t->t_proc_flag & TP_STOPPING)) {
1211 /*
1212 * This resulted from a /proc run-on-last-close.
1213 */
1214 thread_lock(t);
1215 t->t_schedflag |= TS_PSTART;
1310 */
1311 if (sig == 0) {
1312 if (lwp->lwp_curinfo) {
1313 siginfofree(lwp->lwp_curinfo);
1314 lwp->lwp_curinfo = NULL;
1315 }
1316 if (t->t_flag & T_TOMASK) { /* sigsuspend or pollsys */
1317 t->t_flag &= ~T_TOMASK;
1318 t->t_hold = lwp->lwp_sigoldmask;
1319 }
1320 mutex_exit(&p->p_lock);
1321 return;
1322 }
1323 func = PTOU(curproc)->u_signal[sig-1];
1324
1325 /*
1326 * The signal disposition could have changed since we promoted
1327 * this signal from pending to current (we dropped p->p_lock).
1328 * This can happen only in a multi-threaded process.
1329 */
1330 if (sigismember(&p->p_ignore, sig) ||
1331 (func == SIG_DFL && sigismember(&stopdefault, sig))) {
1332 lwp->lwp_cursig = 0;
1333 lwp->lwp_extsig = 0;
1334 if (lwp->lwp_curinfo) {
1335 siginfofree(lwp->lwp_curinfo);
1336 lwp->lwp_curinfo = NULL;
1337 }
1338 if (t->t_flag & T_TOMASK) { /* sigsuspend or pollsys */
1339 t->t_flag &= ~T_TOMASK;
1340 t->t_hold = lwp->lwp_sigoldmask;
1341 }
1342 mutex_exit(&p->p_lock);
1343 return;
1344 }
1345
1346 /*
1347 * We check lwp_curinfo first since pr_setsig can actually
1348 * stuff a sigqueue_t there for SIGKILL.
1349 */
1350 if (lwp->lwp_curinfo) {
1754 proc_t *pp = cp->p_parent;
1755 k_siginfo_t info;
1756
1757 ASSERT(MUTEX_HELD(&pidlock));
1758 mutex_enter(&pp->p_lock);
1759
1760 /*
1761 * If a SIGCLD is pending, then just mark the child process
1762 * so that its SIGCLD will be posted later, when the first
1763 * SIGCLD is taken off the queue or when the parent is ready
1764 * to receive it or accept it, if ever.
1765 */
1766 if (sigismember(&pp->p_sig, SIGCLD)) {
1767 cp->p_pidflag |= CLDPEND;
1768 } else {
1769 cp->p_pidflag &= ~CLDPEND;
1770 if (sqp == NULL) {
1771 /*
1772 * This can only happen when the parent is init.
1773 * (See call to sigcld(q, NULL) in exit().)
1774 * Use KM_NOSLEEP to avoid deadlock.
1775 */
1776 ASSERT(pp == proc_init);
1777 winfo(cp, &info, 0);
1778 sigaddq(pp, NULL, &info, KM_NOSLEEP);
1779 } else {
1780 winfo(cp, &sqp->sq_info, 0);
1781 sigaddqa(pp, NULL, sqp);
1782 sqp = NULL;
1783 }
1784 }
1785
1786 mutex_exit(&pp->p_lock);
1787
1788 if (sqp)
1789 siginfofree(sqp);
1790 }
1791
1792 /*
1793 * Search for a child that has a pending SIGCLD for us, the parent.
1794 * The queue of SIGCLD signals is implied by the list of children.
1795 * We post the SIGCLD signals one at a time so they don't get lost.
1796 * When one is dequeued, another is enqueued, until there are no more.
1797 */
1798 void
1799 sigcld_repost()
1800 {
1801 proc_t *pp = curproc;
1802 proc_t *cp;
1803 sigqueue_t *sqp;
1804
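/*
 * Pre-allocate the sigqueue_t; if nothing below consumes it,
 * it is freed before returning.
 */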
1805 sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);
1806 mutex_enter(&pidlock);
1807 for (cp = pp->p_child; cp; cp = cp->p_sibling) {
1808 if (cp->p_pidflag & CLDPEND) {
1809 post_sigcld(cp, sqp);
1810 mutex_exit(&pidlock);
1811 return;
1812 }
1813 }
1814 mutex_exit(&pidlock);
1815 kmem_free(sqp, sizeof (sigqueue_t));
1816 }
1817
1818 /*
1819 * Count the number of sigqueues sent by sigaddqa().
1820 */
1821 void
1822 sigqsend(int cmd, proc_t *p, kthread_t *t, sigqueue_t *sigqp)
1823 {
1824 sigqhdr_t *sqh;
1825
1826 sqh = (sigqhdr_t *)sigqp->sq_backptr;
2098 }
2099 }
2100 }
2101 *psqp = sigqp;
2102 sigqp->sq_next = NULL;
2103 }
2104
2105 /*
2106 * The function sigaddqa() is called with sigqueue already allocated.
2107 * If signal is ignored, discard but guarantee KILL and generation semantics.
2108 * It is called from sigqueue() and other places.
2109 */
2110 void
2111 sigaddqa(proc_t *p, kthread_t *t, sigqueue_t *sigqp)
2112 {
2113 int sig = sigqp->sq_info.si_signo;
2114
2115 ASSERT(MUTEX_HELD(&p->p_lock));
2116 ASSERT(sig >= 1 && sig < NSIG);
2117
2118 if (sig_discardable(p, sig))
2119 siginfofree(sigqp);
2120 else
2121 sigaddqins(p, t, sigqp);
2122
2123 sigtoproc(p, t, sig);
2124 }
2125
2126 /*
2127 * Allocate the sigqueue_t structure and call sigaddqins().
2128 */
2129 void
2130 sigaddq(proc_t *p, kthread_t *t, k_siginfo_t *infop, int km_flags)
2131 {
2132 sigqueue_t *sqp;
2133 int sig = infop->si_signo;
2134
2135 ASSERT(MUTEX_HELD(&p->p_lock));
2136 ASSERT(sig >= 1 && sig < NSIG);
2137
2138 /*
2139 * If the signal will be discarded by sigtoproc() or
2140 * if the process isn't requesting siginfo and it isn't
2141 * blocking the signal (it *could* change its mind while
2142 * the signal is pending) then don't bother creating one.
2143 */
2144 if (!sig_discardable(p, sig) &&
2145 (sigismember(&p->p_siginfo, sig) ||
2146 (curproc->p_ct_process != p->p_ct_process) ||
2147 (sig == SIGCLD && SI_FROMKERNEL(infop))) &&
2148 ((sqp = kmem_alloc(sizeof (sigqueue_t), km_flags)) != NULL)) {
2149 bcopy(infop, &sqp->sq_info, sizeof (k_siginfo_t));
2150 sqp->sq_func = NULL;
2151 sqp->sq_next = NULL;
2152 sigaddqins(p, t, sqp);
2153 }
2154 sigtoproc(p, t, sig);
2155 }
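
/*
 * Illustrative sketch (not part of this file): a caller that already
 * holds p->p_lock might queue a signal with siginfo roughly like this;
 * the signal number and si_code here are arbitrary examples.
 *
 *	k_siginfo_t info;
 *
 *	bzero(&info, sizeof (info));
 *	info.si_signo = SIGHUP;
 *	info.si_code = SI_USER;
 *	sigaddq(p, NULL, &info, KM_NOSLEEP);
 */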
2156
2157 /*
2158 * Handle stop-on-fault processing for the debugger. Returns 0
2159 * if the fault is cleared during the stop, nonzero if it isn't.
2160 */
2161 int
2162 stop_on_fault(uint_t fault, k_siginfo_t *sip)
2163 {
2164 proc_t *p = ttoproc(curthread);
|
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
25 * Copyright 2015, Joyent, Inc.
26 */
27
28 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
29 /* All Rights Reserved */
30
31 #include <sys/param.h>
32 #include <sys/types.h>
33 #include <sys/bitmap.h>
34 #include <sys/sysmacros.h>
35 #include <sys/systm.h>
36 #include <sys/cred.h>
37 #include <sys/user.h>
38 #include <sys/errno.h>
39 #include <sys/proc.h>
40 #include <sys/poll_impl.h> /* only needed for kludge in sigwaiting_send() */
41 #include <sys/signal.h>
42 #include <sys/siginfo.h>
43 #include <sys/fault.h>
44 #include <sys/ucontext.h>
45 #include <sys/procfs.h>
46 #include <sys/wait.h>
47 #include <sys/class.h>
48 #include <sys/mman.h>
49 #include <sys/procset.h>
50 #include <sys/kmem.h>
51 #include <sys/cpuvar.h>
52 #include <sys/prsystm.h>
53 #include <sys/debug.h>
54 #include <vm/as.h>
55 #include <sys/bitmap.h>
56 #include <c2/audit.h>
57 #include <sys/core.h>
58 #include <sys/schedctl.h>
59 #include <sys/contract/process_impl.h>
60 #include <sys/cyclic.h>
61 #include <sys/dtrace.h>
62 #include <sys/sdt.h>
63 #include <sys/signalfd.h>
64 #include <sys/brand.h>
65
66 const k_sigset_t nullsmask = {0, 0, 0};
67
68 const k_sigset_t fillset = /* MUST be contiguous */
69 {FILLSET0, FILLSET1, FILLSET2};
70
71 const k_sigset_t cantmask =
72 {CANTMASK0, CANTMASK1, CANTMASK2};
73
74 const k_sigset_t cantreset =
75 {(sigmask(SIGILL)|sigmask(SIGTRAP)|sigmask(SIGPWR)), 0, 0};
76
77 const k_sigset_t ignoredefault =
78 {(sigmask(SIGCONT)|sigmask(SIGCLD)|sigmask(SIGPWR)
79 |sigmask(SIGWINCH)|sigmask(SIGURG)|sigmask(SIGWAITING)),
80 (sigmask(SIGLWP)|sigmask(SIGCANCEL)|sigmask(SIGFREEZE)
81 |sigmask(SIGTHAW)|sigmask(SIGXRES)|sigmask(SIGJVM1)
82 |sigmask(SIGJVM2)|sigmask(SIGINFO)), 0};
83
84 const k_sigset_t stopdefault =
132 * Send the specified signal to the specified thread.
133 */
134 void
135 tsignal(kthread_t *t, int sig)
136 {
137 proc_t *p = ttoproc(t);
138
139 mutex_enter(&p->p_lock);
140 sigtoproc(p, t, sig);
141 mutex_exit(&p->p_lock);
142 }
143
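/*
 * Return true if the thread has the signal blocked, either in its
 * t_hold mask or via the schedctl sigblock mechanism (which cannot
 * block the signals in cantmask).
 */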
144 int
145 signal_is_blocked(kthread_t *t, int sig)
146 {
147 return (sigismember(&t->t_hold, sig) ||
148 (schedctl_sigblock(t) && !sigismember(&cantmask, sig)));
149 }
150
151 /*
152 * Return true if the signal can safely be ignored.
153 * That is, if the signal is in the process's p_ignore mask and the
154 * process's brand (if any) does not forbid ignoring it.
155 */
156 static int
157 sig_ignorable(proc_t *p, klwp_t *lwp, int sig)
158 {
159 return (sigismember(&p->p_ignore, sig) && /* sig in ignore mask */
160 !(PROC_IS_BRANDED(p) && /* allowed by brand */
161 BROP(p)->b_sig_ignorable != NULL &&
162 BROP(p)->b_sig_ignorable(p, lwp, sig) == B_FALSE));
163
164 }
165
166 /*
167 * Return true if the signal can safely be discarded on generation.
168 * That is, if there is no need for the signal on the receiving end.
169 * The answer is true if the process is a zombie or
170 * if all of these conditions are true:
171 * the signal is being ignored
172 * the process is single-threaded
173 * the signal is not being traced by /proc
174 * the signal is not blocked by the process
175 * the signal is not being accepted via sigwait()
176 */
177 static int
178 sig_discardable(proc_t *p, kthread_t *tp, int sig)
179 {
180 kthread_t *t = p->p_tlist;
181 klwp_t *lwp = (tp == NULL) ? NULL : tp->t_lwp;
182
183 return (t == NULL || /* if zombie or ... */
184 (sig_ignorable(p, lwp, sig) && /* signal is ignored */
185 t->t_forw == t && /* and single-threaded */
186 !tracing(p, sig) && /* and no /proc tracing */
187 !signal_is_blocked(t, sig) && /* and signal not blocked */
188 !sigismember(&t->t_sigwait, sig))); /* and not being accepted */
189 }
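
/*
 * sig_discardable() is consulted at signal-generation time, e.g. by
 * sigtoproc(), sigaddqa() and sigaddq() below, so that signals which
 * can have no effect are never queued.
 */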
190
191 /*
192 * Return true if this thread is going to eat this signal soon.
193 * Note that, if the signal is SIGKILL, we force stopped threads to be
194 * set running (to make SIGKILL be a sure kill), but only if the process
195 * is not currently locked by /proc (the P_PR_LOCK flag). Code in /proc
196 * relies on the fact that a process will not change shape while P_PR_LOCK
197 * is set (it drops and reacquires p->p_lock while leaving P_PR_LOCK set).
198 * We wish that we could simply call prbarrier() below, in sigtoproc(), to
199 * ensure that the process is not locked by /proc, but prbarrier() drops
200 * and reacquires p->p_lock and dropping p->p_lock here would be damaging.
201 */
202 int
203 eat_signal(kthread_t *t, int sig)
204 {
205 int rval = 0;
206 ASSERT(THREAD_LOCK_HELD(t));
207
208 /*
209 * Do not do anything if the target thread has the signal blocked.
210 */
211 if (!signal_is_blocked(t, sig)) {
212 t->t_sig_check = 1; /* have thread do an issig */
213 if (ISWAKEABLE(t) || ISWAITING(t)) {
214 setrun_locked(t);
215 rval = 1;
216 } else if (t->t_state == TS_STOPPED && sig == SIGKILL &&
217 !(ttoproc(t)->p_proc_flag & P_PR_LOCK)) {
218 ttoproc(t)->p_stopsig = 0;
219 t->t_dtrace_stop = 0;
220 t->t_schedflag |= TS_XSTART | TS_PSTART | TS_BSTART;
221 setrun_locked(t);
222 } else if (t != curthread && t->t_state == TS_ONPROC) {
223 aston(t); /* make it do issig promptly */
224 if (t->t_cpu != CPU)
225 poke_cpu(t->t_cpu->cpu_id);
226 rval = 1;
227 } else if (t->t_state == TS_RUN) {
228 rval = 1;
229 }
230 }
231
232 return (rval);
233 }
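
/*
 * Callers of eat_signal() must hold the target thread's thread lock,
 * as asserted above; the typical pattern (see sigtoproc() below) is:
 *
 *	thread_lock(t);
 *	(void) eat_signal(t, sig);
 *	thread_unlock(t);
 */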
234
235 /*
236 * Post a signal.
237 * If a non-null thread pointer is passed, then post the signal
238 * to the thread/lwp, otherwise post the signal to the process.
239 */
240 void
297 * This test has a race condition which we can't fix:
298 * By the time the stopping signal is received by
299 * the target process/thread, the signal handler
300 * and/or the detached state might have changed.
301 */
302 if (PTOU(p)->u_signal[sig-1] == SIG_DFL &&
303 (sig == SIGSTOP || !p->p_pgidp->pid_pgorphaned))
304 p->p_flag &= ~SSCONT;
305 sigdelq(p, NULL, SIGCONT);
306 sigdelset(&p->p_sig, SIGCONT);
307 sigdelset(&p->p_extsig, SIGCONT);
308 if ((tt = p->p_tlist) != NULL) {
309 do {
310 sigdelq(p, tt, SIGCONT);
311 sigdelset(&tt->t_sig, SIGCONT);
312 sigdelset(&tt->t_extsig, SIGCONT);
313 } while ((tt = tt->t_forw) != p->p_tlist);
314 }
315 }
316
317 if (sig_discardable(p, t, sig)) {
318 DTRACE_PROC3(signal__discard, kthread_t *, p->p_tlist,
319 proc_t *, p, int, sig);
320 return;
321 }
322
323 if (t != NULL) {
324 /*
325 * This is a directed signal, wake up the lwp.
326 */
327 sigaddset(&t->t_sig, sig);
328 if (ext)
329 sigaddset(&t->t_extsig, sig);
330 thread_lock(t);
331 (void) eat_signal(t, sig);
332 thread_unlock(t);
333 DTRACE_PROC2(signal__send, kthread_t *, t, int, sig);
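/* Notify the signalfd poll-wakeup callback, if one is registered. */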
334 if (p->p_sigfd != NULL && ((sigfd_proc_state_t *)
335 (p->p_sigfd))->sigfd_pollwake_cb != NULL)
336 (*((sigfd_proc_state_t *)(p->p_sigfd))->
337 sigfd_pollwake_cb)(p, sig);
497 return (1);
498
499 if (p->p_flag & SVFWAIT)
500 return (0);
501 set = p->p_sig;
502 sigorset(&set, &t->t_sig);
503 if (schedctl_sigblock(t)) /* all blockable signals blocked */
504 sigandset(&set, &cantmask);
505 else
506 sigdiffset(&set, &t->t_hold);
507 if (p->p_flag & SVFORK)
508 sigdiffset(&set, &holdvfork);
509
510 if (!sigisempty(&set)) {
511 int sig;
512
513 for (sig = 1; sig < NSIG; sig++) {
514 if (sigismember(&set, sig) &&
515 (tracing(p, sig) ||
516 sigismember(&t->t_sigwait, sig) ||
517 !sig_ignorable(p, lwp, sig))) {
518 /*
519 * Don't promote a signal that will stop
520 * the process when lwp_nostop is set.
521 */
522 if (!lwp->lwp_nostop ||
523 PTOU(p)->u_signal[sig-1] != SIG_DFL ||
524 !sigismember(&stopdefault, sig))
525 return (1);
526 }
527 }
528 }
529
530 return (0);
531 }
532
533 static int
534 issig_forreal(void)
535 {
536 int sig = 0, ext = 0;
537 kthread_t *t = curthread;
623 if ((t->t_proc_flag & TP_CHKPT) && !lwp->lwp_nostop) {
624 stop(PR_CHECKPOINT, 0);
625 continue;
626 }
627
628 /*
629 * Honor SHOLDFORK1, SHOLDWATCH, and TP_HOLDLWP before dealing
630 * with signals or /proc. Another lwp is executing fork1(),
631 * or is undergoing watchpoint activity (remapping a page),
632 * or is executing lwp_suspend() on this lwp.
633 * Again, go back to top of loop to check if an exit
634 * or hold event has occurred while stopped.
635 */
636 if (((p->p_flag & (SHOLDFORK1|SHOLDWATCH)) ||
637 (t->t_proc_flag & TP_HOLDLWP)) && !lwp->lwp_nostop) {
638 stop(PR_SUSPENDED, SUSPEND_NORMAL);
639 continue;
640 }
641
642 /*
643 * Allow the brand the chance to alter (or suppress) delivery
644 * of this signal.
645 */
646 if (PROC_IS_BRANDED(p) && BROP(p)->b_issig_stop != NULL) {
647 /*
648 * The brand hook will return 0 if it would like
649 * us to drive on, or -1 if we should restart
650 * the loop to check other conditions.
651 */
652 if (BROP(p)->b_issig_stop(p, lwp) != 0) {
653 continue;
654 }
655 }
656
657 /*
658 * Honor requested stop before dealing with the
659 * current signal; a debugger may change it.
660 * We do not want to go back to the top of the loop here, since this
661 * is a special stop that means: make incremental progress before the
662 * next stop. The danger is that returning to the top of the loop
663 * would most likely drop the thread right back here to stop soon
664 * after it was continued, violating the incremental-progress request.
665 */
666 if ((t->t_proc_flag & TP_PRSTOP) && !lwp->lwp_nostop)
667 stop(PR_REQUESTED, 0);
668
669 /*
670 * If a debugger wants us to take a signal it will have
671 * left it in lwp->lwp_cursig. If lwp_cursig has been cleared
672 * or if it's being ignored, we continue on looking for another
673 * signal. Otherwise we return the specified signal, provided
674 * it's not a signal that causes a job control stop.
675 *
676 * When stopped on PR_JOBCONTROL, there is no current
677 * signal; we cancel lwp->lwp_cursig temporarily before
678 * calling isjobstop(). The current signal may be reset
679 * by a debugger while we are stopped in isjobstop().
680 *
681 * If the current thread is accepting the signal
682 * (via sigwait(), sigwaitinfo(), or sigtimedwait()),
683 * we allow the signal to be accepted, even if it is
684 * being ignored, and without causing a job control stop.
685 */
686 if ((sig = lwp->lwp_cursig) != 0) {
687 ext = lwp->lwp_extsig;
688 lwp->lwp_cursig = 0;
689 lwp->lwp_extsig = 0;
690 if (sigismember(&t->t_sigwait, sig) ||
691 (!sig_ignorable(p, lwp, sig) &&
692 !isjobstop(sig))) {
693 if (p->p_flag & (SEXITLWPS|SKILLED)) {
694 sig = SIGKILL;
695 ext = (p->p_flag & SEXTKILLED) != 0;
696 }
697 lwp->lwp_cursig = (uchar_t)sig;
698 lwp->lwp_extsig = (uchar_t)ext;
699 break;
700 }
701 /*
702 * The signal is being ignored or it caused a
703 * job-control stop. If another current signal
704 * has not been established, return the current
705 * siginfo, if any, to the memory manager.
706 */
707 if (lwp->lwp_cursig == 0 && lwp->lwp_curinfo != NULL) {
708 siginfofree(lwp->lwp_curinfo);
709 lwp->lwp_curinfo = NULL;
710 }
711 /*
723 * Some lwp in the process has already stopped
724 * showing PR_JOBCONTROL. This is a stop in
725 * sympathy with the other lwp, even if this
726 * lwp is blocking the stopping signal.
727 */
728 stop(PR_JOBCONTROL, p->p_stopsig);
729 continue;
730 }
731
732 /*
733 * Loop on the pending signals until we find a
734 * non-held signal that is traced or not ignored.
735 * First check the signals pending for the lwp,
736 * then the signals pending for the process as a whole.
737 */
738 for (;;) {
739 if ((sig = fsig(&t->t_sig, t)) != 0) {
740 toproc = 0;
741 if (tracing(p, sig) ||
742 sigismember(&t->t_sigwait, sig) ||
743 !sig_ignorable(p, lwp, sig)) {
744 if (sigismember(&t->t_extsig, sig))
745 ext = 1;
746 break;
747 }
748 sigdelset(&t->t_sig, sig);
749 sigdelset(&t->t_extsig, sig);
750 sigdelq(p, t, sig);
751 } else if ((sig = fsig(&p->p_sig, t)) != 0) {
752 if (sig == SIGCLD)
753 sigcld_found = 1;
754 toproc = 1;
755 if (tracing(p, sig) ||
756 sigismember(&t->t_sigwait, sig) ||
757 !sig_ignorable(p, lwp, sig)) {
758 if (sigismember(&p->p_extsig, sig))
759 ext = 1;
760 break;
761 }
762 sigdelset(&p->p_sig, sig);
763 sigdelset(&p->p_extsig, sig);
764 sigdelq(p, NULL, sig);
765 } else {
766 /* no signal was found */
767 break;
768 }
769 }
770
771 if (sig == 0) { /* no signal was found */
772 if (p->p_flag & (SEXITLWPS|SKILLED)) {
773 lwp->lwp_cursig = SIGKILL;
774 sig = SIGKILL;
775 ext = (p->p_flag & SEXTKILLED) != 0;
776 }
777 break;
969 } else {
970 if (!((t->t_proc_flag & TP_HOLDLWP) ||
971 (p->p_flag & (SHOLDFORK|SHOLDFORK1|SHOLDWATCH)))) {
972 t->t_proc_flag &= ~TP_STOPPING;
973 return;
974 }
975 /*
976 * If SHOLDFORK is in effect and we are stopping
977 * while asleep (not at the top of the stack),
978 * we return now to allow the hold to take effect
979 * when we reach the top of the kernel stack.
980 */
981 if (lwp->lwp_asleep && (p->p_flag & SHOLDFORK)) {
982 t->t_proc_flag &= ~TP_STOPPING;
983 return;
984 }
985 flags &= ~TS_CSTART;
986 }
987 break;
988
989 case PR_BRAND:
990 /*
991 * We have been stopped by the brand code for a brand-private
992 * reason. This is an asynchronous stop affecting only this
993 * LWP.
994 */
995 VERIFY(PROC_IS_BRANDED(p));
996 flags &= ~TS_BSTART;
997 break;
998
999 default: /* /proc stop */
1000 flags &= ~TS_PSTART;
1001 /*
1002 * Do synchronous stop unless the async-stop flag is set.
1003 * If why is PR_REQUESTED and t->t_dtrace_stop flag is set,
1004 * then no debugger is present and we also do synchronous stop.
1005 */
1006 if ((why != PR_REQUESTED || t->t_dtrace_stop) &&
1007 !(p->p_proc_flag & P_PR_ASYNC)) {
1008 int notify;
1009
1010 for (tx = t->t_forw; tx != t; tx = tx->t_forw) {
1011 notify = 0;
1012 thread_lock(tx);
1013 if (ISTOPPED(tx) ||
1014 (tx->t_proc_flag & TP_PRSTOP)) {
1015 thread_unlock(tx);
1016 continue;
1017 }
1018 tx->t_proc_flag |= TP_PRSTOP;
1090 */
1091 mutex_enter(&p->p_lock);
1092 mutex_exit(&pidlock);
1093 p->p_stopsig = 0;
1094 } else if (why == PR_JOBCONTROL && p->p_stopsig == 0) {
1095 /*
1096 * Set p->p_stopsig and wake up sleeping lwps
1097 * so they will stop in sympathy with this lwp.
1098 */
1099 p->p_stopsig = (uchar_t)what;
1100 pokelwps(p);
1101 /*
1102 * We do this just in case one of the threads we asked
1103 * to stop is in holdlwps() (called from cfork()) or
1104 * lwp_suspend().
1105 */
1106 cv_broadcast(&p->p_holdlwps);
1107 }
1108 }
1109
1110 if (why != PR_JOBCONTROL && why != PR_CHECKPOINT && why != PR_BRAND) {
1111 /*
1112 * Do process-level notification when all lwps are
1113 * either stopped on events of interest to /proc
1114 * or are stopped showing PR_SUSPENDED or are zombies.
1115 */
1116 procstop = 1;
1117 for (tx = t->t_forw; procstop && tx != t; tx = tx->t_forw) {
1118 if (VSTOPPED(tx))
1119 continue;
1120 thread_lock(tx);
1121 switch (tx->t_state) {
1122 case TS_ZOMB:
1123 break;
1124 case TS_STOPPED:
1125 /* neither ISTOPPED nor SUSPENDED? */
1126 if ((tx->t_schedflag &
1127 (TS_CSTART | TS_UNPAUSE | TS_PSTART)) ==
1128 (TS_CSTART | TS_UNPAUSE | TS_PSTART))
1129 procstop = 0;
1130 break;
1196 * because checks for TP_PAUSE take precedence over checks for
1197 * SHOLDWATCH. If a thread is trying to stop because of
1198 * SUSPEND_PAUSE and tries to do a holdwatch(), it will be
1199 * waiting for the rest of the threads to enter a stopped state.
1200 * If we are stopping for a SUSPEND_PAUSE, we may be the last
1201 * lwp and not know it, so broadcast just in case.
1202 */
1203 if (what == SUSPEND_PAUSE ||
1204 --p->p_lwprcnt == 0 || (t->t_proc_flag & TP_HOLDLWP))
1205 cv_broadcast(&p->p_holdlwps);
1206
1207 }
1208
1209 /*
1210 * Need to do this here (rather than after the thread is officially
1211 * stopped) because we can't call mutex_enter from a stopped thread.
1212 */
1213 if (why == PR_CHECKPOINT)
1214 del_one_utstop();
1215
1216 /*
1217 * Allow the brand to post notification of this stop condition.
1218 */
1219 if (PROC_IS_BRANDED(p) && BROP(p)->b_stop_notify != NULL) {
1220 BROP(p)->b_stop_notify(p, lwp, why, what);
1221 }
1222
1223 thread_lock(t);
1224 ASSERT((t->t_schedflag & TS_ALLSTART) == 0);
1225 t->t_schedflag |= flags;
1226 t->t_whystop = (short)why;
1227 t->t_whatstop = (short)what;
1228 CL_STOP(t, why, what);
1229 (void) new_mstate(t, LMS_STOPPED);
1230 thread_stop(t); /* set stop state and drop lock */
1231
1232 if (why != PR_SUSPENDED && why != PR_CHECKPOINT) {
1233 /*
1234 * We may have gotten a SIGKILL or a SIGCONT when
1235 * we released p->p_lock; make one last check.
1236 * Also check for a /proc run-on-last-close.
1237 */
1238 if (sigismember(&t->t_sig, SIGKILL) ||
1239 sigismember(&p->p_sig, SIGKILL) ||
1240 (t->t_proc_flag & TP_LWPEXIT) ||
1241 (p->p_flag & (SEXITLWPS|SKILLED))) {
1242 p->p_stopsig = 0;
1243 thread_lock(t);
1244 t->t_schedflag |= TS_XSTART | TS_PSTART | TS_BSTART;
1245 setrun_locked(t);
1246 thread_unlock_nopreempt(t);
1247 } else if (why == PR_JOBCONTROL) {
1248 if (p->p_flag & SSCONT) {
1249 /*
1250 * This resulted from a SIGCONT posted
1251 * while we were not holding p->p_lock.
1252 */
1253 p->p_stopsig = 0;
1254 thread_lock(t);
1255 t->t_schedflag |= TS_XSTART;
1256 setrun_locked(t);
1257 thread_unlock_nopreempt(t);
1258 }
1259 } else if (!(t->t_proc_flag & TP_STOPPING)) {
1260 /*
1261 * This resulted from a /proc run-on-last-close.
1262 */
1263 thread_lock(t);
1264 t->t_schedflag |= TS_PSTART;
1359 */
1360 if (sig == 0) {
1361 if (lwp->lwp_curinfo) {
1362 siginfofree(lwp->lwp_curinfo);
1363 lwp->lwp_curinfo = NULL;
1364 }
1365 if (t->t_flag & T_TOMASK) { /* sigsuspend or pollsys */
1366 t->t_flag &= ~T_TOMASK;
1367 t->t_hold = lwp->lwp_sigoldmask;
1368 }
1369 mutex_exit(&p->p_lock);
1370 return;
1371 }
1372 func = PTOU(curproc)->u_signal[sig-1];
1373
1374 /*
1375 * The signal disposition could have changed since we promoted
1376 * this signal from pending to current (we dropped p->p_lock).
1377 * This can happen only in a multi-threaded process.
1378 */
1379 if (sig_ignorable(p, lwp, sig) ||
1380 (func == SIG_DFL && sigismember(&stopdefault, sig))) {
1381 lwp->lwp_cursig = 0;
1382 lwp->lwp_extsig = 0;
1383 if (lwp->lwp_curinfo) {
1384 siginfofree(lwp->lwp_curinfo);
1385 lwp->lwp_curinfo = NULL;
1386 }
1387 if (t->t_flag & T_TOMASK) { /* sigsuspend or pollsys */
1388 t->t_flag &= ~T_TOMASK;
1389 t->t_hold = lwp->lwp_sigoldmask;
1390 }
1391 mutex_exit(&p->p_lock);
1392 return;
1393 }
1394
1395 /*
1396 * We check lwp_curinfo first since pr_setsig can actually
1397 * stuff a sigqueue_t there for SIGKILL.
1398 */
1399 if (lwp->lwp_curinfo) {
1803 proc_t *pp = cp->p_parent;
1804 k_siginfo_t info;
1805
1806 ASSERT(MUTEX_HELD(&pidlock));
1807 mutex_enter(&pp->p_lock);
1808
1809 /*
1810 * If a SIGCLD is pending, then just mark the child process
1811 * so that its SIGCLD will be posted later, when the first
1812 * SIGCLD is taken off the queue or when the parent is ready
1813 * to receive it or accept it, if ever.
1814 */
1815 if (sigismember(&pp->p_sig, SIGCLD)) {
1816 cp->p_pidflag |= CLDPEND;
1817 } else {
1818 cp->p_pidflag &= ~CLDPEND;
1819 if (sqp == NULL) {
1820 /*
1821 * This can only happen when the parent is init.
1822 * (See call to sigcld(q, NULL) in exit().)
1823 * Use KM_NOSLEEP to avoid deadlock. The child proc's
1824 * initpid can be 1 for zlogin.
1825 */
1826 ASSERT(pp->p_pidp->pid_id ==
1827 cp->p_zone->zone_proc_initpid ||
1828 pp->p_pidp->pid_id == 1);
1829 winfo(cp, &info, 0);
1830 sigaddq(pp, NULL, &info, KM_NOSLEEP);
1831 } else {
1832 winfo(cp, &sqp->sq_info, 0);
1833 sigaddqa(pp, NULL, sqp);
1834 sqp = NULL;
1835 }
1836 }
1837
1838 mutex_exit(&pp->p_lock);
1839
1840 if (sqp)
1841 siginfofree(sqp);
1842 }
1843
1844 /*
1845 * Search for a child that has a pending SIGCLD for us, the parent.
1846 * The queue of SIGCLD signals is implied by the list of children.
1847 * We post the SIGCLD signals one at a time so they don't get lost.
1848 * When one is dequeued, another is enqueued, until there are no more.
1849 */
1850 void
1851 sigcld_repost()
1852 {
1853 proc_t *pp = curproc;
1854 proc_t *cp;
1855 sigqueue_t *sqp;
1856
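/*
 * Pre-allocate the sigqueue_t; if nothing below consumes it,
 * it is freed before returning.
 */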
1857 sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);
1858 mutex_enter(&pidlock);
1859 if (PROC_IS_BRANDED(pp) && BROP(pp)->b_sigcld_repost != NULL) {
1860 /*
1861 * Allow the brand to inject synthetic SIGCLD signals.
1862 */
1863 if (BROP(pp)->b_sigcld_repost(pp, sqp) == 0) {
1864 mutex_exit(&pidlock);
1865 return;
1866 }
1867 }
1868 for (cp = pp->p_child; cp; cp = cp->p_sibling) {
1869 if (cp->p_pidflag & CLDPEND) {
1870 post_sigcld(cp, sqp);
1871 mutex_exit(&pidlock);
1872 return;
1873 }
1874 }
1875 mutex_exit(&pidlock);
1876 kmem_free(sqp, sizeof (sigqueue_t));
1877 }
1878
1879 /*
1880 * Count the number of sigqueues sent by sigaddqa().
1881 */
1882 void
1883 sigqsend(int cmd, proc_t *p, kthread_t *t, sigqueue_t *sigqp)
1884 {
1885 sigqhdr_t *sqh;
1886
1887 sqh = (sigqhdr_t *)sigqp->sq_backptr;
2159 }
2160 }
2161 }
2162 *psqp = sigqp;
2163 sigqp->sq_next = NULL;
2164 }
2165
2166 /*
2167 * The function sigaddqa() is called with sigqueue already allocated.
2168 * If signal is ignored, discard but guarantee KILL and generation semantics.
2169 * It is called from sigqueue() and other places.
2170 */
2171 void
2172 sigaddqa(proc_t *p, kthread_t *t, sigqueue_t *sigqp)
2173 {
2174 int sig = sigqp->sq_info.si_signo;
2175
2176 ASSERT(MUTEX_HELD(&p->p_lock));
2177 ASSERT(sig >= 1 && sig < NSIG);
2178
2179 if (sig_discardable(p, t, sig))
2180 siginfofree(sigqp);
2181 else
2182 sigaddqins(p, t, sigqp);
2183
2184 sigtoproc(p, t, sig);
2185 }
2186
2187 /*
2188 * Allocate the sigqueue_t structure and call sigaddqins().
2189 */
2190 void
2191 sigaddq(proc_t *p, kthread_t *t, k_siginfo_t *infop, int km_flags)
2192 {
2193 sigqueue_t *sqp;
2194 int sig = infop->si_signo;
2195
2196 ASSERT(MUTEX_HELD(&p->p_lock));
2197 ASSERT(sig >= 1 && sig < NSIG);
2198
2199 /*
2200 * If the signal will be discarded by sigtoproc() or
2201 * if the process isn't requesting siginfo and it isn't
2202 * blocking the signal (it *could* change its mind while
2203 * the signal is pending) then don't bother creating one.
2204 */
2205 if (!sig_discardable(p, t, sig) &&
2206 (sigismember(&p->p_siginfo, sig) ||
2207 (curproc->p_ct_process != p->p_ct_process) ||
2208 (sig == SIGCLD && SI_FROMKERNEL(infop))) &&
2209 ((sqp = kmem_alloc(sizeof (sigqueue_t), km_flags)) != NULL)) {
2210 bcopy(infop, &sqp->sq_info, sizeof (k_siginfo_t));
2211 sqp->sq_func = NULL;
2212 sqp->sq_next = NULL;
2213 sigaddqins(p, t, sqp);
2214 }
2215 sigtoproc(p, t, sig);
2216 }
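
/*
 * Illustrative sketch (not part of this file): a caller that already
 * holds p->p_lock might queue a signal with siginfo roughly like this;
 * the signal number and si_code here are arbitrary examples.
 *
 *	k_siginfo_t info;
 *
 *	bzero(&info, sizeof (info));
 *	info.si_signo = SIGHUP;
 *	info.si_code = SI_USER;
 *	sigaddq(p, NULL, &info, KM_NOSLEEP);
 */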
2217
2218 /*
2219 * Handle stop-on-fault processing for the debugger. Returns 0
2220 * if the fault is cleared during the stop, nonzero if it isn't.
2221 */
2222 int
2223 stop_on_fault(uint_t fault, k_siginfo_t *sip)
2224 {
2225 proc_t *p = ttoproc(curthread);
|