1 /*
   2  * CDDL HEADER START
   3  *
   4  * The contents of this file are subject to the terms of the
   5  * Common Development and Distribution License (the "License").
   6  * You may not use this file except in compliance with the License.
   7  *
   8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9  * or http://www.opensolaris.org/os/licensing.
  10  * See the License for the specific language governing permissions
  11  * and limitations under the License.
  12  *
  13  * When distributing Covered Code, include this CDDL HEADER in each
  14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15  * If applicable, add the following below this CDDL HEADER, with the
  16  * fields enclosed by brackets "[]" replaced with your own identifying
  17  * information: Portions Copyright [yyyy] [name of copyright owner]
  18  *
  19  * CDDL HEADER END
  20  */
  21 
  22 /*
  23  * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
  24  * Use is subject to license terms.
  25  * Copyright 2015, Joyent, Inc.
  26  */
  27 
  28 /*      Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T     */
  29 /*        All Rights Reserved   */
  30 
  31 #include <sys/param.h>
  32 #include <sys/types.h>
  33 #include <sys/bitmap.h>
  34 #include <sys/sysmacros.h>
  35 #include <sys/systm.h>
  36 #include <sys/cred.h>
  37 #include <sys/user.h>
  38 #include <sys/errno.h>
  39 #include <sys/proc.h>
  40 #include <sys/poll_impl.h> /* only needed for kludge in sigwaiting_send() */
  41 #include <sys/signal.h>
  42 #include <sys/siginfo.h>
  43 #include <sys/fault.h>
  44 #include <sys/ucontext.h>
  45 #include <sys/procfs.h>
  46 #include <sys/wait.h>
  47 #include <sys/class.h>
  48 #include <sys/mman.h>
  49 #include <sys/procset.h>
  50 #include <sys/kmem.h>
  51 #include <sys/cpuvar.h>
  52 #include <sys/prsystm.h>
  53 #include <sys/debug.h>
  54 #include <vm/as.h>
  55 #include <sys/bitmap.h>
  56 #include <c2/audit.h>
  57 #include <sys/core.h>
  58 #include <sys/schedctl.h>
  59 #include <sys/contract/process_impl.h>
  60 #include <sys/cyclic.h>
  61 #include <sys/dtrace.h>
  62 #include <sys/sdt.h>
  63 #include <sys/signalfd.h>
  64 #include <sys/brand.h>
  65 
/* The empty signal set. */
const k_sigset_t nullsmask = {0, 0, 0};

/* The set of all signals (the three mask words must form one contiguous set). */
const k_sigset_t fillset =      /* MUST be contiguous */
        {FILLSET0, FILLSET1, FILLSET2};

/*
 * Signals that can never be blocked, per the CANTMASK0/1/2 definitions
 * in <sys/signal.h>.  Used below to exempt signals from schedctl blocking.
 */
const k_sigset_t cantmask =
        {CANTMASK0, CANTMASK1, CANTMASK2};

/*
 * SIGILL, SIGTRAP and SIGPWR.  Per the name, presumably the set of signals
 * whose disposition is not reset to SIG_DFL on delivery -- confirm against
 * the users of this set elsewhere in the kernel.
 */
const k_sigset_t cantreset =
        {(sigmask(SIGILL)|sigmask(SIGTRAP)|sigmask(SIGPWR)), 0, 0};

/* Signals whose default disposition is to be ignored. */
const k_sigset_t ignoredefault =
        {(sigmask(SIGCONT)|sigmask(SIGCLD)|sigmask(SIGPWR)
        |sigmask(SIGWINCH)|sigmask(SIGURG)|sigmask(SIGWAITING)),
        (sigmask(SIGLWP)|sigmask(SIGCANCEL)|sigmask(SIGFREEZE)
        |sigmask(SIGTHAW)|sigmask(SIGXRES)|sigmask(SIGJVM1)
        |sigmask(SIGJVM2)|sigmask(SIGINFO)), 0};

/*
 * Signals whose default disposition is to stop the process
 * (job control); consulted by sigtoproc() and isjobstop() below.
 */
const k_sigset_t stopdefault =
        {(sigmask(SIGSTOP)|sigmask(SIGTSTP)|sigmask(SIGTTOU)|sigmask(SIGTTIN)),
        0, 0};

/* Signals whose default disposition includes a core dump. */
const k_sigset_t coredefault =
        {(sigmask(SIGQUIT)|sigmask(SIGILL)|sigmask(SIGTRAP)|sigmask(SIGIOT)
        |sigmask(SIGEMT)|sigmask(SIGFPE)|sigmask(SIGBUS)|sigmask(SIGSEGV)
        |sigmask(SIGSYS)|sigmask(SIGXCPU)|sigmask(SIGXFSZ)), 0, 0};

/*
 * Job-control stop signals held while the parent waits in vfork();
 * see the SVFORK test in issig_justlooking().
 */
const k_sigset_t holdvfork =
        {(sigmask(SIGTTOU)|sigmask(SIGTTIN)|sigmask(SIGTSTP)), 0, 0};
  95 
/* Forward declarations for static helpers defined later in this file. */
static  int     isjobstop(int);
static  void    post_sigcld(proc_t *, sigqueue_t *);


/*
 * signalfd helper function which is set when the signalfd driver loads.
 */
void (*sigfd_exit_helper)();

/*
 * Count of outstanding user-thread stop requests that have been posted.
 * This may be briefly inaccurate in special situations, e.g. when a
 * virtually stopped thread starts to run.
 */
static int num_utstop;
/*
 * Condition variable used to broadcast an event once all thread-stop
 * requests have been processed.
 */
static kcondvar_t utstop_cv;

/*
 * NOTE(review): presumably guards num_utstop/utstop_cv above -- confirm
 * against the add/del utstop routines elsewhere in this file.
 */
static kmutex_t thread_stop_lock;
void del_one_utstop(void);
 119 
 120 /*
 121  * Send the specified signal to the specified process.
 122  */
/*
 * Post signal 'sig' to process 'p' (process-directed: NULL thread).
 * Acquires p->p_lock around the call, since sigtoproc() asserts that
 * the caller holds it.
 */
void
psignal(proc_t *p, int sig)
{
        mutex_enter(&p->p_lock);
        sigtoproc(p, NULL, sig);        /* NULL => any lwp may field it */
        mutex_exit(&p->p_lock);
}
 130 
 131 /*
 132  * Send the specified signal to the specified thread.
 133  */
/*
 * Post signal 'sig' directed at thread 't'.
 * Acquires the owning process's p_lock around the call, since
 * sigtoproc() asserts that the caller holds it.
 */
void
tsignal(kthread_t *t, int sig)
{
        proc_t *p = ttoproc(t);

        mutex_enter(&p->p_lock);
        sigtoproc(p, t, sig);           /* directed delivery to t */
        mutex_exit(&p->p_lock);
}
 143 
 144 int
 145 signal_is_blocked(kthread_t *t, int sig)
 146 {
 147         return (sigismember(&t->t_hold, sig) ||
 148             (schedctl_sigblock(t) && !sigismember(&cantmask, sig)));
 149 }
 150 
 151 /*
 152  * Return true if the signal can safely be ignored.
 153  * That is, if the signal is included in the p_ignore mask and doing so is not
 154  * forbidden by any process branding.
 155  */
 156 static int
 157 sig_ignorable(proc_t *p, klwp_t *lwp, int sig)
 158 {
 159         return (sigismember(&p->p_ignore, sig) &&        /* sig in ignore mask */
 160             !(PROC_IS_BRANDED(p) &&                     /* allowed by brand */
 161             BROP(p)->b_sig_ignorable != NULL &&
 162             BROP(p)->b_sig_ignorable(p, lwp, sig) == B_FALSE));
 163 
 164 }
 165 
 166 /*
 167  * Return true if the signal can safely be discarded on generation.
 168  * That is, if there is no need for the signal on the receiving end.
 169  * The answer is true if the process is a zombie or
 170  * if all of these conditions are true:
 171  *      the signal is being ignored
 172  *      the process is single-threaded
 173  *      the signal is not being traced by /proc
 174  *      the signal is not blocked by the process
 175  *      the signal is not being accepted via sigwait()
 176  */
 177 static int
 178 sig_discardable(proc_t *p, kthread_t *tp, int sig)
 179 {
 180         kthread_t *t = p->p_tlist;
 181         klwp_t *lwp = (tp == NULL) ? NULL : tp->t_lwp;
 182 
 183         return (t == NULL ||            /* if zombie or ... */
 184             (sig_ignorable(p, lwp, sig) &&              /* signal is ignored */
 185             t->t_forw == t &&                        /* and single-threaded */
 186             !tracing(p, sig) &&                 /* and no /proc tracing */
 187             !signal_is_blocked(t, sig) &&       /* and signal not blocked */
 188             !sigismember(&t->t_sigwait, sig)));  /* and not being accepted */
 189 }
 190 
 191 /*
 192  * Return true if this thread is going to eat this signal soon.
 193  * Note that, if the signal is SIGKILL, we force stopped threads to be
 194  * set running (to make SIGKILL be a sure kill), but only if the process
 195  * is not currently locked by /proc (the P_PR_LOCK flag).  Code in /proc
 196  * relies on the fact that a process will not change shape while P_PR_LOCK
 197  * is set (it drops and reacquires p->p_lock while leaving P_PR_LOCK set).
 198  * We wish that we could simply call prbarrier() below, in sigtoproc(), to
 199  * ensure that the process is not locked by /proc, but prbarrier() drops
 200  * and reacquires p->p_lock and dropping p->p_lock here would be damaging.
 201  */
int
eat_signal(kthread_t *t, int sig)
{
        int rval = 0;
        ASSERT(THREAD_LOCK_HELD(t));

        /*
         * Do not do anything if the target thread has the signal blocked.
         */
        if (!signal_is_blocked(t, sig)) {
                t->t_sig_check = 1;  /* have thread do an issig */
                if (ISWAKEABLE(t) || ISWAITING(t)) {
                        /* Interruptibly sleeping: wake it to field the sig. */
                        setrun_locked(t);
                        rval = 1;
                } else if (t->t_state == TS_STOPPED && sig == SIGKILL &&
                    !(ttoproc(t)->p_proc_flag & P_PR_LOCK)) {
                        /*
                         * SIGKILL to a stopped thread: force it running so
                         * the kill is sure -- but never while /proc holds
                         * the process (P_PR_LOCK); see block comment above.
                         */
                        ttoproc(t)->p_stopsig = 0;
                        t->t_dtrace_stop = 0;
                        t->t_schedflag |= TS_XSTART | TS_PSTART | TS_BSTART;
                        setrun_locked(t);
                } else if (t != curthread && t->t_state == TS_ONPROC) {
                        aston(t);       /* make it do issig promptly */
                        if (t->t_cpu != CPU)
                                poke_cpu(t->t_cpu->cpu_id);
                        rval = 1;
                } else if (t->t_state == TS_RUN) {
                        /* Will notice t_sig_check when it next runs. */
                        rval = 1;
                }
        }

        return (rval);
}
 234 
 235 /*
 236  * Post a signal.
 237  * If a non-null thread pointer is passed, then post the signal
 238  * to the thread/lwp, otherwise post the signal to the process.
 239  */
void
sigtoproc(proc_t *p, kthread_t *t, int sig)
{
        kthread_t *tt;
        /*
         * The signal is "external" when the sender is a non-system process
         * outside the target's process contract.
         */
        int ext = !(curproc->p_flag & SSYS) &&
            (curproc->p_ct_process != p->p_ct_process);

        ASSERT(MUTEX_HELD(&p->p_lock));

        /* System processes don't get signals */
        if (sig <= 0 || sig >= NSIG || (p->p_flag & SSYS))
                return;

        /*
         * Regardless of origin or directedness,
         * SIGKILL kills all lwps in the process immediately
         * and jobcontrol signals affect all lwps in the process.
         */
        if (sig == SIGKILL) {
                p->p_flag |= SKILLED | (ext ? SEXTKILLED : 0);
                t = NULL;       /* force process-directed handling below */
        } else if (sig == SIGCONT) {
                /*
                 * The SSCONT flag will remain set until a stopping
                 * signal comes in (below).  This is harmless.
                 */
                p->p_flag |= SSCONT;
                /*
                 * SIGCONT discards all pending job-control stop signals,
                 * first process-wide, then per-lwp.
                 */
                sigdelq(p, NULL, SIGSTOP);
                sigdelq(p, NULL, SIGTSTP);
                sigdelq(p, NULL, SIGTTOU);
                sigdelq(p, NULL, SIGTTIN);
                sigdiffset(&p->p_sig, &stopdefault);
                sigdiffset(&p->p_extsig, &stopdefault);
                p->p_stopsig = 0;
                if ((tt = p->p_tlist) != NULL) {
                        do {
                                sigdelq(p, tt, SIGSTOP);
                                sigdelq(p, tt, SIGTSTP);
                                sigdelq(p, tt, SIGTTOU);
                                sigdelq(p, tt, SIGTTIN);
                                sigdiffset(&tt->t_sig, &stopdefault);
                                sigdiffset(&tt->t_extsig, &stopdefault);
                        } while ((tt = tt->t_forw) != p->p_tlist);
                }
                /* Restart every lwp that is stopped for job control. */
                if ((tt = p->p_tlist) != NULL) {
                        do {
                                thread_lock(tt);
                                if (tt->t_state == TS_STOPPED &&
                                    tt->t_whystop == PR_JOBCONTROL) {
                                        tt->t_schedflag |= TS_XSTART;
                                        setrun_locked(tt);
                                }
                                thread_unlock(tt);
                        } while ((tt = tt->t_forw) != p->p_tlist);
                }
        } else if (sigismember(&stopdefault, sig)) {
                /*
                 * This test has a race condition which we can't fix:
                 * By the time the stopping signal is received by
                 * the target process/thread, the signal handler
                 * and/or the detached state might have changed.
                 */
                if (PTOU(p)->u_signal[sig-1] == SIG_DFL &&
                    (sig == SIGSTOP || !p->p_pgidp->pid_pgorphaned))
                        p->p_flag &= ~SSCONT;
                /* A stop signal cancels any pending SIGCONT, everywhere. */
                sigdelq(p, NULL, SIGCONT);
                sigdelset(&p->p_sig, SIGCONT);
                sigdelset(&p->p_extsig, SIGCONT);
                if ((tt = p->p_tlist) != NULL) {
                        do {
                                sigdelq(p, tt, SIGCONT);
                                sigdelset(&tt->t_sig, SIGCONT);
                                sigdelset(&tt->t_extsig, SIGCONT);
                        } while ((tt = tt->t_forw) != p->p_tlist);
                }
        }

        if (sig_discardable(p, t, sig)) {
                DTRACE_PROC3(signal__discard, kthread_t *, p->p_tlist,
                    proc_t *, p, int, sig);
                return;
        }

        if (t != NULL) {
                /*
                 * This is a directed signal, wake up the lwp.
                 */
                sigaddset(&t->t_sig, sig);
                if (ext)
                        sigaddset(&t->t_extsig, sig);
                thread_lock(t);
                (void) eat_signal(t, sig);
                thread_unlock(t);
                DTRACE_PROC2(signal__send, kthread_t *, t, int, sig);
                /* Notify any signalfd poll-wakeup callback for this proc. */
                if (p->p_sigfd != NULL && ((sigfd_proc_state_t *)
                    (p->p_sigfd))->sigfd_pollwake_cb != NULL)
                        (*((sigfd_proc_state_t *)(p->p_sigfd))->
                            sigfd_pollwake_cb)(p, sig);

        } else if ((tt = p->p_tlist) != NULL) {
                /*
                 * Make sure that some lwp that already exists
                 * in the process fields the signal soon.
                 * Wake up an interruptibly sleeping lwp if necessary.
                 * For SIGKILL make all of the lwps see the signal;
                 * This is needed to guarantee a sure kill for processes
                 * with a mix of realtime and non-realtime threads.
                 */
                int su = 0;     /* count of suspended lwps seen */

                sigaddset(&p->p_sig, sig);
                if (ext)
                        sigaddset(&p->p_extsig, sig);
                do {
                        thread_lock(tt);
                        /* For SIGKILL keep going so every lwp sees it. */
                        if (eat_signal(tt, sig) && sig != SIGKILL) {
                                thread_unlock(tt);
                                break;
                        }
                        if (SUSPENDED(tt))
                                su++;
                        thread_unlock(tt);
                } while ((tt = tt->t_forw) != p->p_tlist);
                /*
                 * If the process is deadlocked, make somebody run and die.
                 */
                if (sig == SIGKILL && p->p_stat != SIDL &&
                    p->p_lwprcnt == 0 && p->p_lwpcnt == su &&
                    !(p->p_proc_flag & P_PR_LOCK)) {
                        thread_lock(tt);
                        p->p_lwprcnt++;
                        tt->t_schedflag |= TS_CSTART;
                        setrun_locked(tt);
                        thread_unlock(tt);
                }

                DTRACE_PROC2(signal__send, kthread_t *, tt, int, sig);
                /* Notify any signalfd poll-wakeup callback for this proc. */
                if (p->p_sigfd != NULL && ((sigfd_proc_state_t *)
                    (p->p_sigfd))->sigfd_pollwake_cb != NULL)
                        (*((sigfd_proc_state_t *)(p->p_sigfd))->
                            sigfd_pollwake_cb)(p, sig);
        }
}
 383 
/*
 * If 'sig' is a job-control stop signal whose disposition is SIG_DFL,
 * perform the job-control stop (unless SIGCONT has intervened, the
 * process group is orphaned for the non-SIGSTOP cases, or this is the
 * agent lwp) and return 1.  Return 0 if the signal is not a job-control
 * stop.  Called with p->p_lock held; note that stop() and the parent
 * notification below drop and reacquire it.
 */
static int
isjobstop(int sig)
{
        proc_t *p = ttoproc(curthread);

        ASSERT(MUTEX_HELD(&p->p_lock));

        if (PTOU(curproc)->u_signal[sig-1] == SIG_DFL &&
            sigismember(&stopdefault, sig)) {
                /*
                 * If SIGCONT has been posted since we promoted this signal
                 * from pending to current, then don't do a jobcontrol stop.
                 */
                if (!(p->p_flag & SSCONT) &&
                    (sig == SIGSTOP || !p->p_pgidp->pid_pgorphaned) &&
                    curthread != p->p_agenttp) {
                        sigqueue_t *sqp;

                        stop(PR_JOBCONTROL, sig);
                        /* We return here after being continued. */
                        mutex_exit(&p->p_lock);
                        sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);
                        mutex_enter(&pidlock);
                        /*
                         * Only the first lwp to continue notifies the parent.
                         */
                        if (p->p_pidflag & CLDCONT)
                                siginfofree(sqp);
                        else {
                                p->p_pidflag |= CLDCONT;
                                p->p_wcode = CLD_CONTINUED;
                                p->p_wdata = SIGCONT;
                                sigcld(p, sqp);
                        }
                        mutex_exit(&pidlock);
                        mutex_enter(&p->p_lock);
                }
                return (1);
        }
        return (0);
}
 424 
 425 /*
 426  * Returns true if the current process has a signal to process, and
 427  * the signal is not held.  The signal to process is put in p_cursig.
 428  * This is asked at least once each time a process enters the system
 429  * (though this can usually be done without actually calling issig by
 430  * checking the pending signal masks).  A signal does not do anything
 431  * directly to a process; it sets a flag that asks the process to do
 432  * something to itself.
 433  *
 434  * The "why" argument indicates the allowable side-effects of the call:
 435  *
 436  * FORREAL:  Extract the next pending signal from p_sig into p_cursig;
 437  * stop the process if a stop has been requested or if a traced signal
 438  * is pending.
 439  *
 440  * JUSTLOOKING:  Don't stop the process, just indicate whether or not
 441  * a signal might be pending (FORREAL is needed to tell for sure).
 442  *
 443  * XXX: Changes to the logic in these routines should be propagated
 444  * to lm_sigispending().  See bug 1201594.
 445  */
 446 
 447 static int issig_forreal(void);
 448 static int issig_justlooking(void);
 449 
 450 int
 451 issig(int why)
 452 {
 453         ASSERT(why == FORREAL || why == JUSTLOOKING);
 454 
 455         return ((why == FORREAL)? issig_forreal() : issig_justlooking());
 456 }
 457 
 458 
static int
issig_justlooking(void)
{
        kthread_t *t = curthread;
        klwp_t *lwp = ttolwp(t);
        proc_t *p = ttoproc(t);
        k_sigset_t set;

        /*
         * This function answers the question:
         * "Is there any reason to call issig_forreal()?"
         *
         * We have to answer the question w/o grabbing any locks
         * because we are (most likely) being called after we
         * put ourselves on the sleep queue.
         */

        /* A DTrace stop or raise action always warrants the real check. */
        if (t->t_dtrace_stop | t->t_dtrace_sig)
                return (1);

        /*
         * Another piece of complexity in this process.  When single-stepping a
         * process, we don't want an intervening signal or TP_PAUSE request to
         * suspend the current thread.  Otherwise, the controlling process will
         * hang because we will be stopped with TS_PSTART set in t_schedflag.
         * We will trigger any remaining signals when we re-enter the kernel on
         * the single step trap.
         */
        if (lwp->lwp_pcb.pcb_flags & NORMAL_STEP)
                return (0);

        /*
         * Forced returns, lwp exit/kill, stop requests (when stops are
         * permitted), and an already-current signal all need issig_forreal().
         */
        if ((lwp->lwp_asleep && MUSTRETURN(p, t)) ||
            (p->p_flag & (SEXITLWPS|SKILLED)) ||
            (lwp->lwp_nostop == 0 &&
            (p->p_stopsig | (p->p_flag & (SHOLDFORK1|SHOLDWATCH)) |
            (t->t_proc_flag &
            (TP_PRSTOP|TP_HOLDLWP|TP_CHKPT|TP_PAUSE)))) ||
            lwp->lwp_cursig)
                return (1);

        if (p->p_flag & SVFWAIT)
                return (0);
        /* Compute the set of deliverable (pending and unheld) signals. */
        set = p->p_sig;
        sigorset(&set, &t->t_sig);
        if (schedctl_sigblock(t))       /* all blockable signals blocked */
                sigandset(&set, &cantmask);
        else
                sigdiffset(&set, &t->t_hold);
        if (p->p_flag & SVFORK)
                sigdiffset(&set, &holdvfork);

        if (!sigisempty(&set)) {
                int sig;

                /* Any traced, awaited, or non-ignorable member counts. */
                for (sig = 1; sig < NSIG; sig++) {
                        if (sigismember(&set, sig) &&
                            (tracing(p, sig) ||
                            sigismember(&t->t_sigwait, sig) ||
                            !sig_ignorable(p, lwp, sig))) {
                                /*
                                 * Don't promote a signal that will stop
                                 * the process when lwp_nostop is set.
                                 */
                                if (!lwp->lwp_nostop ||
                                    PTOU(p)->u_signal[sig-1] != SIG_DFL ||
                                    !sigismember(&stopdefault, sig))
                                        return (1);
                        }
                }
        }

        return (0);
}
 532 
 533 static int
 534 issig_forreal(void)
 535 {
 536         int sig = 0, ext = 0;
 537         kthread_t *t = curthread;
 538         klwp_t *lwp = ttolwp(t);
 539         proc_t *p = ttoproc(t);
 540         int toproc = 0;
 541         int sigcld_found = 0;
 542         int nostop_break = 0;
 543 
 544         ASSERT(t->t_state == TS_ONPROC);
 545 
 546         mutex_enter(&p->p_lock);
 547         schedctl_finish_sigblock(t);
 548 
 549         if (t->t_dtrace_stop | t->t_dtrace_sig) {
 550                 if (t->t_dtrace_stop) {
 551                         /*
 552                          * If DTrace's "stop" action has been invoked on us,
 553                          * set TP_PRSTOP.
 554                          */
 555                         t->t_proc_flag |= TP_PRSTOP;
 556                 }
 557 
 558                 if (t->t_dtrace_sig != 0) {
 559                         k_siginfo_t info;
 560 
 561                         /*
 562                          * Post the signal generated as the result of
 563                          * DTrace's "raise" action as a normal signal before
 564                          * the full-fledged signal checking begins.
 565                          */
 566                         bzero(&info, sizeof (info));
 567                         info.si_signo = t->t_dtrace_sig;
 568                         info.si_code = SI_DTRACE;
 569 
 570                         sigaddq(p, NULL, &info, KM_NOSLEEP);
 571 
 572                         t->t_dtrace_sig = 0;
 573                 }
 574         }
 575 
 576         for (;;) {
 577                 if (p->p_flag & (SEXITLWPS|SKILLED)) {
 578                         lwp->lwp_cursig = sig = SIGKILL;
 579                         lwp->lwp_extsig = ext = (p->p_flag & SEXTKILLED) != 0;
 580                         t->t_sig_check = 1;
 581                         break;
 582                 }
 583 
 584                 /*
 585                  * Another piece of complexity in this process.  When
 586                  * single-stepping a process, we don't want an intervening
 587                  * signal or TP_PAUSE request to suspend the current thread.
                 * Otherwise, the controlling process will hang because we will
 589                  * be stopped with TS_PSTART set in t_schedflag.  We will
 590                  * trigger any remaining signals when we re-enter the kernel on
 591                  * the single step trap.
 592                  */
 593                 if (lwp->lwp_pcb.pcb_flags & NORMAL_STEP) {
 594                         sig = 0;
 595                         break;
 596                 }
 597 
 598                 /*
 599                  * Hold the lwp here for watchpoint manipulation.
 600                  */
 601                 if ((t->t_proc_flag & TP_PAUSE) && !lwp->lwp_nostop) {
 602                         stop(PR_SUSPENDED, SUSPEND_PAUSE);
 603                         continue;
 604                 }
 605 
 606                 if (lwp->lwp_asleep && MUSTRETURN(p, t)) {
 607                         if ((sig = lwp->lwp_cursig) != 0) {
 608                                 /*
 609                                  * Make sure we call ISSIG() in post_syscall()
 610                                  * to re-validate this current signal.
 611                                  */
 612                                 t->t_sig_check = 1;
 613                         }
 614                         break;
 615                 }
 616 
 617                 /*
 618                  * If the request is PR_CHECKPOINT, ignore the rest of signals
 619                  * or requests.  Honor other stop requests or signals later.
 620                  * Go back to top of loop here to check if an exit or hold
 621                  * event has occurred while stopped.
 622                  */
 623                 if ((t->t_proc_flag & TP_CHKPT) && !lwp->lwp_nostop) {
 624                         stop(PR_CHECKPOINT, 0);
 625                         continue;
 626                 }
 627 
 628                 /*
 629                  * Honor SHOLDFORK1, SHOLDWATCH, and TP_HOLDLWP before dealing
 630                  * with signals or /proc.  Another lwp is executing fork1(),
 631                  * or is undergoing watchpoint activity (remapping a page),
 632                  * or is executing lwp_suspend() on this lwp.
 633                  * Again, go back to top of loop to check if an exit
 634                  * or hold event has occurred while stopped.
 635                  */
 636                 if (((p->p_flag & (SHOLDFORK1|SHOLDWATCH)) ||
 637                     (t->t_proc_flag & TP_HOLDLWP)) && !lwp->lwp_nostop) {
 638                         stop(PR_SUSPENDED, SUSPEND_NORMAL);
 639                         continue;
 640                 }
 641 
 642                 /*
 643                  * Allow the brand the chance to alter (or suppress) delivery
 644                  * of this signal.
 645                  */
 646                 if (PROC_IS_BRANDED(p) && BROP(p)->b_issig_stop != NULL) {
 647                         /*
 648                          * The brand hook will return 0 if it would like
 649                          * us to drive on, or -1 if we should restart
 650                          * the loop to check other conditions.
 651                          */
 652                         if (BROP(p)->b_issig_stop(p, lwp) != 0) {
 653                                 continue;
 654                         }
 655                 }
 656 
 657                 /*
 658                  * Honor requested stop before dealing with the
 659                  * current signal; a debugger may change it.
 660                  * Do not want to go back to loop here since this is a special
 661                  * stop that means: make incremental progress before the next
 662                  * stop. The danger is that returning to top of loop would most
 663                  * likely drop the thread right back here to stop soon after it
 664                  * was continued, violating the incremental progress request.
 665                  */
 666                 if ((t->t_proc_flag & TP_PRSTOP) && !lwp->lwp_nostop)
 667                         stop(PR_REQUESTED, 0);
 668 
 669                 /*
 670                  * If a debugger wants us to take a signal it will have
 671                  * left it in lwp->lwp_cursig.  If lwp_cursig has been cleared
 672                  * or if it's being ignored, we continue on looking for another
 673                  * signal.  Otherwise we return the specified signal, provided
 674                  * it's not a signal that causes a job control stop.
 675                  *
 676                  * When stopped on PR_JOBCONTROL, there is no current
 677                  * signal; we cancel lwp->lwp_cursig temporarily before
 678                  * calling isjobstop().  The current signal may be reset
 679                  * by a debugger while we are stopped in isjobstop().
 680                  *
 681                  * If the current thread is accepting the signal
 682                  * (via sigwait(), sigwaitinfo(), or sigtimedwait()),
 683                  * we allow the signal to be accepted, even if it is
 684                  * being ignored, and without causing a job control stop.
 685                  */
 686                 if ((sig = lwp->lwp_cursig) != 0) {
 687                         ext = lwp->lwp_extsig;
 688                         lwp->lwp_cursig = 0;
 689                         lwp->lwp_extsig = 0;
 690                         if (sigismember(&t->t_sigwait, sig) ||
 691                             (!sig_ignorable(p, lwp, sig) &&
 692                             !isjobstop(sig))) {
 693                                 if (p->p_flag & (SEXITLWPS|SKILLED)) {
 694                                         sig = SIGKILL;
 695                                         ext = (p->p_flag & SEXTKILLED) != 0;
 696                                 }
 697                                 lwp->lwp_cursig = (uchar_t)sig;
 698                                 lwp->lwp_extsig = (uchar_t)ext;
 699                                 break;
 700                         }
 701                         /*
 702                          * The signal is being ignored or it caused a
 703                          * job-control stop.  If another current signal
 704                          * has not been established, return the current
 705                          * siginfo, if any, to the memory manager.
 706                          */
 707                         if (lwp->lwp_cursig == 0 && lwp->lwp_curinfo != NULL) {
 708                                 siginfofree(lwp->lwp_curinfo);
 709                                 lwp->lwp_curinfo = NULL;
 710                         }
 711                         /*
 712                          * Loop around again in case we were stopped
 713                          * on a job control signal and a /proc stop
 714                          * request was posted or another current signal
 715                          * was established while we were stopped.
 716                          */
 717                         continue;
 718                 }
 719 
 720                 if (p->p_stopsig && !lwp->lwp_nostop &&
 721                     curthread != p->p_agenttp) {
 722                         /*
 723                          * Some lwp in the process has already stopped
 724                          * showing PR_JOBCONTROL.  This is a stop in
 725                          * sympathy with the other lwp, even if this
 726                          * lwp is blocking the stopping signal.
 727                          */
 728                         stop(PR_JOBCONTROL, p->p_stopsig);
 729                         continue;
 730                 }
 731 
 732                 /*
 733                  * Loop on the pending signals until we find a
 734                  * non-held signal that is traced or not ignored.
 735                  * First check the signals pending for the lwp,
 736                  * then the signals pending for the process as a whole.
 737                  */
 738                 for (;;) {
 739                         if ((sig = fsig(&t->t_sig, t)) != 0) {
 740                                 toproc = 0;
 741                                 if (tracing(p, sig) ||
 742                                     sigismember(&t->t_sigwait, sig) ||
 743                                     !sig_ignorable(p, lwp, sig)) {
 744                                         if (sigismember(&t->t_extsig, sig))
 745                                                 ext = 1;
 746                                         break;
 747                                 }
 748                                 sigdelset(&t->t_sig, sig);
 749                                 sigdelset(&t->t_extsig, sig);
 750                                 sigdelq(p, t, sig);
 751                         } else if ((sig = fsig(&p->p_sig, t)) != 0) {
 752                                 if (sig == SIGCLD)
 753                                         sigcld_found = 1;
 754                                 toproc = 1;
 755                                 if (tracing(p, sig) ||
 756                                     sigismember(&t->t_sigwait, sig) ||
 757                                     !sig_ignorable(p, lwp, sig)) {
 758                                         if (sigismember(&p->p_extsig, sig))
 759                                                 ext = 1;
 760                                         break;
 761                                 }
 762                                 sigdelset(&p->p_sig, sig);
 763                                 sigdelset(&p->p_extsig, sig);
 764                                 sigdelq(p, NULL, sig);
 765                         } else {
 766                                 /* no signal was found */
 767                                 break;
 768                         }
 769                 }
 770 
 771                 if (sig == 0) { /* no signal was found */
 772                         if (p->p_flag & (SEXITLWPS|SKILLED)) {
 773                                 lwp->lwp_cursig = SIGKILL;
 774                                 sig = SIGKILL;
 775                                 ext = (p->p_flag & SEXTKILLED) != 0;
 776                         }
 777                         break;
 778                 }
 779 
 780                 /*
 781                  * If we have been informed not to stop (i.e., we are being
 782                  * called from within a network operation), then don't promote
 783                  * the signal at this time, just return the signal number.
 784                  * We will call issig() again later when it is safe.
 785                  *
 786                  * fsig() does not return a jobcontrol stopping signal
 787                  * with a default action of stopping the process if
 788                  * lwp_nostop is set, so we won't be causing a bogus
 789                  * EINTR by this action.  (Such a signal is eaten by
 790                  * isjobstop() when we loop around to do final checks.)
 791                  */
 792                 if (lwp->lwp_nostop) {
 793                         nostop_break = 1;
 794                         break;
 795                 }
 796 
 797                 /*
 798                  * Promote the signal from pending to current.
 799                  *
 800                  * Note that sigdeq() will set lwp->lwp_curinfo to NULL
 801                  * if no siginfo_t exists for this signal.
 802                  */
 803                 lwp->lwp_cursig = (uchar_t)sig;
 804                 lwp->lwp_extsig = (uchar_t)ext;
 805                 t->t_sig_check = 1;  /* so post_syscall will see signal */
 806                 ASSERT(lwp->lwp_curinfo == NULL);
 807                 sigdeq(p, toproc ? NULL : t, sig, &lwp->lwp_curinfo);
 808 
 809                 if (tracing(p, sig))
 810                         stop(PR_SIGNALLED, sig);
 811 
 812                 /*
 813                  * Loop around to check for requested stop before
 814                  * performing the usual current-signal actions.
 815                  */
 816         }
 817 
 818         mutex_exit(&p->p_lock);
 819 
 820         /*
 821          * If SIGCLD was dequeued from the process's signal queue,
 822          * search for other pending SIGCLD's from the list of children.
 823          */
 824         if (sigcld_found)
 825                 sigcld_repost();
 826 
 827         if (sig != 0)
 828                 (void) undo_watch_step(NULL);
 829 
 830         /*
 831          * If we have been blocked since the p_lock was dropped off
 832          * above, then this promoted signal might have been handled
 833          * already when we were on the way back from sleep queue, so
 834          * just ignore it.
 835          * If we have been informed not to stop, just return the signal
 836          * number. Also see comments above.
 837          */
 838         if (!nostop_break) {
 839                 sig = lwp->lwp_cursig;
 840         }
 841 
 842         return (sig != 0);
 843 }
 844 
 845 /*
 846  * Return true if the process is currently stopped showing PR_JOBCONTROL.
 847  * This is true only if all of the process's lwp's are so stopped.
 848  * If this is asked by one of the lwps in the process, exclude that lwp.
 849  */
 850 int
 851 jobstopped(proc_t *p)
 852 {
 853         kthread_t *t;
 854 
 855         ASSERT(MUTEX_HELD(&p->p_lock));
 856 
 857         if ((t = p->p_tlist) == NULL)
 858                 return (0);
 859 
 860         do {
 861                 thread_lock(t);
 862                 /* ignore current, zombie and suspended lwps in the test */
 863                 if (!(t == curthread || t->t_state == TS_ZOMB ||
 864                     SUSPENDED(t)) &&
 865                     (t->t_state != TS_STOPPED ||
 866                     t->t_whystop != PR_JOBCONTROL)) {
 867                         thread_unlock(t);
 868                         return (0);
 869                 }
 870                 thread_unlock(t);
 871         } while ((t = t->t_forw) != p->p_tlist);
 872 
 873         return (1);
 874 }
 875 
/*
 * Put ourself (curthread) into the stopped state and notify tracers.
 *
 * 'why' is the PR_* reason for stopping (PR_JOBCONTROL, PR_SUSPENDED,
 * PR_CHECKPOINT, PR_BRAND, or a /proc stop reason such as PR_SIGNALLED
 * or PR_REQUESTED); 'what' is the reason-specific detail (e.g. the
 * stopping signal number for PR_JOBCONTROL).
 *
 * Called and returns with p->p_lock held, but the lock is dropped and
 * reacquired internally (around prstop() and the parent notifications),
 * so conditions established before the call must be re-verified after
 * it returns.
 */
void
stop(int why, int what)
{
	kthread_t	*t = curthread;
	proc_t		*p = ttoproc(t);
	klwp_t		*lwp = ttolwp(t);
	kthread_t	*tx;
	lwpent_t	*lep;
	int		procstop;
	/*
	 * Start with all TS_*START bits set; each case below clears the
	 * one bit that must later be turned back on to resume this lwp.
	 */
	int		flags = TS_ALLSTART;
	hrtime_t	stoptime;

	/*
	 * Can't stop a system process.
	 */
	if (p == NULL || lwp == NULL || (p->p_flag & SSYS) || p->p_as == &kas)
		return;

	ASSERT(MUTEX_HELD(&p->p_lock));

	if (why != PR_SUSPENDED && why != PR_CHECKPOINT) {
		/*
		 * Don't stop an lwp with SIGKILL pending.
		 * Don't stop if the process or lwp is exiting.
		 */
		if (lwp->lwp_cursig == SIGKILL ||
		    sigismember(&t->t_sig, SIGKILL) ||
		    sigismember(&p->p_sig, SIGKILL) ||
		    (t->t_proc_flag & TP_LWPEXIT) ||
		    (p->p_flag & (SEXITLWPS|SKILLED))) {
			p->p_stopsig = 0;
			t->t_proc_flag &= ~(TP_PRSTOP|TP_PRVSTOP);
			return;
		}
	}

	/*
	 * Make sure we don't deadlock on a recursive call to prstop().
	 * prstop() sets the lwp_nostop flag.
	 */
	if (lwp->lwp_nostop)
		return;

	/*
	 * Make sure the lwp is in an orderly state for inspection
	 * by a debugger through /proc or for dumping via core().
	 */
	schedctl_finish_sigblock(t);
	t->t_proc_flag |= TP_STOPPING;	/* must set before dropping p_lock */
	mutex_exit(&p->p_lock);
	stoptime = gethrtime();
	prstop(why, what);
	(void) undo_watch_step(NULL);
	mutex_enter(&p->p_lock);
	ASSERT(t->t_state == TS_ONPROC);

	switch (why) {
	case PR_CHECKPOINT:
		/*
		 * The situation may have changed since we dropped
		 * and reacquired p->p_lock. Double-check now
		 * whether we should stop or not.
		 */
		if (!(t->t_proc_flag & TP_CHKPT)) {
			t->t_proc_flag &= ~TP_STOPPING;
			return;
		}
		t->t_proc_flag &= ~TP_CHKPT;
		flags &= ~TS_RESUME;	/* only a resume may restart us */
		break;

	case PR_JOBCONTROL:
		ASSERT(what == SIGSTOP || what == SIGTSTP ||
		    what == SIGTTIN || what == SIGTTOU);
		flags &= ~TS_XSTART;	/* only a SIGCONT may restart us */
		break;

	case PR_SUSPENDED:
		ASSERT(what == SUSPEND_NORMAL || what == SUSPEND_PAUSE);
		/*
		 * The situation may have changed since we dropped
		 * and reacquired p->p_lock.  Double-check now
		 * whether we should stop or not.
		 */
		if (what == SUSPEND_PAUSE) {
			if (!(t->t_proc_flag & TP_PAUSE)) {
				t->t_proc_flag &= ~TP_STOPPING;
				return;
			}
			flags &= ~TS_UNPAUSE;
		} else {
			if (!((t->t_proc_flag & TP_HOLDLWP) ||
			    (p->p_flag & (SHOLDFORK|SHOLDFORK1|SHOLDWATCH)))) {
				t->t_proc_flag &= ~TP_STOPPING;
				return;
			}
			/*
			 * If SHOLDFORK is in effect and we are stopping
			 * while asleep (not at the top of the stack),
			 * we return now to allow the hold to take effect
			 * when we reach the top of the kernel stack.
			 */
			if (lwp->lwp_asleep && (p->p_flag & SHOLDFORK)) {
				t->t_proc_flag &= ~TP_STOPPING;
				return;
			}
			flags &= ~TS_CSTART;
		}
		break;

	case PR_BRAND:
		/*
		 * We have been stopped by the brand code for a brand-private
		 * reason.  This is an asynchronous stop affecting only this
		 * LWP.
		 */
		VERIFY(PROC_IS_BRANDED(p));
		flags &= ~TS_BSTART;
		break;

	default:	/* /proc stop */
		flags &= ~TS_PSTART;
		/*
		 * Do synchronous stop unless the async-stop flag is set.
		 * If why is PR_REQUESTED and t->t_dtrace_stop flag is set,
		 * then no debugger is present and we also do synchronous stop.
		 */
		if ((why != PR_REQUESTED || t->t_dtrace_stop) &&
		    !(p->p_proc_flag & P_PR_ASYNC)) {
			int notify;

			/*
			 * Synchronous stop: ask every other lwp in the
			 * process to stop as well.
			 */
			for (tx = t->t_forw; tx != t; tx = tx->t_forw) {
				notify = 0;
				thread_lock(tx);
				/* skip lwps that are (about to be) stopped */
				if (ISTOPPED(tx) ||
				    (tx->t_proc_flag & TP_PRSTOP)) {
					thread_unlock(tx);
					continue;
				}
				tx->t_proc_flag |= TP_PRSTOP;
				tx->t_sig_check = 1;
				if (tx->t_state == TS_SLEEP &&
				    (tx->t_flag & T_WAKEABLE)) {
					/*
					 * Don't actually wake it up if it's
					 * in one of the lwp_*() syscalls.
					 * Mark it virtually stopped and
					 * notify /proc waiters (below).
					 */
					if (tx->t_wchan0 == NULL)
						setrun_locked(tx);
					else {
						tx->t_proc_flag |= TP_PRVSTOP;
						tx->t_stoptime = stoptime;
						notify = 1;
					}
				}

				/* Move waiting thread to run queue */
				if (ISWAITING(tx))
					setrun_locked(tx);

				/*
				 * force the thread into the kernel
				 * if it is not already there.
				 */
				if (tx->t_state == TS_ONPROC &&
				    tx->t_cpu != CPU)
					poke_cpu(tx->t_cpu->cpu_id);
				thread_unlock(tx);
				lep = p->p_lwpdir[tx->t_dslot].ld_entry;
				if (notify && lep->le_trace)
					prnotify(lep->le_trace);
			}
			/*
			 * We do this just in case one of the threads we asked
			 * to stop is in holdlwps() (called from cfork()) or
			 * lwp_suspend().
			 */
			cv_broadcast(&p->p_holdlwps);
		}
		break;
	}

	t->t_stoptime = stoptime;

	if (why == PR_JOBCONTROL || (why == PR_SUSPENDED && p->p_stopsig)) {
		/*
		 * Determine if the whole process is jobstopped.
		 */
		if (jobstopped(p)) {
			sigqueue_t *sqp;
			int sig;

			if ((sig = p->p_stopsig) == 0)
				p->p_stopsig = (uchar_t)(sig = what);
			mutex_exit(&p->p_lock);
			sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);
			mutex_enter(&pidlock);
			/*
			 * The last lwp to stop notifies the parent.
			 * Turn off the CLDCONT flag now so the first
			 * lwp to continue knows what to do.
			 */
			p->p_pidflag &= ~CLDCONT;
			p->p_wcode = CLD_STOPPED;
			p->p_wdata = sig;
			sigcld(p, sqp);
			/*
			 * Grab p->p_lock before releasing pidlock so the
			 * parent and the child don't have a race condition.
			 */
			mutex_enter(&p->p_lock);
			mutex_exit(&pidlock);
			p->p_stopsig = 0;
		} else if (why == PR_JOBCONTROL && p->p_stopsig == 0) {
			/*
			 * Set p->p_stopsig and wake up sleeping lwps
			 * so they will stop in sympathy with this lwp.
			 */
			p->p_stopsig = (uchar_t)what;
			pokelwps(p);
			/*
			 * We do this just in case one of the threads we asked
			 * to stop is in holdlwps() (called from cfork()) or
			 * lwp_suspend().
			 */
			cv_broadcast(&p->p_holdlwps);
		}
	}

	if (why != PR_JOBCONTROL && why != PR_CHECKPOINT && why != PR_BRAND) {
		/*
		 * Do process-level notification when all lwps are
		 * either stopped on events of interest to /proc
		 * or are stopped showing PR_SUSPENDED or are zombies.
		 */
		procstop = 1;
		for (tx = t->t_forw; procstop && tx != t; tx = tx->t_forw) {
			if (VSTOPPED(tx))
				continue;
			thread_lock(tx);
			switch (tx->t_state) {
			case TS_ZOMB:
				break;
			case TS_STOPPED:
				/* neither ISTOPPED nor SUSPENDED? */
				if ((tx->t_schedflag &
				    (TS_CSTART | TS_UNPAUSE | TS_PSTART)) ==
				    (TS_CSTART | TS_UNPAUSE | TS_PSTART))
					procstop = 0;
				break;
			case TS_SLEEP:
				/* not paused for watchpoints? */
				if (!(tx->t_flag & T_WAKEABLE) ||
				    tx->t_wchan0 == NULL ||
				    !(tx->t_proc_flag & TP_PAUSE))
					procstop = 0;
				break;
			default:
				procstop = 0;
				break;
			}
			thread_unlock(tx);
		}
		if (procstop) {
			/* there must not be any remapped watched pages now */
			ASSERT(p->p_mapcnt == 0);
			if (p->p_proc_flag & P_PR_PTRACE) {
				/* ptrace() compatibility */
				mutex_exit(&p->p_lock);
				mutex_enter(&pidlock);
				p->p_wcode = CLD_TRAPPED;
				p->p_wdata = (why == PR_SIGNALLED)?
				    what : SIGTRAP;
				cv_broadcast(&p->p_parent->p_cv);
				/*
				 * Grab p->p_lock before releasing pidlock so
				 * parent and child don't have a race condition.
				 */
				mutex_enter(&p->p_lock);
				mutex_exit(&pidlock);
			}
			if (p->p_trace)			/* /proc */
				prnotify(p->p_trace);
			cv_broadcast(&pr_pid_cv[p->p_slot]); /* pauselwps() */
			cv_broadcast(&p->p_holdlwps);	/* holdwatch() */
		}
		if (why != PR_SUSPENDED) {
			/* per-lwp /proc notification */
			lep = p->p_lwpdir[t->t_dslot].ld_entry;
			if (lep->le_trace)		/* /proc */
				prnotify(lep->le_trace);
			/*
			 * Special notification for creation of the agent lwp.
			 */
			if (t == p->p_agenttp &&
			    (t->t_proc_flag & TP_PRSTOP) &&
			    p->p_trace)
				prnotify(p->p_trace);
			/*
			 * The situation may have changed since we dropped
			 * and reacquired p->p_lock. Double-check now
			 * whether we should stop or not.
			 */
			if (!(t->t_proc_flag & TP_STOPPING)) {
				if (t->t_proc_flag & TP_PRSTOP)
					t->t_proc_flag |= TP_STOPPING;
			}
			t->t_proc_flag &= ~(TP_PRSTOP|TP_PRVSTOP);
			prnostep(lwp);
		}
	}

	if (why == PR_SUSPENDED) {

		/*
		 * We always broadcast in the case of SUSPEND_PAUSE.  This is
		 * because checks for TP_PAUSE take precedence over checks for
		 * SHOLDWATCH.  If a thread is trying to stop because of
		 * SUSPEND_PAUSE and tries to do a holdwatch(), it will be
		 * waiting for the rest of the threads to enter a stopped state.
		 * If we are stopping for a SUSPEND_PAUSE, we may be the last
		 * lwp and not know it, so broadcast just in case.
		 */
		if (what == SUSPEND_PAUSE ||
		    --p->p_lwprcnt == 0 || (t->t_proc_flag & TP_HOLDLWP))
			cv_broadcast(&p->p_holdlwps);

	}

	/*
	 * Need to do this here (rather than after the thread is officially
	 * stopped) because we can't call mutex_enter from a stopped thread.
	 */
	if (why == PR_CHECKPOINT)
		del_one_utstop();

	/*
	 * Allow the brand to post notification of this stop condition.
	 */
	if (PROC_IS_BRANDED(p) && BROP(p)->b_stop_notify != NULL) {
		BROP(p)->b_stop_notify(p, lwp, why, what);
	}

	/*
	 * Record how and why we stopped, then actually transition to the
	 * stopped state.  thread_stop() drops the thread lock for us.
	 */
	thread_lock(t);
	ASSERT((t->t_schedflag & TS_ALLSTART) == 0);
	t->t_schedflag |= flags;
	t->t_whystop = (short)why;
	t->t_whatstop = (short)what;
	CL_STOP(t, why, what);
	(void) new_mstate(t, LMS_STOPPED);
	thread_stop(t);			/* set stop state and drop lock */

	if (why != PR_SUSPENDED && why != PR_CHECKPOINT) {
		/*
		 * We may have gotten a SIGKILL or a SIGCONT when
		 * we released p->p_lock; make one last check.
		 * Also check for a /proc run-on-last-close.
		 */
		if (sigismember(&t->t_sig, SIGKILL) ||
		    sigismember(&p->p_sig, SIGKILL) ||
		    (t->t_proc_flag & TP_LWPEXIT) ||
		    (p->p_flag & (SEXITLWPS|SKILLED))) {
			p->p_stopsig = 0;
			thread_lock(t);
			t->t_schedflag |= TS_XSTART | TS_PSTART | TS_BSTART;
			setrun_locked(t);
			thread_unlock_nopreempt(t);
		} else if (why == PR_JOBCONTROL) {
			if (p->p_flag & SSCONT) {
				/*
				 * This resulted from a SIGCONT posted
				 * while we were not holding p->p_lock.
				 */
				p->p_stopsig = 0;
				thread_lock(t);
				t->t_schedflag |= TS_XSTART;
				setrun_locked(t);
				thread_unlock_nopreempt(t);
			}
		} else if (!(t->t_proc_flag & TP_STOPPING)) {
			/*
			 * This resulted from a /proc run-on-last-close.
			 */
			thread_lock(t);
			t->t_schedflag |= TS_PSTART;
			setrun_locked(t);
			thread_unlock_nopreempt(t);
		}
	}

	t->t_proc_flag &= ~TP_STOPPING;
	mutex_exit(&p->p_lock);

	/* give up the cpu; we resume here when restarted */
	swtch();
	setallwatch();	/* reestablish any watchpoints set while stopped */
	mutex_enter(&p->p_lock);
	prbarrier(p);	/* barrier against /proc locking */
}
1278 
/*
 * Interface for resetting user thread stop count.
 * Called to begin a new round of user-thread stop accounting;
 * num_utstop is protected by thread_stop_lock.
 */
void
utstop_init(void)
{
	mutex_enter(&thread_stop_lock);
	num_utstop = 0;		/* no outstanding stop requests */
	mutex_exit(&thread_stop_lock);
}
1287 
/*
 * Interface for registering a user thread stop request.
 * Bumps the count of outstanding stop requests under thread_stop_lock;
 * each registration is later retired by del_one_utstop().
 */
void
add_one_utstop(void)
{
	mutex_enter(&thread_stop_lock);
	num_utstop++;
	mutex_exit(&thread_stop_lock);
}
1296 
1297 /* Interface for cancelling a user thread stop request */
1298 void
1299 del_one_utstop(void)
1300 {
1301         mutex_enter(&thread_stop_lock);
1302         num_utstop--;
1303         if (num_utstop == 0)
1304                 cv_broadcast(&utstop_cv);
1305         mutex_exit(&thread_stop_lock);
1306 }
1307 
/*
 * Interface to wait for all user threads to be stopped.
 * Waits at most 'ticks' clock ticks; returns early if del_one_utstop()
 * drives the outstanding request count to zero and broadcasts utstop_cv.
 * Note this performs a single bounded wait, so the caller should
 * re-check for remaining unstopped threads on return.
 */
void
utstop_timedwait(clock_t ticks)
{
	mutex_enter(&thread_stop_lock);
	if (num_utstop > 0)
		(void) cv_reltimedwait(&utstop_cv, &thread_stop_lock, ticks,
		    TR_CLOCK_TICK);
	mutex_exit(&thread_stop_lock);
}
1318 
1319 /*
1320  * Perform the action specified by the current signal.
1321  * The usual sequence is:
1322  *      if (issig())
1323  *              psig();
1324  * The signal bit has already been cleared by issig(),
1325  * the current signal number has been stored in lwp_cursig,
1326  * and the current siginfo is now referenced by lwp_curinfo.
1327  */
1328 void
1329 psig(void)
1330 {
1331         kthread_t *t = curthread;
1332         proc_t *p = ttoproc(t);
1333         klwp_t *lwp = ttolwp(t);
1334         void (*func)();
1335         int sig, rc, code, ext;
1336         pid_t pid = -1;
1337         id_t ctid = 0;
1338         zoneid_t zoneid = -1;
1339         sigqueue_t *sqp = NULL;
1340         uint32_t auditing = AU_AUDITING();
1341 
1342         mutex_enter(&p->p_lock);
1343         schedctl_finish_sigblock(t);
1344         code = CLD_KILLED;
1345 
1346         if (p->p_flag & SEXITLWPS) {
1347                 lwp_exit();
1348                 return;                 /* not reached */
1349         }
1350         sig = lwp->lwp_cursig;
1351         ext = lwp->lwp_extsig;
1352 
1353         ASSERT(sig < NSIG);
1354 
1355         /*
1356          * Re-check lwp_cursig after we acquire p_lock.  Since p_lock was
1357          * dropped between issig() and psig(), a debugger may have cleared
1358          * lwp_cursig via /proc in the intervening window.
1359          */
1360         if (sig == 0) {
1361                 if (lwp->lwp_curinfo) {
1362                         siginfofree(lwp->lwp_curinfo);
1363                         lwp->lwp_curinfo = NULL;
1364                 }
1365                 if (t->t_flag & T_TOMASK) {      /* sigsuspend or pollsys */
1366                         t->t_flag &= ~T_TOMASK;
1367                         t->t_hold = lwp->lwp_sigoldmask;
1368                 }
1369                 mutex_exit(&p->p_lock);
1370                 return;
1371         }
1372         func = PTOU(curproc)->u_signal[sig-1];
1373 
1374         /*
1375          * The signal disposition could have changed since we promoted
1376          * this signal from pending to current (we dropped p->p_lock).
1377          * This can happen only in a multi-threaded process.
1378          */
1379         if (sig_ignorable(p, lwp, sig) ||
1380             (func == SIG_DFL && sigismember(&stopdefault, sig))) {
1381                 lwp->lwp_cursig = 0;
1382                 lwp->lwp_extsig = 0;
1383                 if (lwp->lwp_curinfo) {
1384                         siginfofree(lwp->lwp_curinfo);
1385                         lwp->lwp_curinfo = NULL;
1386                 }
1387                 if (t->t_flag & T_TOMASK) {      /* sigsuspend or pollsys */
1388                         t->t_flag &= ~T_TOMASK;
1389                         t->t_hold = lwp->lwp_sigoldmask;
1390                 }
1391                 mutex_exit(&p->p_lock);
1392                 return;
1393         }
1394 
1395         /*
1396          * We check lwp_curinfo first since pr_setsig can actually
1397          * stuff a sigqueue_t there for SIGKILL.
1398          */
1399         if (lwp->lwp_curinfo) {
1400                 sqp = lwp->lwp_curinfo;
1401         } else if (sig == SIGKILL && p->p_killsqp) {
1402                 sqp = p->p_killsqp;
1403         }
1404 
1405         if (sqp != NULL) {
1406                 if (SI_FROMUSER(&sqp->sq_info)) {
1407                         pid = sqp->sq_info.si_pid;
1408                         ctid = sqp->sq_info.si_ctid;
1409                         zoneid = sqp->sq_info.si_zoneid;
1410                 }
1411                 /*
1412                  * If we have a sigqueue_t, its sq_external value
1413                  * trumps the lwp_extsig value.  It is theoretically
1414                  * possible to make lwp_extsig reflect reality, but it
1415                  * would unnecessarily complicate things elsewhere.
1416                  */
1417                 ext = sqp->sq_external;
1418         }
1419 
1420         if (func == SIG_DFL) {
1421                 mutex_exit(&p->p_lock);
1422                 DTRACE_PROC3(signal__handle, int, sig, k_siginfo_t *,
1423                     NULL, void (*)(void), func);
1424         } else {
1425                 k_siginfo_t *sip = NULL;
1426 
1427                 /*
1428                  * If DTrace user-land tracing is active, give DTrace a
1429                  * chance to defer the signal until after tracing is
1430                  * complete.
1431                  */
1432                 if (t->t_dtrace_on && dtrace_safe_defer_signal()) {
1433                         mutex_exit(&p->p_lock);
1434                         return;
1435                 }
1436 
1437                 /*
1438                  * save siginfo pointer here, in case the
1439                  * the signal's reset bit is on
1440                  *
1441                  * The presence of a current signal prevents paging
1442                  * from succeeding over a network.  We copy the current
1443                  * signal information to the side and cancel the current
1444                  * signal so that sendsig() will succeed.
1445                  */
1446                 if (sigismember(&p->p_siginfo, sig)) {
1447                         sip = &lwp->lwp_siginfo;
1448                         if (sqp) {
1449                                 bcopy(&sqp->sq_info, sip, sizeof (*sip));
1450                                 /*
1451                                  * If we were interrupted out of a system call
1452                                  * due to pthread_cancel(), inform libc.
1453                                  */
1454                                 if (sig == SIGCANCEL &&
1455                                     sip->si_code == SI_LWP &&
1456                                     t->t_sysnum != 0)
1457                                         schedctl_cancel_eintr();
1458                         } else if (sig == SIGPROF && sip->si_signo == SIGPROF &&
1459                             t->t_rprof != NULL && t->t_rprof->rp_anystate) {
1460                                 /* EMPTY */;
1461                         } else {
1462                                 bzero(sip, sizeof (*sip));
1463                                 sip->si_signo = sig;
1464                                 sip->si_code = SI_NOINFO;
1465                         }
1466                 }
1467 
1468                 if (t->t_flag & T_TOMASK)
1469                         t->t_flag &= ~T_TOMASK;
1470                 else
1471                         lwp->lwp_sigoldmask = t->t_hold;
1472                 sigorset(&t->t_hold, &PTOU(curproc)->u_sigmask[sig-1]);
1473                 if (!sigismember(&PTOU(curproc)->u_signodefer, sig))
1474                         sigaddset(&t->t_hold, sig);
1475                 if (sigismember(&PTOU(curproc)->u_sigresethand, sig))
1476                         setsigact(sig, SIG_DFL, &nullsmask, 0);
1477 
1478                 DTRACE_PROC3(signal__handle, int, sig, k_siginfo_t *,
1479                     sip, void (*)(void), func);
1480 
1481                 lwp->lwp_cursig = 0;
1482                 lwp->lwp_extsig = 0;
1483                 if (lwp->lwp_curinfo) {
1484                         /* p->p_killsqp is freed by freeproc */
1485                         siginfofree(lwp->lwp_curinfo);
1486                         lwp->lwp_curinfo = NULL;
1487                 }
1488                 mutex_exit(&p->p_lock);
1489                 lwp->lwp_ru.nsignals++;
1490 
1491                 if (p->p_model == DATAMODEL_NATIVE)
1492                         rc = sendsig(sig, sip, func);
1493 #ifdef _SYSCALL32_IMPL
1494                 else
1495                         rc = sendsig32(sig, sip, func);
1496 #endif  /* _SYSCALL32_IMPL */
1497                 if (rc)
1498                         return;
1499                 sig = lwp->lwp_cursig = SIGSEGV;
1500                 ext = 0;        /* lwp_extsig was set above */
1501                 pid = -1;
1502                 ctid = 0;
1503         }
1504 
1505         if (sigismember(&coredefault, sig)) {
1506                 /*
1507                  * Terminate all LWPs but don't discard them.
1508                  * If another lwp beat us to the punch by calling exit(),
1509                  * evaporate now.
1510                  */
1511                 proc_is_exiting(p);
1512                 if (exitlwps(1) != 0) {
1513                         mutex_enter(&p->p_lock);
1514                         lwp_exit();
1515                 }
1516                 /* if we got a SIGKILL from anywhere, no core dump */
1517                 if (p->p_flag & SKILLED) {
1518                         sig = SIGKILL;
1519                         ext = (p->p_flag & SEXTKILLED) != 0;
1520                 } else {
1521                         if (auditing)           /* audit core dump */
1522                                 audit_core_start(sig);
1523                         if (core(sig, ext) == 0)
1524                                 code = CLD_DUMPED;
1525                         if (auditing)           /* audit core dump */
1526                                 audit_core_finish(code);
1527                 }
1528         }
1529 
1530         /*
1531          * Generate a contract event once if the process is killed
1532          * by a signal.
1533          */
1534         if (ext) {
1535                 proc_is_exiting(p);
1536                 if (exitlwps(0) != 0) {
1537                         mutex_enter(&p->p_lock);
1538                         lwp_exit();
1539                 }
1540                 contract_process_sig(p->p_ct_process, p, sig, pid, ctid,
1541                     zoneid);
1542         }
1543 
1544         exit(code, sig);
1545 }
1546 
1547 /*
1548  * Find next unheld signal in ssp for thread t.
1549  */
1550 int
1551 fsig(k_sigset_t *ssp, kthread_t *t)
1552 {
1553         proc_t *p = ttoproc(t);
1554         user_t *up = PTOU(p);
1555         int i;
1556         k_sigset_t temp;
1557 
1558         ASSERT(MUTEX_HELD(&p->p_lock));
1559 
1560         /*
1561          * Don't promote any signals for the parent of a vfork()d
1562          * child that hasn't yet released the parent's memory.
1563          */
1564         if (p->p_flag & SVFWAIT)
1565                 return (0);
1566 
1567         temp = *ssp;
1568         sigdiffset(&temp, &t->t_hold);
1569 
1570         /*
1571          * Don't promote stopping signals (except SIGSTOP) for a child
1572          * of vfork() that hasn't yet released the parent's memory.
1573          */
1574         if (p->p_flag & SVFORK)
1575                 sigdiffset(&temp, &holdvfork);
1576 
1577         /*
1578          * Don't promote a signal that will stop
1579          * the process when lwp_nostop is set.
1580          */
1581         if (ttolwp(t)->lwp_nostop) {
1582                 sigdelset(&temp, SIGSTOP);
1583                 if (!p->p_pgidp->pid_pgorphaned) {
1584                         if (up->u_signal[SIGTSTP-1] == SIG_DFL)
1585                                 sigdelset(&temp, SIGTSTP);
1586                         if (up->u_signal[SIGTTIN-1] == SIG_DFL)
1587                                 sigdelset(&temp, SIGTTIN);
1588                         if (up->u_signal[SIGTTOU-1] == SIG_DFL)
1589                                 sigdelset(&temp, SIGTTOU);
1590                 }
1591         }
1592 
1593         /*
1594          * Choose SIGKILL and SIGPROF before all other pending signals.
1595          * The rest are promoted in signal number order.
1596          */
1597         if (sigismember(&temp, SIGKILL))
1598                 return (SIGKILL);
1599         if (sigismember(&temp, SIGPROF))
1600                 return (SIGPROF);
1601 
1602         for (i = 0; i < sizeof (temp) / sizeof (temp.__sigbits[0]); i++) {
1603                 if (temp.__sigbits[i])
1604                         return ((i * NBBY * sizeof (temp.__sigbits[0])) +
1605                             lowbit(temp.__sigbits[i]));
1606         }
1607 
1608         return (0);
1609 }
1610 
/*
 * Establish the disposition of signal 'sig' for the current process:
 * record the handler 'disp', the mask to be applied while the handler
 * runs, and the sigaction() flags, then reconcile process state
 * (p_siginfo/p_ignore membership, pending signals, SIGCLD job-control
 * flags) with the new disposition.
 *
 * Called with p->p_lock held; the lock is briefly dropped below while
 * pidlock is taken to reap zombie children.
 */
void
setsigact(int sig, void (*disp)(), const k_sigset_t *mask, int flags)
{
	proc_t *p = ttoproc(curthread);
	kthread_t *t;

	ASSERT(MUTEX_HELD(&p->p_lock));

	PTOU(curproc)->u_signal[sig - 1] = disp;

	/*
	 * Honor the SA_SIGINFO flag if the signal is being caught.
	 * Force the SA_SIGINFO flag if the signal is not being caught.
	 * This is necessary to make sigqueue() and sigwaitinfo() work
	 * properly together when the signal is set to default or is
	 * being temporarily ignored.
	 */
	if ((flags & SA_SIGINFO) || disp == SIG_DFL || disp == SIG_IGN)
		sigaddset(&p->p_siginfo, sig);
	else
		sigdelset(&p->p_siginfo, sig);

	if (disp != SIG_DFL && disp != SIG_IGN) {
		/*
		 * The signal is being caught: record the handler mask
		 * and translate each sigaction flag into membership in
		 * the corresponding per-process signal set.
		 */
		sigdelset(&p->p_ignore, sig);
		PTOU(curproc)->u_sigmask[sig - 1] = *mask;
		/* signals in 'cantreset' never get SA_RESETHAND behavior */
		if (!sigismember(&cantreset, sig)) {
			if (flags & SA_RESETHAND)
				sigaddset(&PTOU(curproc)->u_sigresethand, sig);
			else
				sigdelset(&PTOU(curproc)->u_sigresethand, sig);
		}
		if (flags & SA_NODEFER)
			sigaddset(&PTOU(curproc)->u_signodefer, sig);
		else
			sigdelset(&PTOU(curproc)->u_signodefer, sig);
		if (flags & SA_RESTART)
			sigaddset(&PTOU(curproc)->u_sigrestart, sig);
		else
			sigdelset(&PTOU(curproc)->u_sigrestart, sig);
		if (flags & SA_ONSTACK)
			sigaddset(&PTOU(curproc)->u_sigonstack, sig);
		else
			sigdelset(&PTOU(curproc)->u_sigonstack, sig);
	} else if (disp == SIG_IGN ||
	    (disp == SIG_DFL && sigismember(&ignoredefault, sig))) {
		/*
		 * Setting the signal action to SIG_IGN results in the
		 * discarding of all pending signals of that signal number.
		 * Setting the signal action to SIG_DFL does the same *only*
		 * if the signal's default behavior is to be ignored.
		 */
		sigaddset(&p->p_ignore, sig);
		sigdelset(&p->p_sig, sig);
		sigdelset(&p->p_extsig, sig);
		sigdelq(p, NULL, sig);
		/* walk the circular thread list, clearing per-thread state */
		t = p->p_tlist;
		do {
			sigdelset(&t->t_sig, sig);
			sigdelset(&t->t_extsig, sig);
			sigdelq(p, t, sig);
		} while ((t = t->t_forw) != p->p_tlist);
	} else {
		/*
		 * The signal action is being set to SIG_DFL and the default
		 * behavior is to do something: make sure it is not ignored.
		 */
		sigdelset(&p->p_ignore, sig);
	}

	if (sig == SIGCLD) {
		/* map SA_NOCLDWAIT/SA_NOCLDSTOP onto the process flags */
		if (flags & SA_NOCLDWAIT)
			p->p_flag |= SNOWAIT;
		else
			p->p_flag &= ~SNOWAIT;

		if (flags & SA_NOCLDSTOP)
			p->p_flag &= ~SJCTL;
		else
			p->p_flag |= SJCTL;

		if ((p->p_flag & SNOWAIT) || disp == SIG_IGN) {
			proc_t *cp, *tp;

			/*
			 * The parent no longer waits for its children:
			 * reap any existing zombies now.  p_lock is
			 * dropped before pidlock is taken (lock order),
			 * then reacquired afterwards.
			 */
			mutex_exit(&p->p_lock);
			mutex_enter(&pidlock);
			for (cp = p->p_child; cp != NULL; cp = tp) {
				tp = cp->p_sibling;
				if (cp->p_stat == SZOMB &&
				    !(cp->p_pidflag & CLDWAITPID))
					freeproc(cp);
			}
			mutex_exit(&pidlock);
			mutex_enter(&p->p_lock);
		}
	}
}
1707 
1708 /*
1709  * Set all signal actions not already set to SIG_DFL or SIG_IGN to SIG_DFL.
1710  * Called from exec_common() for a process undergoing execve()
1711  * and from cfork() for a newly-created child of vfork().
1712  * In the vfork() case, 'p' is not the current process.
1713  * In both cases, there is only one thread in the process.
1714  */
1715 void
1716 sigdefault(proc_t *p)
1717 {
1718         kthread_t *t = p->p_tlist;
1719         struct user *up = PTOU(p);
1720         int sig;
1721 
1722         ASSERT(MUTEX_HELD(&p->p_lock));
1723 
1724         for (sig = 1; sig < NSIG; sig++) {
1725                 if (up->u_signal[sig - 1] != SIG_DFL &&
1726                     up->u_signal[sig - 1] != SIG_IGN) {
1727                         up->u_signal[sig - 1] = SIG_DFL;
1728                         sigemptyset(&up->u_sigmask[sig - 1]);
1729                         if (sigismember(&ignoredefault, sig)) {
1730                                 sigdelq(p, NULL, sig);
1731                                 sigdelq(p, t, sig);
1732                         }
1733                         if (sig == SIGCLD)
1734                                 p->p_flag &= ~(SNOWAIT|SJCTL);
1735                 }
1736         }
1737         sigorset(&p->p_ignore, &ignoredefault);
1738         sigfillset(&p->p_siginfo);
1739         sigdiffset(&p->p_siginfo, &cantmask);
1740         sigdiffset(&p->p_sig, &ignoredefault);
1741         sigdiffset(&p->p_extsig, &ignoredefault);
1742         sigdiffset(&t->t_sig, &ignoredefault);
1743         sigdiffset(&t->t_extsig, &ignoredefault);
1744 }
1745 
/*
 * Notify the parent of child 'cp' about a change in the child's state
 * (exit, stop, or continue, per cp->p_wcode).  'sqp' is a preallocated
 * sigqueue_t used to post the SIGCLD; ownership passes to post_sigcld()
 * when it is used, otherwise it is freed here.  May be called with
 * sqp == NULL (see exit()).  Called with pidlock held.
 */
void
sigcld(proc_t *cp, sigqueue_t *sqp)
{
	proc_t *pp = cp->p_parent;

	ASSERT(MUTEX_HELD(&pidlock));

	switch (cp->p_wcode) {
	case CLD_EXITED:
	case CLD_DUMPED:
	case CLD_KILLED:
		ASSERT(cp->p_stat == SZOMB);
		/*
		 * The broadcast on p_srwchan_cv is a kludge to
		 * wakeup a possible thread in uadmin(A_SHUTDOWN).
		 */
		cv_broadcast(&cp->p_srwchan_cv);

		/*
		 * Add to newstate list of the parent
		 */
		add_ns(pp, cp);

		cv_broadcast(&pp->p_cv);
		if ((pp->p_flag & SNOWAIT) ||
		    PTOU(pp)->u_signal[SIGCLD - 1] == SIG_IGN) {
			/* parent is not waiting: reap immediately */
			if (!(cp->p_pidflag & CLDWAITPID))
				freeproc(cp);
		} else if (!(cp->p_pidflag & CLDNOSIGCHLD)) {
			post_sigcld(cp, sqp);
			sqp = NULL;	/* consumed by post_sigcld() */
		}
		break;

	case CLD_STOPPED:
	case CLD_CONTINUED:
		cv_broadcast(&pp->p_cv);
		/* SJCTL: parent asked for job-control SIGCLDs */
		if (pp->p_flag & SJCTL) {
			post_sigcld(cp, sqp);
			sqp = NULL;	/* consumed by post_sigcld() */
		}
		break;
	}

	/* free the preallocated entry if nobody consumed it */
	if (sqp)
		siginfofree(sqp);
}
1793 
1794 /*
1795  * Common code called from sigcld() and from
1796  * waitid() and issig_forreal() via sigcld_repost().
1797  * Give the parent process a SIGCLD if it does not have one pending,
1798  * else mark the child process so a SIGCLD can be posted later.
1799  */
1800 static void
1801 post_sigcld(proc_t *cp, sigqueue_t *sqp)
1802 {
1803         proc_t *pp = cp->p_parent;
1804         k_siginfo_t info;
1805 
1806         ASSERT(MUTEX_HELD(&pidlock));
1807         mutex_enter(&pp->p_lock);
1808 
1809         /*
1810          * If a SIGCLD is pending, then just mark the child process
1811          * so that its SIGCLD will be posted later, when the first
1812          * SIGCLD is taken off the queue or when the parent is ready
1813          * to receive it or accept it, if ever.
1814          */
1815         if (sigismember(&pp->p_sig, SIGCLD)) {
1816                 cp->p_pidflag |= CLDPEND;
1817         } else {
1818                 cp->p_pidflag &= ~CLDPEND;
1819                 if (sqp == NULL) {
1820                         /*
1821                          * This can only happen when the parent is init.
1822                          * (See call to sigcld(q, NULL) in exit().)
1823                          * Use KM_NOSLEEP to avoid deadlock. The child procs
1824                          * initpid can be 1 for zlogin.
1825                          */
1826                         ASSERT(pp->p_pidp->pid_id ==
1827                             cp->p_zone->zone_proc_initpid ||
1828                             pp->p_pidp->pid_id == 1);
1829                         winfo(cp, &info, 0);
1830                         sigaddq(pp, NULL, &info, KM_NOSLEEP);
1831                 } else {
1832                         winfo(cp, &sqp->sq_info, 0);
1833                         sigaddqa(pp, NULL, sqp);
1834                         sqp = NULL;
1835                 }
1836         }
1837 
1838         mutex_exit(&pp->p_lock);
1839 
1840         if (sqp)
1841                 siginfofree(sqp);
1842 }
1843 
1844 /*
1845  * Search for a child that has a pending SIGCLD for us, the parent.
1846  * The queue of SIGCLD signals is implied by the list of children.
1847  * We post the SIGCLD signals one at a time so they don't get lost.
1848  * When one is dequeued, another is enqueued, until there are no more.
1849  */
1850 void
1851 sigcld_repost()
1852 {
1853         proc_t *pp = curproc;
1854         proc_t *cp;
1855         sigqueue_t *sqp;
1856 
1857         sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);
1858         mutex_enter(&pidlock);
1859         if (PROC_IS_BRANDED(pp) && BROP(pp)->b_sigcld_repost != NULL) {
1860                 /*
1861                  * Allow the brand to inject synthetic SIGCLD signals.
1862                  */
1863                 if (BROP(pp)->b_sigcld_repost(pp, sqp) == 0) {
1864                         mutex_exit(&pidlock);
1865                         return;
1866                 }
1867         }
1868         for (cp = pp->p_child; cp; cp = cp->p_sibling) {
1869                 if (cp->p_pidflag & CLDPEND) {
1870                         post_sigcld(cp, sqp);
1871                         mutex_exit(&pidlock);
1872                         return;
1873                 }
1874         }
1875         mutex_exit(&pidlock);
1876         kmem_free(sqp, sizeof (sigqueue_t));
1877 }
1878 
1879 /*
1880  * count number of sigqueue send by sigaddqa()
1881  */
1882 void
1883 sigqsend(int cmd, proc_t *p, kthread_t *t, sigqueue_t *sigqp)
1884 {
1885         sigqhdr_t *sqh;
1886 
1887         sqh = (sigqhdr_t *)sigqp->sq_backptr;
1888         ASSERT(sqh);
1889 
1890         mutex_enter(&sqh->sqb_lock);
1891         sqh->sqb_sent++;
1892         mutex_exit(&sqh->sqb_lock);
1893 
1894         if (cmd == SN_SEND)
1895                 sigaddqa(p, t, sigqp);
1896         else
1897                 siginfofree(sigqp);
1898 }
1899 
/*
 * Send the signal described by 'pv' to process 'p' on behalf of the
 * current process, subject to permission checks.  Called with pidlock
 * held (via dotoprocs() from sigsendset()).  On a successful permission
 * check, pv->perm is incremented whether or not a signal was actually
 * posted (pv->sig may be 0 for a permission probe).
 * Returns 0, EPERM, or EAGAIN (sigqueue pool exhausted).
 */
int
sigsendproc(proc_t *p, sigsend_t *pv)
{
	struct cred *cr;
	proc_t *myprocp = curproc;

	ASSERT(MUTEX_HELD(&pidlock));

	/* init is protected against unmaskable signals */
	if (p->p_pid == 1 && pv->sig && sigismember(&cantmask, pv->sig))
		return (EPERM);

	cr = CRED();

	/*
	 * Permission is granted when the caller skips the check, when
	 * SIGCONT is sent within the sender's own session, or when the
	 * general process-to-process permission test passes.
	 */
	if (pv->checkperm == 0 ||
	    (pv->sig == SIGCONT && p->p_sessp == myprocp->p_sessp) ||
	    prochasprocperm(p, myprocp, cr)) {
		pv->perm++;
		if (pv->sig) {
			/* Make sure we should be setting si_pid and friends */
			ASSERT(pv->sicode <= 0);
			if (SI_CANQUEUE(pv->sicode)) {
				sigqueue_t *sqp;

				/* allocate from the sender's sigqueue pool */
				mutex_enter(&myprocp->p_lock);
				sqp = sigqalloc(myprocp->p_sigqhdr);
				mutex_exit(&myprocp->p_lock);
				if (sqp == NULL)
					return (EAGAIN);
				sqp->sq_info.si_signo = pv->sig;
				sqp->sq_info.si_code = pv->sicode;
				sqp->sq_info.si_pid = myprocp->p_pid;
				sqp->sq_info.si_ctid = PRCTID(myprocp);
				sqp->sq_info.si_zoneid = getzoneid();
				sqp->sq_info.si_uid = crgetruid(cr);
				sqp->sq_info.si_value = pv->value;
				mutex_enter(&p->p_lock);
				sigqsend(SN_SEND, p, NULL, sqp);
				mutex_exit(&p->p_lock);
			} else {
				/* non-queueable code: post depth-one siginfo */
				k_siginfo_t info;
				bzero(&info, sizeof (info));
				info.si_signo = pv->sig;
				info.si_code = pv->sicode;
				info.si_pid = myprocp->p_pid;
				info.si_ctid = PRCTID(myprocp);
				info.si_zoneid = getzoneid();
				info.si_uid = crgetruid(cr);
				mutex_enter(&p->p_lock);
				/*
				 * XXX: Should be KM_SLEEP but
				 * we have to avoid deadlock.
				 */
				sigaddq(p, NULL, &info, KM_NOSLEEP);
				mutex_exit(&p->p_lock);
			}
		}
	}

	return (0);
}
1960 
1961 int
1962 sigsendset(procset_t *psp, sigsend_t *pv)
1963 {
1964         int error;
1965 
1966         error = dotoprocs(psp, sigsendproc, (char *)pv);
1967         if (error == 0 && pv->perm == 0)
1968                 return (EPERM);
1969 
1970         return (error);
1971 }
1972 
1973 /*
1974  * Dequeue a queued siginfo structure.
1975  * If a non-null thread pointer is passed then dequeue from
1976  * the thread queue, otherwise dequeue from the process queue.
1977  */
1978 void
1979 sigdeq(proc_t *p, kthread_t *t, int sig, sigqueue_t **qpp)
1980 {
1981         sigqueue_t **psqp, *sqp;
1982 
1983         ASSERT(MUTEX_HELD(&p->p_lock));
1984 
1985         *qpp = NULL;
1986 
1987         if (t != NULL) {
1988                 sigdelset(&t->t_sig, sig);
1989                 sigdelset(&t->t_extsig, sig);
1990                 psqp = &t->t_sigqueue;
1991         } else {
1992                 sigdelset(&p->p_sig, sig);
1993                 sigdelset(&p->p_extsig, sig);
1994                 psqp = &p->p_sigqueue;
1995         }
1996 
1997         for (;;) {
1998                 if ((sqp = *psqp) == NULL)
1999                         return;
2000                 if (sqp->sq_info.si_signo == sig)
2001                         break;
2002                 else
2003                         psqp = &sqp->sq_next;
2004         }
2005         *qpp = sqp;
2006         *psqp = sqp->sq_next;
2007         for (sqp = *psqp; sqp; sqp = sqp->sq_next) {
2008                 if (sqp->sq_info.si_signo == sig) {
2009                         if (t != (kthread_t *)NULL) {
2010                                 sigaddset(&t->t_sig, sig);
2011                                 t->t_sig_check = 1;
2012                         } else {
2013                                 sigaddset(&p->p_sig, sig);
2014                                 set_proc_ast(p);
2015                         }
2016                         break;
2017                 }
2018         }
2019 }
2020 
2021 /*
2022  * Delete a queued SIGCLD siginfo structure matching the k_siginfo_t argument.
2023  */
2024 void
2025 sigcld_delete(k_siginfo_t *ip)
2026 {
2027         proc_t *p = curproc;
2028         int another_sigcld = 0;
2029         sigqueue_t **psqp, *sqp;
2030 
2031         ASSERT(ip->si_signo == SIGCLD);
2032 
2033         mutex_enter(&p->p_lock);
2034 
2035         if (!sigismember(&p->p_sig, SIGCLD)) {
2036                 mutex_exit(&p->p_lock);
2037                 return;
2038         }
2039 
2040         psqp = &p->p_sigqueue;
2041         for (;;) {
2042                 if ((sqp = *psqp) == NULL) {
2043                         mutex_exit(&p->p_lock);
2044                         return;
2045                 }
2046                 if (sqp->sq_info.si_signo == SIGCLD) {
2047                         if (sqp->sq_info.si_pid == ip->si_pid &&
2048                             sqp->sq_info.si_code == ip->si_code &&
2049                             sqp->sq_info.si_status == ip->si_status)
2050                                 break;
2051                         another_sigcld = 1;
2052                 }
2053                 psqp = &sqp->sq_next;
2054         }
2055         *psqp = sqp->sq_next;
2056 
2057         siginfofree(sqp);
2058 
2059         for (sqp = *psqp; !another_sigcld && sqp; sqp = sqp->sq_next) {
2060                 if (sqp->sq_info.si_signo == SIGCLD)
2061                         another_sigcld = 1;
2062         }
2063 
2064         if (!another_sigcld) {
2065                 sigdelset(&p->p_sig, SIGCLD);
2066                 sigdelset(&p->p_extsig, SIGCLD);
2067         }
2068 
2069         mutex_exit(&p->p_lock);
2070 }
2071 
2072 /*
2073  * Delete queued siginfo structures.
2074  * If a non-null thread pointer is passed then delete from
2075  * the thread queue, otherwise delete from the process queue.
2076  */
2077 void
2078 sigdelq(proc_t *p, kthread_t *t, int sig)
2079 {
2080         sigqueue_t **psqp, *sqp;
2081 
2082         /*
2083          * We must be holding p->p_lock unless the process is
2084          * being reaped or has failed to get started on fork.
2085          */
2086         ASSERT(MUTEX_HELD(&p->p_lock) ||
2087             p->p_stat == SIDL || p->p_stat == SZOMB);
2088 
2089         if (t != (kthread_t *)NULL)
2090                 psqp = &t->t_sigqueue;
2091         else
2092                 psqp = &p->p_sigqueue;
2093 
2094         while (*psqp) {
2095                 sqp = *psqp;
2096                 if (sig == 0 || sqp->sq_info.si_signo == sig) {
2097                         *psqp = sqp->sq_next;
2098                         siginfofree(sqp);
2099                 } else
2100                         psqp = &sqp->sq_next;
2101         }
2102 }
2103 
2104 /*
2105  * Insert a siginfo structure into a queue.
2106  * If a non-null thread pointer is passed then add to the thread queue,
2107  * otherwise add to the process queue.
2108  *
2109  * The function sigaddqins() is called with sigqueue already allocated.
2110  * It is called from sigaddqa() and sigaddq() below.
2111  *
2112  * The value of si_code implicitly indicates whether sigp is to be
2113  * explicitly queued, or to be queued to depth one.
2114  */
2115 static void
2116 sigaddqins(proc_t *p, kthread_t *t, sigqueue_t *sigqp)
2117 {
2118         sigqueue_t **psqp;
2119         int sig = sigqp->sq_info.si_signo;
2120 
2121         sigqp->sq_external = (curproc != &p0) &&
2122             (curproc->p_ct_process != p->p_ct_process);
2123 
2124         /*
2125          * issig_forreal() doesn't bother dequeueing signals if SKILLED
2126          * is set, and even if it did, we would want to avoid situation
2127          * (which would be unique to SIGKILL) where one thread dequeued
2128          * the sigqueue_t and another executed psig().  So we create a
2129          * separate stash for SIGKILL's sigqueue_t.  Because a second
2130          * SIGKILL can set SEXTKILLED, we overwrite the existing entry
2131          * if (and only if) it was non-extracontractual.
2132          */
2133         if (sig == SIGKILL) {
2134                 if (p->p_killsqp == NULL || !p->p_killsqp->sq_external) {
2135                         if (p->p_killsqp != NULL)
2136                                 siginfofree(p->p_killsqp);
2137                         p->p_killsqp = sigqp;
2138                         sigqp->sq_next = NULL;
2139                 } else {
2140                         siginfofree(sigqp);
2141                 }
2142                 return;
2143         }
2144 
2145         ASSERT(sig >= 1 && sig < NSIG);
2146         if (t != NULL)  /* directed to a thread */
2147                 psqp = &t->t_sigqueue;
2148         else            /* directed to a process */
2149                 psqp = &p->p_sigqueue;
2150         if (SI_CANQUEUE(sigqp->sq_info.si_code) &&
2151             sigismember(&p->p_siginfo, sig)) {
2152                 for (; *psqp != NULL; psqp = &(*psqp)->sq_next)
2153                                 ;
2154         } else {
2155                 for (; *psqp != NULL; psqp = &(*psqp)->sq_next) {
2156                         if ((*psqp)->sq_info.si_signo == sig) {
2157                                 siginfofree(sigqp);
2158                                 return;
2159                         }
2160                 }
2161         }
2162         *psqp = sigqp;
2163         sigqp->sq_next = NULL;
2164 }
2165 
2166 /*
2167  * The function sigaddqa() is called with sigqueue already allocated.
2168  * If signal is ignored, discard but guarantee KILL and generation semantics.
2169  * It is called from sigqueue() and other places.
2170  */
2171 void
2172 sigaddqa(proc_t *p, kthread_t *t, sigqueue_t *sigqp)
2173 {
2174         int sig = sigqp->sq_info.si_signo;
2175 
2176         ASSERT(MUTEX_HELD(&p->p_lock));
2177         ASSERT(sig >= 1 && sig < NSIG);
2178 
2179         if (sig_discardable(p, t, sig))
2180                 siginfofree(sigqp);
2181         else
2182                 sigaddqins(p, t, sigqp);
2183 
2184         sigtoproc(p, t, sig);
2185 }
2186 
2187 /*
2188  * Allocate the sigqueue_t structure and call sigaddqins().
2189  */
2190 void
2191 sigaddq(proc_t *p, kthread_t *t, k_siginfo_t *infop, int km_flags)
2192 {
2193         sigqueue_t *sqp;
2194         int sig = infop->si_signo;
2195 
2196         ASSERT(MUTEX_HELD(&p->p_lock));
2197         ASSERT(sig >= 1 && sig < NSIG);
2198 
2199         /*
2200          * If the signal will be discarded by sigtoproc() or
2201          * if the process isn't requesting siginfo and it isn't
2202          * blocking the signal (it *could* change it's mind while
2203          * the signal is pending) then don't bother creating one.
2204          */
2205         if (!sig_discardable(p, t, sig) &&
2206             (sigismember(&p->p_siginfo, sig) ||
2207             (curproc->p_ct_process != p->p_ct_process) ||
2208             (sig == SIGCLD && SI_FROMKERNEL(infop))) &&
2209             ((sqp = kmem_alloc(sizeof (sigqueue_t), km_flags)) != NULL)) {
2210                 bcopy(infop, &sqp->sq_info, sizeof (k_siginfo_t));
2211                 sqp->sq_func = NULL;
2212                 sqp->sq_next = NULL;
2213                 sigaddqins(p, t, sqp);
2214         }
2215         sigtoproc(p, t, sig);
2216 }
2217 
2218 /*
2219  * Handle stop-on-fault processing for the debugger.  Returns 0
2220  * if the fault is cleared during the stop, nonzero if it isn't.
2221  */
2222 int
2223 stop_on_fault(uint_t fault, k_siginfo_t *sip)
2224 {
2225         proc_t *p = ttoproc(curthread);
2226         klwp_t *lwp = ttolwp(curthread);
2227 
2228         ASSERT(prismember(&p->p_fltmask, fault));
2229 
2230         /*
2231          * Record current fault and siginfo structure so debugger can
2232          * find it.
2233          */
2234         mutex_enter(&p->p_lock);
2235         lwp->lwp_curflt = (uchar_t)fault;
2236         lwp->lwp_siginfo = *sip;
2237 
2238         stop(PR_FAULTED, fault);
2239 
2240         fault = lwp->lwp_curflt;
2241         lwp->lwp_curflt = 0;
2242         mutex_exit(&p->p_lock);
2243         return (fault);
2244 }
2245 
2246 void
2247 sigorset(k_sigset_t *s1, const k_sigset_t *s2)
2248 {
2249         s1->__sigbits[0] |= s2->__sigbits[0];
2250         s1->__sigbits[1] |= s2->__sigbits[1];
2251         s1->__sigbits[2] |= s2->__sigbits[2];
2252 }
2253 
2254 void
2255 sigandset(k_sigset_t *s1, const k_sigset_t *s2)
2256 {
2257         s1->__sigbits[0] &= s2->__sigbits[0];
2258         s1->__sigbits[1] &= s2->__sigbits[1];
2259         s1->__sigbits[2] &= s2->__sigbits[2];
2260 }
2261 
2262 void
2263 sigdiffset(k_sigset_t *s1, const k_sigset_t *s2)
2264 {
2265         s1->__sigbits[0] &= ~(s2->__sigbits[0]);
2266         s1->__sigbits[1] &= ~(s2->__sigbits[1]);
2267         s1->__sigbits[2] &= ~(s2->__sigbits[2]);
2268 }
2269 
2270 /*
2271  * Return non-zero if curthread->t_sig_check should be set to 1, that is,
2272  * if there are any signals the thread might take on return from the kernel.
2273  * If ksigset_t's were a single word, we would do:
2274  *      return (((p->p_sig | t->t_sig) & ~t->t_hold) & fillset);
2275  */
int
sigcheck(proc_t *p, kthread_t *t)
{
        sc_shared_t *tdp = t->t_schedctl;

        /*
         * If signals are blocked via the schedctl interface
         * then we only check for the unmaskable signals.
         * The unmaskable signal numbers should all be contained
         * in __sigbits[0] and we assume this for speed.
         */
#if (CANTMASK1 == 0 && CANTMASK2 == 0)
        if (tdp != NULL && tdp->sc_sigblock)
                return ((p->p_sig.__sigbits[0] | t->t_sig.__sigbits[0]) &
                    CANTMASK0);
#else
#error "fix me: CANTMASK1 and CANTMASK2 are not zero"
#endif

/* see uts/common/sys/signal.h for why this must be true */
#if ((MAXSIG > (2 * 32)) && (MAXSIG <= (3 * 32)))
        /*
         * Word-by-word form of the expression in the header comment:
         * any pending signal (process- or thread-directed) that is not
         * currently held.  FILLSET2 masks off the bits beyond MAXSIG
         * in the last word.
         */
        return (((p->p_sig.__sigbits[0] | t->t_sig.__sigbits[0]) &
            ~t->t_hold.__sigbits[0]) |
            ((p->p_sig.__sigbits[1] | t->t_sig.__sigbits[1]) &
            ~t->t_hold.__sigbits[1]) |
            (((p->p_sig.__sigbits[2] | t->t_sig.__sigbits[2]) &
            ~t->t_hold.__sigbits[2]) & FILLSET2));
#else
#error "fix me: MAXSIG out of bounds"
#endif
}
2307 
/*
 * Save the caller's current held-signal mask in *smask and then block
 * everything except the unmaskable signals and the "interrupt" signals
 * (SIGHUP, SIGINT, SIGQUIT, SIGTERM).  SIGINT is only re-enabled when
 * 'intable' is non-zero.  Balanced by a later call to sigunintr(smask).
 */
void
sigintr(k_sigset_t *smask, int intable)
{
        proc_t *p;
        int owned;
        k_sigset_t lmask;               /* local copy of cantmask */
        klwp_t *lwp = ttolwp(curthread);

        /*
         * Mask out all signals except SIGHUP, SIGINT, SIGQUIT
         *    and SIGTERM. (Preserving the existing masks).
         *    This function supports the -intr nfs and ufs mount option.
         */

        /*
         * don't do kernel threads
         */
        if (lwp == NULL)
                return;

        /*
         * get access to signal mask
         */
        p = ttoproc(curthread);
        owned = mutex_owned(&p->p_lock); /* this is filthy */
        if (!owned)
                mutex_enter(&p->p_lock);

        /*
         * remember the current mask
         */
        schedctl_finish_sigblock(curthread);
        *smask = curthread->t_hold;

        /*
         * mask out all signals
         */
        sigfillset(&curthread->t_hold);

        /*
         * Unmask the non-maskable signals (e.g., KILL), as long as
         * they aren't already masked (which could happen at exit).
         * The first sigdiffset sets lmask to (cantmask & ~curhold).  The
         * second sets the current hold mask to (~0 & ~lmask), which reduces
         * to (~cantmask | curhold).
         */
        lmask = cantmask;
        sigdiffset(&lmask, smask);
        sigdiffset(&curthread->t_hold, &lmask);

        /*
         * Re-enable HUP, QUIT, and TERM iff they were originally enabled
         * Re-enable INT if it's originally enabled and the NFS mount option
         * nointr is not set.
         */
        if (!sigismember(smask, SIGHUP))
                sigdelset(&curthread->t_hold, SIGHUP);
        if (!sigismember(smask, SIGINT) && intable)
                sigdelset(&curthread->t_hold, SIGINT);
        if (!sigismember(smask, SIGQUIT))
                sigdelset(&curthread->t_hold, SIGQUIT);
        if (!sigismember(smask, SIGTERM))
                sigdelset(&curthread->t_hold, SIGTERM);

        /*
         * release access to signal mask
         */
        if (!owned)
                mutex_exit(&p->p_lock);

        /*
         * Indicate that this lwp is not to be stopped.
         */
        lwp->lwp_nostop++;

}
2384 
2385 void
2386 sigunintr(k_sigset_t *smask)
2387 {
2388         proc_t *p;
2389         int owned;
2390         klwp_t *lwp = ttolwp(curthread);
2391 
2392         /*
2393          * Reset previous mask (See sigintr() above)
2394          */
2395         if (lwp != NULL) {
2396                 lwp->lwp_nostop--;   /* restore lwp stoppability */
2397                 p = ttoproc(curthread);
2398                 owned = mutex_owned(&p->p_lock); /* this is filthy */
2399                 if (!owned)
2400                         mutex_enter(&p->p_lock);
2401                 curthread->t_hold = *smask;
2402                 /* so unmasked signals will be seen */
2403                 curthread->t_sig_check = 1;
2404                 if (!owned)
2405                         mutex_exit(&p->p_lock);
2406         }
2407 }
2408 
2409 void
2410 sigreplace(k_sigset_t *newmask, k_sigset_t *oldmask)
2411 {
2412         proc_t  *p;
2413         int owned;
2414         /*
2415          * Save current signal mask in oldmask, then
2416          * set it to newmask.
2417          */
2418         if (ttolwp(curthread) != NULL) {
2419                 p = ttoproc(curthread);
2420                 owned = mutex_owned(&p->p_lock); /* this is filthy */
2421                 if (!owned)
2422                         mutex_enter(&p->p_lock);
2423                 schedctl_finish_sigblock(curthread);
2424                 if (oldmask != NULL)
2425                         *oldmask = curthread->t_hold;
2426                 curthread->t_hold = *newmask;
2427                 curthread->t_sig_check = 1;
2428                 if (!owned)
2429                         mutex_exit(&p->p_lock);
2430         }
2431 }
2432 
2433 /*
2434  * Return true if the signal number is in range
2435  * and the signal code specifies signal queueing.
2436  */
2437 int
2438 sigwillqueue(int sig, int code)
2439 {
2440         if (sig >= 0 && sig < NSIG) {
2441                 switch (code) {
2442                 case SI_QUEUE:
2443                 case SI_TIMER:
2444                 case SI_ASYNCIO:
2445                 case SI_MESGQ:
2446                         return (1);
2447                 }
2448         }
2449         return (0);
2450 }
2451 
2452 /*
2453  * The pre-allocated pool (with _SIGQUEUE_PREALLOC entries) is
2454  * allocated at the first sigqueue/signotify call.
2455  */
/*
 * Allocate a sigqueue header followed in the same allocation by a
 * pool of _SIGQUEUE_PREALLOC entries of 'size' bytes each, and chain
 * the entries onto the header's free list.  'maxcount' bounds the
 * number of simultaneously queued signals for this pool.
 */
sigqhdr_t *
sigqhdralloc(size_t size, uint_t maxcount)
{
        size_t i;
        sigqueue_t *sq, *next;
        sigqhdr_t *sqh;

        /*
         * Before the introduction of process.max-sigqueue-size
         * _SC_SIGQUEUE_MAX had this static value.
         */
#define _SIGQUEUE_PREALLOC      32

        /* One allocation: header plus the fixed-size entry pool. */
        i = (_SIGQUEUE_PREALLOC * size) + sizeof (sigqhdr_t);
        ASSERT(maxcount <= INT_MAX);
        sqh = kmem_alloc(i, KM_SLEEP);
        sqh->sqb_count = maxcount;
        sqh->sqb_maxcount = maxcount;
        sqh->sqb_size = i;
        sqh->sqb_pexited = 0;
        sqh->sqb_sent = 0;
        /* The entry pool begins immediately after the header. */
        sqh->sqb_free = sq = (sigqueue_t *)(sqh + 1);
        /* Link the remaining entries into a singly-linked free list. */
        for (i = _SIGQUEUE_PREALLOC - 1; i != 0; i--) {
                next = (sigqueue_t *)((uintptr_t)sq + size);
                sq->sq_next = next;
                sq = next;
        }
        sq->sq_next = NULL;
        cv_init(&sqh->sqb_cv, NULL, CV_DEFAULT, NULL);
        mutex_init(&sqh->sqb_lock, NULL, MUTEX_DEFAULT, NULL);
        return (sqh);
}
2488 
2489 static void sigqrel(sigqueue_t *);
2490 
2491 /*
2492  * Allocate a sigqueue/signotify structure from the per process
2493  * pre-allocated pool or allocate a new sigqueue/signotify structure
2494  * if the pre-allocated pool is exhausted.
2495  */
/*
 * Allocate a sigqueue/signotify structure from the per process
 * pre-allocated pool, falling back to kmem_alloc() when the pool
 * is exhausted but the count limit has not yet been reached.
 * Returns NULL when the pool's maxcount is exhausted or sqh is NULL.
 */
sigqueue_t *
sigqalloc(sigqhdr_t *sqh)
{
        sigqueue_t *sq = NULL;

        ASSERT(MUTEX_HELD(&curproc->p_lock));

        if (sqh != NULL) {
                mutex_enter(&sqh->sqb_lock);
                if (sqh->sqb_count > 0) {
                        sqh->sqb_count--;
                        if (sqh->sqb_free == NULL) {
                                /*
                                 * The pre-allocated pool is exhausted.
                                 */
                                sq = kmem_alloc(sizeof (sigqueue_t), KM_SLEEP);
                                sq->sq_func = NULL;
                        } else {
                                /*
                                 * Pool entries carry sigqrel() as their
                                 * destructor so siginfofree() returns
                                 * them to the pool rather than kmem.
                                 */
                                sq = sqh->sqb_free;
                                sq->sq_func = sigqrel;
                                sqh->sqb_free = sq->sq_next;
                        }
                        mutex_exit(&sqh->sqb_lock);
                        bzero(&sq->sq_info, sizeof (k_siginfo_t));
                        sq->sq_backptr = sqh;
                        sq->sq_next = NULL;
                        sq->sq_external = 0;
                } else {
                        mutex_exit(&sqh->sqb_lock);
                }
        }
        return (sq);
}
2529 
2530 /*
2531  * Return a sigqueue structure back to the pre-allocated pool.
2532  */
/*
 * Return a sigqueue structure back to the pre-allocated pool.
 * If the owning process has already exited and this is the last
 * outstanding entry, tear down and free the whole pool instead.
 */
static void
sigqrel(sigqueue_t *sq)
{
        sigqhdr_t *sqh;

        /* make sure that p_lock of the affected process is held */

        sqh = (sigqhdr_t *)sq->sq_backptr;
        mutex_enter(&sqh->sqb_lock);
        if (sqh->sqb_pexited && sqh->sqb_sent == 1) {
                /* Last reference after process exit: free the pool. */
                mutex_exit(&sqh->sqb_lock);
                cv_destroy(&sqh->sqb_cv);
                mutex_destroy(&sqh->sqb_lock);
                kmem_free(sqh, sqh->sqb_size);
        } else {
                /* Push the entry back on the free list and wake waiters. */
                sqh->sqb_count++;
                sqh->sqb_sent--;
                sq->sq_next = sqh->sqb_free;
                sq->sq_backptr = NULL;
                sqh->sqb_free = sq;
                cv_signal(&sqh->sqb_cv);
                mutex_exit(&sqh->sqb_lock);
        }
}
2557 
2558 /*
2559  * Free up the pre-allocated sigqueue headers of sigqueue pool
2560  * and signotify pool, if possible.
2561  * Called only by the owning process during exec() and exit().
2562  */
2563 void
2564 sigqfree(proc_t *p)
2565 {
2566         ASSERT(MUTEX_HELD(&p->p_lock));
2567 
2568         if (p->p_sigqhdr != NULL) {  /* sigqueue pool */
2569                 sigqhdrfree(p->p_sigqhdr);
2570                 p->p_sigqhdr = NULL;
2571         }
2572         if (p->p_signhdr != NULL) {  /* signotify pool */
2573                 sigqhdrfree(p->p_signhdr);
2574                 p->p_signhdr = NULL;
2575         }
2576 }
2577 
2578 /*
2579  * Free up the pre-allocated header and sigq pool if possible.
2580  */
2581 void
2582 sigqhdrfree(sigqhdr_t *sqh)
2583 {
2584         mutex_enter(&sqh->sqb_lock);
2585         if (sqh->sqb_sent == 0) {
2586                 mutex_exit(&sqh->sqb_lock);
2587                 cv_destroy(&sqh->sqb_cv);
2588                 mutex_destroy(&sqh->sqb_lock);
2589                 kmem_free(sqh, sqh->sqb_size);
2590         } else {
2591                 sqh->sqb_pexited = 1;
2592                 mutex_exit(&sqh->sqb_lock);
2593         }
2594 }
2595 
2596 /*
2597  * Free up a single sigqueue structure.
2598  * No other code should free a sigqueue directly.
2599  */
2600 void
2601 siginfofree(sigqueue_t *sqp)
2602 {
2603         if (sqp != NULL) {
2604                 if (sqp->sq_func != NULL)
2605                         (sqp->sq_func)(sqp);
2606                 else
2607                         kmem_free(sqp, sizeof (sigqueue_t));
2608         }
2609 }
2610 
2611 /*
2612  * Generate a synchronous signal caused by a hardware
2613  * condition encountered by an lwp.  Called from trap().
2614  */
/*
 * Generate a synchronous signal caused by a hardware
 * condition encountered by an lwp.  Called from trap().
 * 'restartable' indicates the faulting instruction will be retried,
 * so the signal must be deliverable to avoid a trap/signal loop.
 */
void
trapsig(k_siginfo_t *ip, int restartable)
{
        proc_t *p = ttoproc(curthread);
        int sig = ip->si_signo;
        sigqueue_t *sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);

        ASSERT(sig > 0 && sig < NSIG);

        /* Let DTrace unwind any in-progress safe-copy operations. */
        if (curthread->t_dtrace_on)
                dtrace_safe_synchronous_signal();

        mutex_enter(&p->p_lock);
        schedctl_finish_sigblock(curthread);
        /*
         * Avoid a possible infinite loop if the lwp is holding the
         * signal generated by a trap of a restartable instruction or
         * if the signal so generated is being ignored by the process.
         */
        if (restartable &&
            (sigismember(&curthread->t_hold, sig) ||
            p->p_user.u_signal[sig-1] == SIG_IGN)) {
                /* Force default disposition so the signal is delivered. */
                sigdelset(&curthread->t_hold, sig);
                p->p_user.u_signal[sig-1] = SIG_DFL;
                sigdelset(&p->p_ignore, sig);
        }
        bcopy(ip, &sqp->sq_info, sizeof (k_siginfo_t));
        sigaddqa(p, curthread, sqp);
        mutex_exit(&p->p_lock);
}
2645 
2646 /*
2647  * Dispatch the real time profiling signal in the traditional way,
2648  * honoring all of the /proc tracing mechanism built into issig().
2649  */
/*
 * Dispatch the real time profiling signal in the traditional way,
 * honoring all of the /proc tracing mechanism built into issig().
 */
static void
realsigprof_slow(int sysnum, int nsysarg, int error)
{
        kthread_t *t = curthread;
        proc_t *p = ttoproc(t);
        klwp_t *lwp = ttolwp(t);
        k_siginfo_t *sip = &lwp->lwp_siginfo;
        void (*func)();

        mutex_enter(&p->p_lock);
        func = PTOU(p)->u_signal[SIGPROF - 1];
        /*
         * If profiling has been torn down or SIGPROF is defaulted or
         * ignored, just discard the accumulated profiling state.
         */
        if (p->p_rprof_cyclic == CYCLIC_NONE ||
            func == SIG_DFL || func == SIG_IGN) {
                bzero(t->t_rprof, sizeof (*t->t_rprof));
                mutex_exit(&p->p_lock);
                return;
        }
        /* SIGPROF held: leave the state intact for a later attempt. */
        if (sigismember(&t->t_hold, SIGPROF)) {
                mutex_exit(&p->p_lock);
                return;
        }
        /* Fill in the profiling siginfo from the lwp's saved state. */
        sip->si_signo = SIGPROF;
        sip->si_code = PROF_SIG;
        sip->si_errno = error;
        hrt2ts(gethrtime(), &sip->si_tstamp);
        sip->si_syscall = sysnum;
        sip->si_nsysarg = nsysarg;
        sip->si_fault = lwp->lwp_lastfault;
        sip->si_faddr = lwp->lwp_lastfaddr;
        lwp->lwp_lastfault = 0;
        lwp->lwp_lastfaddr = NULL;
        sigtoproc(p, t, SIGPROF);
        mutex_exit(&p->p_lock);
        ASSERT(lwp->lwp_cursig == 0);
        /* Deliver through the full issig()/psig() machinery. */
        if (issig(FORREAL))
                psig();
        sip->si_signo = 0;
        bzero(t->t_rprof, sizeof (*t->t_rprof));
}
2689 
2690 /*
2691  * We are not tracing the SIGPROF signal, or doing any other unnatural
2692  * acts, like watchpoints, so dispatch the real time profiling signal
2693  * directly, bypassing all of the overhead built into issig().
2694  */
/*
 * We are not tracing the SIGPROF signal, or doing any other unnatural
 * acts, like watchpoints, so dispatch the real time profiling signal
 * directly, bypassing all of the overhead built into issig().
 */
static void
realsigprof_fast(int sysnum, int nsysarg, int error)
{
        kthread_t *t = curthread;
        proc_t *p = ttoproc(t);
        klwp_t *lwp = ttolwp(t);
        k_siginfo_t *sip = &lwp->lwp_siginfo;
        void (*func)();
        int rc;
        int code;

        /*
         * We don't need to acquire p->p_lock here;
         * we are manipulating thread-private data.
         */
        func = PTOU(p)->u_signal[SIGPROF - 1];
        /*
         * If profiling has been torn down or SIGPROF is defaulted or
         * ignored, just discard the accumulated profiling state.
         */
        if (p->p_rprof_cyclic == CYCLIC_NONE ||
            func == SIG_DFL || func == SIG_IGN) {
                bzero(t->t_rprof, sizeof (*t->t_rprof));
                return;
        }
        /* A signal is already in progress or SIGPROF is held: bail. */
        if (lwp->lwp_cursig != 0 ||
            lwp->lwp_curinfo != NULL ||
            sigismember(&t->t_hold, SIGPROF)) {
                return;
        }
        /* Fill in the profiling siginfo from the lwp's saved state. */
        sip->si_signo = SIGPROF;
        sip->si_code = PROF_SIG;
        sip->si_errno = error;
        hrt2ts(gethrtime(), &sip->si_tstamp);
        sip->si_syscall = sysnum;
        sip->si_nsysarg = nsysarg;
        sip->si_fault = lwp->lwp_lastfault;
        sip->si_faddr = lwp->lwp_lastfaddr;
        lwp->lwp_lastfault = 0;
        lwp->lwp_lastfaddr = NULL;
        /* Save the old mask unless sigsuspend() already stashed it. */
        if (t->t_flag & T_TOMASK)
                t->t_flag &= ~T_TOMASK;
        else
                lwp->lwp_sigoldmask = t->t_hold;
        /* Block the handler's sa_mask, plus SIGPROF unless SA_NODEFER. */
        sigorset(&t->t_hold, &PTOU(p)->u_sigmask[SIGPROF - 1]);
        if (!sigismember(&PTOU(p)->u_signodefer, SIGPROF))
                sigaddset(&t->t_hold, SIGPROF);
        lwp->lwp_extsig = 0;
        lwp->lwp_ru.nsignals++;
        /*
         * NOTE(review): in a non-_SYSCALL32_IMPL build with a
         * non-native data model, rc would be used uninitialized;
         * presumably that combination cannot occur — confirm.
         */
        if (p->p_model == DATAMODEL_NATIVE)
                rc = sendsig(SIGPROF, sip, func);
#ifdef _SYSCALL32_IMPL
        else
                rc = sendsig32(SIGPROF, sip, func);
#endif  /* _SYSCALL32_IMPL */
        sip->si_signo = 0;
        bzero(t->t_rprof, sizeof (*t->t_rprof));
        if (rc == 0) {
                /*
                 * sendsig() failed; we must dump core with a SIGSEGV.
                 * See psig().  This code is copied from there.
                 */
                lwp->lwp_cursig = SIGSEGV;
                code = CLD_KILLED;
                proc_is_exiting(p);
                if (exitlwps(1) != 0) {
                        mutex_enter(&p->p_lock);
                        lwp_exit();
                }
                if (audit_active == C2AUDIT_LOADED)
                        audit_core_start(SIGSEGV);
                if (core(SIGSEGV, 0) == 0)
                        code = CLD_DUMPED;
                if (audit_active == C2AUDIT_LOADED)
                        audit_core_finish(code);
                exit(code, SIGSEGV);
        }
}
2769 
2770 /*
2771  * Arrange for the real time profiling signal to be dispatched.
2772  */
2773 void
2774 realsigprof(int sysnum, int nsysarg, int error)
2775 {
2776         kthread_t *t = curthread;
2777         proc_t *p = ttoproc(t);
2778 
2779         if (t->t_rprof->rp_anystate == 0)
2780                 return;
2781 
2782         schedctl_finish_sigblock(t);
2783 
2784         /* test for any activity that requires p->p_lock */
2785         if (tracing(p, SIGPROF) || pr_watch_active(p) ||
2786             sigismember(&PTOU(p)->u_sigresethand, SIGPROF)) {
2787                 /* do it the classic slow way */
2788                 realsigprof_slow(sysnum, nsysarg, error);
2789         } else {
2790                 /* do it the cheating-a-little fast way */
2791                 realsigprof_fast(sysnum, nsysarg, error);
2792         }
2793 }
2794 
2795 #ifdef _SYSCALL32_IMPL
2796 
2797 /*
2798  * It's tricky to transmit a sigval between 32-bit and 64-bit
2799  * process, since in the 64-bit world, a pointer and an integer
2800  * are different sizes.  Since we're constrained by the standards
2801  * world not to change the types, and it's unclear how useful it is
2802  * to send pointers between address spaces this way, we preserve
2803  * the 'int' interpretation for 32-bit processes interoperating
2804  * with 64-bit processes.  The full semantics (pointers or integers)
2805  * are available for N-bit processes interoperating with N-bit
2806  * processes.
2807  */
/*
 * Convert a native (64-bit kernel) k_siginfo_t to its 32-bit
 * user-visible form, truncating pointers and sigval as described
 * in the block comment above.
 */
void
siginfo_kto32(const k_siginfo_t *src, siginfo32_t *dest)
{
        bzero(dest, sizeof (*dest));

        /*
         * The absolute minimum content is si_signo and si_code.
         */
        dest->si_signo = src->si_signo;
        if ((dest->si_code = src->si_code) == SI_NOINFO)
                return;

        /*
         * A siginfo generated by user level is structured
         * differently from one generated by the kernel.
         */
        if (SI_FROMUSER(src)) {
                dest->si_pid = src->si_pid;
                dest->si_ctid = src->si_ctid;
                dest->si_zoneid = src->si_zoneid;
                dest->si_uid = src->si_uid;
                /* Preserve only the 'int' interpretation of the sigval. */
                if (SI_CANQUEUE(src->si_code))
                        dest->si_value.sival_int =
                            (int32_t)src->si_value.sival_int;
                return;
        }

        dest->si_errno = src->si_errno;

        /* Kernel-generated: copy the fields relevant to the signal. */
        switch (src->si_signo) {
        default:
                dest->si_pid = src->si_pid;
                dest->si_ctid = src->si_ctid;
                dest->si_zoneid = src->si_zoneid;
                dest->si_uid = src->si_uid;
                dest->si_value.sival_int = (int32_t)src->si_value.sival_int;
                break;
        case SIGCLD:
                dest->si_pid = src->si_pid;
                dest->si_ctid = src->si_ctid;
                dest->si_zoneid = src->si_zoneid;
                dest->si_status = src->si_status;
                dest->si_stime = src->si_stime;
                dest->si_utime = src->si_utime;
                break;
        case SIGSEGV:
        case SIGBUS:
        case SIGILL:
        case SIGTRAP:
        case SIGFPE:
        case SIGEMT:
                /* Fault signals: truncate the fault and pc addresses. */
                dest->si_addr = (caddr32_t)(uintptr_t)src->si_addr;
                dest->si_trapno = src->si_trapno;
                dest->si_pc = (caddr32_t)(uintptr_t)src->si_pc;
                break;
        case SIGPOLL:
        case SIGXFSZ:
                dest->si_fd = src->si_fd;
                dest->si_band = src->si_band;
                break;
        case SIGPROF:
                dest->si_faddr = (caddr32_t)(uintptr_t)src->si_faddr;
                dest->si_tstamp.tv_sec = src->si_tstamp.tv_sec;
                dest->si_tstamp.tv_nsec = src->si_tstamp.tv_nsec;
                dest->si_syscall = src->si_syscall;
                dest->si_nsysarg = src->si_nsysarg;
                dest->si_fault = src->si_fault;
                break;
        }
}
2878 
/*
 * Convert a 32-bit siginfo to the native kernel k_siginfo_t form;
 * the inverse of siginfo_kto32() (pointers are zero-extended).
 */
void
siginfo_32tok(const siginfo32_t *src, k_siginfo_t *dest)
{
        bzero(dest, sizeof (*dest));

        /*
         * The absolute minimum content is si_signo and si_code.
         */
        dest->si_signo = src->si_signo;
        if ((dest->si_code = src->si_code) == SI_NOINFO)
                return;

        /*
         * A siginfo generated by user level is structured
         * differently from one generated by the kernel.
         */
        if (SI_FROMUSER(src)) {
                dest->si_pid = src->si_pid;
                dest->si_ctid = src->si_ctid;
                dest->si_zoneid = src->si_zoneid;
                dest->si_uid = src->si_uid;
                /* Preserve only the 'int' interpretation of the sigval. */
                if (SI_CANQUEUE(src->si_code))
                        dest->si_value.sival_int =
                            (int)src->si_value.sival_int;
                return;
        }

        dest->si_errno = src->si_errno;

        /* Kernel-generated: copy the fields relevant to the signal. */
        switch (src->si_signo) {
        default:
                dest->si_pid = src->si_pid;
                dest->si_ctid = src->si_ctid;
                dest->si_zoneid = src->si_zoneid;
                dest->si_uid = src->si_uid;
                dest->si_value.sival_int = (int)src->si_value.sival_int;
                break;
        case SIGCLD:
                dest->si_pid = src->si_pid;
                dest->si_ctid = src->si_ctid;
                dest->si_zoneid = src->si_zoneid;
                dest->si_status = src->si_status;
                dest->si_stime = src->si_stime;
                dest->si_utime = src->si_utime;
                break;
        case SIGSEGV:
        case SIGBUS:
        case SIGILL:
        case SIGTRAP:
        case SIGFPE:
        case SIGEMT:
                /* Fault signals: widen the fault and pc addresses. */
                dest->si_addr = (void *)(uintptr_t)src->si_addr;
                dest->si_trapno = src->si_trapno;
                dest->si_pc = (void *)(uintptr_t)src->si_pc;
                break;
        case SIGPOLL:
        case SIGXFSZ:
                dest->si_fd = src->si_fd;
                dest->si_band = src->si_band;
                break;
        case SIGPROF:
                dest->si_faddr = (void *)(uintptr_t)src->si_faddr;
                dest->si_tstamp.tv_sec = src->si_tstamp.tv_sec;
                dest->si_tstamp.tv_nsec = src->si_tstamp.tv_nsec;
                dest->si_syscall = src->si_syscall;
                dest->si_nsysarg = src->si_nsysarg;
                dest->si_fault = src->si_fault;
                break;
        }
}
2949 
2950 #endif /* _SYSCALL32_IMPL */