1 /*
   2  * CDDL HEADER START
   3  *
   4  * The contents of this file are subject to the terms of the
   5  * Common Development and Distribution License (the "License").
   6  * You may not use this file except in compliance with the License.
   7  *
   8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9  * or http://www.opensolaris.org/os/licensing.
  10  * See the License for the specific language governing permissions
  11  * and limitations under the License.
  12  *
  13  * When distributing Covered Code, include this CDDL HEADER in each
  14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15  * If applicable, add the following below this CDDL HEADER, with the
  16  * fields enclosed by brackets "[]" replaced with your own identifying
  17  * information: Portions Copyright [yyyy] [name of copyright owner]
  18  *
  19  * CDDL HEADER END
  20  */
  21 
  22 /*
  23  * Copyright (c) 1991, 2010, Oracle and/or its affiliates. All rights reserved.
  24  * Copyright 2021 Joyent, Inc.
  25  */
  26 
  27 #include <sys/types.h>
  28 #include <sys/param.h>
  29 #include <sys/sysmacros.h>
  30 #include <sys/signal.h>
  31 #include <sys/stack.h>
  32 #include <sys/pcb.h>
  33 #include <sys/user.h>
  34 #include <sys/systm.h>
  35 #include <sys/sysinfo.h>
  36 #include <sys/errno.h>
  37 #include <sys/cmn_err.h>
  38 #include <sys/cred.h>
  39 #include <sys/resource.h>
  40 #include <sys/task.h>
  41 #include <sys/project.h>
  42 #include <sys/proc.h>
  43 #include <sys/debug.h>
  44 #include <sys/disp.h>
  45 #include <sys/class.h>
  46 #include <vm/seg_kmem.h>
  47 #include <vm/seg_kp.h>
  48 #include <sys/machlock.h>
  49 #include <sys/kmem.h>
  50 #include <sys/varargs.h>
  51 #include <sys/turnstile.h>
  52 #include <sys/poll.h>
  53 #include <sys/vtrace.h>
  54 #include <sys/callb.h>
  55 #include <c2/audit.h>
  56 #include <sys/tnf.h>
  57 #include <sys/sobject.h>
  58 #include <sys/cpupart.h>
  59 #include <sys/pset.h>
  60 #include <sys/door.h>
  61 #include <sys/spl.h>
  62 #include <sys/copyops.h>
  63 #include <sys/rctl.h>
  64 #include <sys/brand.h>
  65 #include <sys/pool.h>
  66 #include <sys/zone.h>
  67 #include <sys/tsol/label.h>
  68 #include <sys/tsol/tndb.h>
  69 #include <sys/cpc_impl.h>
  70 #include <sys/sdt.h>
  71 #include <sys/reboot.h>
  72 #include <sys/kdi.h>
  73 #include <sys/schedctl.h>
  74 #include <sys/waitq.h>
  75 #include <sys/cpucaps.h>
  76 #include <sys/kiconv.h>
  77 #include <sys/ctype.h>
  78 #include <sys/smt.h>
  79 
  80 struct kmem_cache *thread_cache;        /* cache of free threads */
  81 struct kmem_cache *lwp_cache;           /* cache of free lwps */
  82 struct kmem_cache *turnstile_cache;     /* cache of free turnstiles */
  83 
  84 /*
  85  * allthreads is only for use by kmem_readers.  All kernel loops can use
  86  * the current thread as a start/end point.
  87  */
  88 kthread_t *allthreads = &t0;        /* circular list of all threads */
  89 
  90 static kcondvar_t reaper_cv;            /* synchronization var */
  91 kthread_t       *thread_deathrow;       /* circular list of reapable threads */
  92 kthread_t       *lwp_deathrow;          /* circular list of reapable threads */
  93 kmutex_t        reaplock;               /* protects lwp and thread deathrows */
  94 int     thread_reapcnt = 0;             /* number of threads on deathrow */
  95 int     lwp_reapcnt = 0;                /* number of lwps on deathrow */
  96 int     reaplimit = 16;                 /* delay reaping until reaplimit */
  97 
  98 thread_free_lock_t      *thread_free_lock;
  99                                         /* protects tick thread from reaper */
 100 
 101 extern int nthread;
 102 
 103 /* System Scheduling classes. */
 104 id_t    syscid;                         /* system scheduling class ID */
 105 id_t    sysdccid = CLASS_UNUSED;        /* reset when SDC loads */
 106 
 107 void    *segkp_thread;                  /* cookie for segkp pool */
 108 
 109 int lwp_cache_sz = 32;
 110 int t_cache_sz = 8;
 111 static kt_did_t next_t_id = 1;
 112 
 113 /* Default mode for thread binding to CPUs and processor sets */
 114 int default_binding_mode = TB_ALLHARD;
 115 
 116 /*
 117  * Min/Max stack sizes for stack size parameters
 118  */
 119 #define MAX_STKSIZE     (32 * DEFAULTSTKSZ)
 120 #define MIN_STKSIZE     DEFAULTSTKSZ
 121 
 122 /*
 123  * default_stksize overrides lwp_default_stksize if it is set.
 124  */
 125 int     default_stksize;
 126 int     lwp_default_stksize;
 127 
 128 static zone_key_t zone_thread_key;
 129 
 130 unsigned int kmem_stackinfo;            /* stackinfo feature on-off */
 131 kmem_stkinfo_t *kmem_stkinfo_log;       /* stackinfo circular log */
 132 static kmutex_t kmem_stkinfo_lock;      /* protects kmem_stkinfo_log */
 133 
 134 /*
 135  * forward declarations for internal thread specific data (tsd)
 136  */
 137 static void *tsd_realloc(void *, size_t, size_t);
 138 
 139 void thread_reaper(void);
 140 
 141 /* forward declarations for stackinfo feature */
 142 static void stkinfo_begin(kthread_t *);
 143 static void stkinfo_end(kthread_t *);
 144 static size_t stkinfo_percent(caddr_t, caddr_t, caddr_t);
 145 
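     /*
      * kmem cache constructor and destructor for turnstiles.  The constructor
      * simply zeroes the buffer; the destructor's ASSERTs check that a
      * turnstile going back to the cache is quiescent: no free list, no
      * waiters, no inheritor, and empty sleep queues.
      */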
 146 /*ARGSUSED*/
 147 static int
 148 turnstile_constructor(void *buf, void *cdrarg, int kmflags)
 149 {
 150         bzero(buf, sizeof (turnstile_t));
 151         return (0);
 152 }
 153 
 154 /*ARGSUSED*/
 155 static void
 156 turnstile_destructor(void *buf, void *cdrarg)
 157 {
 158         turnstile_t *ts = buf;
 159 
 160         ASSERT(ts->ts_free == NULL);
 161         ASSERT(ts->ts_waiters == 0);
 162         ASSERT(ts->ts_inheritor == NULL);
 163         ASSERT(ts->ts_sleepq[0].sq_first == NULL);
 164         ASSERT(ts->ts_sleepq[1].sq_first == NULL);
 165 }
 166 
 167 void
 168 thread_init(void)
 169 {
 170         kthread_t *tp;
 171         extern char sys_name[];
 172         extern void idle();
 173         struct cpu *cpu = CPU;
 174         int i;
 175         kmutex_t *lp;
 176 
 177         mutex_init(&reaplock, NULL, MUTEX_SPIN, (void *)ipltospl(DISP_LEVEL));
 178         thread_free_lock =
 179             kmem_alloc(sizeof (thread_free_lock_t) * THREAD_FREE_NUM, KM_SLEEP);
 180         for (i = 0; i < THREAD_FREE_NUM; i++) {
 181                 lp = &thread_free_lock[i].tf_lock;
 182                 mutex_init(lp, NULL, MUTEX_DEFAULT, NULL);
 183         }
 184 
 185 #if defined(__i386) || defined(__amd64)
 186         thread_cache = kmem_cache_create("thread_cache", sizeof (kthread_t),
 187             PTR24_ALIGN, NULL, NULL, NULL, NULL, NULL, 0);
 188 
 189         /*
 190          * "struct _klwp" includes a "struct pcb", which includes a
 191          * "struct fpu", which needs to be 64-byte aligned on amd64
 192          * (and even on i386) for xsave/xrstor.
 193          */
 194         lwp_cache = kmem_cache_create("lwp_cache", sizeof (klwp_t),
 195             64, NULL, NULL, NULL, NULL, NULL, 0);
 196 #else
 197         /*
 198          * Allocate thread structures from static_arena.  This prevents
 199          * issues where a thread tries to relocate its own thread
 200          * structure and touches it after the mapping has been suspended.
 201          */
 202         thread_cache = kmem_cache_create("thread_cache", sizeof (kthread_t),
 203             PTR24_ALIGN, NULL, NULL, NULL, NULL, static_arena, 0);
 204 
 205         lwp_stk_cache_init();
 206 
 207         lwp_cache = kmem_cache_create("lwp_cache", sizeof (klwp_t),
 208             0, NULL, NULL, NULL, NULL, NULL, 0);
 209 #endif
 210 
 211         turnstile_cache = kmem_cache_create("turnstile_cache",
 212             sizeof (turnstile_t), 0,
 213             turnstile_constructor, turnstile_destructor, NULL, NULL, NULL, 0);
 214 
 215         label_init();
 216         cred_init();
 217 
 218         /*
 219          * Initialize various resource management facilities.
 220          */
 221         rctl_init();
 222         cpucaps_init();
 223         /*
 224          * Zone_init() should be called before project_init() so that project ID
 225          * for the first project is initialized correctly.
 226          */
 227         zone_init();
 228         project_init();
 229         brand_init();
 230         kiconv_init();
 231         task_init();
 232         tcache_init();
 233         pool_init();
 234 
 235         curthread->t_ts = kmem_cache_alloc(turnstile_cache, KM_SLEEP);
 236 
 237         /*
 238          * Originally, we had two parameters to set default stack
 239          * size: one for lwp's (lwp_default_stksize), and one for
 240          * kernel-only threads (DEFAULTSTKSZ, a.k.a. _defaultstksz).
 241          * Now we have a third parameter that overrides both if it is
 242          * set to a legal stack size, called default_stksize.
 243          */
 244 
 245         if (default_stksize == 0) {
 246                 default_stksize = DEFAULTSTKSZ;
 247         } else if (default_stksize % PAGESIZE != 0 ||
 248             default_stksize > MAX_STKSIZE ||
 249             default_stksize < MIN_STKSIZE) {
 250                 cmn_err(CE_WARN, "Illegal stack size. Using %d",
 251                     (int)DEFAULTSTKSZ);
 252                 default_stksize = DEFAULTSTKSZ;
 253         } else {
 254                 lwp_default_stksize = default_stksize;
 255         }
 256 
 257         if (lwp_default_stksize == 0) {
 258                 lwp_default_stksize = default_stksize;
 259         } else if (lwp_default_stksize % PAGESIZE != 0 ||
 260             lwp_default_stksize > MAX_STKSIZE ||
 261             lwp_default_stksize < MIN_STKSIZE) {
 262                 cmn_err(CE_WARN, "Illegal stack size. Using %d",
 263                     default_stksize);
 264                 lwp_default_stksize = default_stksize;
 265         }
 266 
 267         segkp_lwp = segkp_cache_init(segkp, lwp_cache_sz,
 268             lwp_default_stksize,
 269             (KPD_NOWAIT | KPD_HASREDZONE | KPD_LOCKED));
 270 
 271         segkp_thread = segkp_cache_init(segkp, t_cache_sz,
 272             default_stksize, KPD_HASREDZONE | KPD_LOCKED | KPD_NO_ANON);
 273 
 274         (void) getcid(sys_name, &syscid);
 275         curthread->t_cid = syscid;   /* current thread is t0 */
 276 
 277         /*
 278          * Set up the first CPU's idle thread.
 279          * It runs whenever the CPU has nothing worthwhile to do.
 280          */
 281         tp = thread_create(NULL, 0, idle, NULL, 0, &p0, TS_STOPPED, -1);
 282         cpu->cpu_idle_thread = tp;
 283         tp->t_preempt = 1;
 284         tp->t_disp_queue = cpu->cpu_disp;
 285         ASSERT(tp->t_disp_queue != NULL);
 286         tp->t_bound_cpu = cpu;
 287         tp->t_affinitycnt = 1;
 288 
 289         /*
 290          * Registering a thread in the callback table is usually
 291          * done in the initialization code of the thread. In this
 292          * case, we do it right after thread creation to avoid
 293          * blocking the idle thread while it registers itself. It also
 294          * avoids the possibility of reregistration in case a CPU
 295          * restarts its idle thread.
 296          */
 297         CALLB_CPR_INIT_SAFE(tp, "idle");
 298 
 299         /*
 300          * Create the thread_reaper daemon. From this point on, exited
 301          * threads will get reaped.
 302          */
 303         (void) thread_create(NULL, 0, (void (*)())thread_reaper,
 304             NULL, 0, &p0, TS_RUN, minclsyspri);
 305 
 306         /*
 307          * Finish initializing the kernel memory allocator now that
 308          * thread_create() is available.
 309          */
 310         kmem_thread_init();
 311 
 312         if (boothowto & RB_DEBUG)
 313                 kdi_dvec_thravail();
 314 }
 315 
 316 /*
 317  * Create a thread.
 318  *
 319  * thread_create() blocks for memory if necessary.  It never fails.
 320  *
 321  * If stk is NULL, the thread is created at the base of the stack
 322  * and cannot be swapped.
 323  */
 324 kthread_t *
 325 thread_create(
 326         caddr_t stk,
 327         size_t  stksize,
 328         void    (*proc)(),
 329         void    *arg,
 330         size_t  len,
 331         proc_t   *pp,
 332         int     state,
 333         pri_t   pri)
 334 {
 335         kthread_t *t;
 336         extern struct classfuncs sys_classfuncs;
 337         turnstile_t *ts;
 338 
 339         /*
 340          * Every thread keeps a turnstile around in case it needs to block.
 341          * The only reason the turnstile is not simply part of the thread
 342          * structure is that we may have to break the association whenever
 343          * more than one thread blocks on a given synchronization object.
 344          * From a memory-management standpoint, turnstiles are like the
 345          * "attached mblks" that hang off dblks in the streams allocator.
 346          */
 347         ts = kmem_cache_alloc(turnstile_cache, KM_SLEEP);
 348 
 349         if (stk == NULL) {
 350                 /*
 351                  * alloc both thread and stack in segkp chunk
 352                  */
 353 
 354                 if (stksize < default_stksize)
 355                         stksize = default_stksize;
 356 
 357                 if (stksize == default_stksize) {
 358                         stk = (caddr_t)segkp_cache_get(segkp_thread);
 359                 } else {
 360                         stksize = roundup(stksize, PAGESIZE);
 361                         stk = (caddr_t)segkp_get(segkp, stksize,
 362                             (KPD_HASREDZONE | KPD_NO_ANON | KPD_LOCKED));
 363                 }
 364 
 365                 ASSERT(stk != NULL);
 366 
 367                 /*
 368                  * The machine-dependent mutex code may require that
 369                  * thread pointers (since they may be used for mutex owner
 370                  * fields) have certain alignment requirements.
 371                  * PTR24_ALIGN is the size of the alignment quanta.
 372                  * XXX - assumes stack grows toward low addresses.
 373                  */
 374                 if (stksize <= sizeof (kthread_t) + PTR24_ALIGN)
 375                         cmn_err(CE_PANIC, "thread_create: proposed stack size"
 376                             " too small to hold thread.");
 377 #ifdef STACK_GROWTH_DOWN
 378                 stksize -= SA(sizeof (kthread_t) + PTR24_ALIGN - 1);
 379                 stksize &= -PTR24_ALIGN;    /* make thread aligned */
 380                 t = (kthread_t *)(stk + stksize);
 381                 bzero(t, sizeof (kthread_t));
 382                 if (audit_active)
 383                         audit_thread_create(t);
 384                 t->t_stk = stk + stksize;
 385                 t->t_stkbase = stk;
 386 #else   /* stack grows to larger addresses */
 387                 stksize -= SA(sizeof (kthread_t));
 388                 t = (kthread_t *)(stk);
 389                 bzero(t, sizeof (kthread_t));
 390                 t->t_stk = stk + sizeof (kthread_t);
 391                 t->t_stkbase = stk + stksize + sizeof (kthread_t);
 392 #endif  /* STACK_GROWTH_DOWN */
 393                 t->t_flag |= T_TALLOCSTK;
 394                 t->t_swap = stk;
 395         } else {
 396                 t = kmem_cache_alloc(thread_cache, KM_SLEEP);
 397                 bzero(t, sizeof (kthread_t));
 398                 ASSERT(((uintptr_t)t & (PTR24_ALIGN - 1)) == 0);
 399                 if (audit_active)
 400                         audit_thread_create(t);
 401                 /*
 402                  * Initialize t_stk to the kernel stack pointer to use
 403                  * upon entry to the kernel
 404                  */
 405 #ifdef STACK_GROWTH_DOWN
 406                 t->t_stk = stk + stksize;
 407                 t->t_stkbase = stk;
 408 #else
 409                 t->t_stk = stk;                      /* 3b2-like */
 410                 t->t_stkbase = stk + stksize;
 411 #endif /* STACK_GROWTH_DOWN */
 412         }
 413 
 414         if (kmem_stackinfo != 0) {
 415                 stkinfo_begin(t);
 416         }
 417 
 418         t->t_ts = ts;
 419 
 420         /*
 421          * p_cred could be NULL if thread_create() is called before cred_init()
 422          * is called in main().
 423          */
 424         mutex_enter(&pp->p_crlock);
 425         if (pp->p_cred)
 426                 crhold(t->t_cred = pp->p_cred);
 427         mutex_exit(&pp->p_crlock);
 428         t->t_start = gethrestime_sec();
 429         t->t_startpc = proc;
 430         t->t_procp = pp;
 431         t->t_clfuncs = &sys_classfuncs.thread;
 432         t->t_cid = syscid;
 433         t->t_pri = pri;
 434         t->t_stime = ddi_get_lbolt();
 435         t->t_schedflag = TS_LOAD | TS_DONT_SWAP;
 436         t->t_bind_cpu = PBIND_NONE;
 437         t->t_bindflag = (uchar_t)default_binding_mode;
 438         t->t_bind_pset = PS_NONE;
 439         t->t_plockp = &pp->p_lock;
 440         t->t_copyops = NULL;
 441         t->t_taskq = NULL;
 442         t->t_anttime = 0;
 443         t->t_hatdepth = 0;
 444 
 445         t->t_dtrace_vtime = 1;       /* assure vtimestamp is always non-zero */
 446 
 447         CPU_STATS_ADDQ(CPU, sys, nthreads, 1);
 448 #ifndef NPROBE
 449         /* Kernel probe */
 450         tnf_thread_create(t);
 451 #endif /* NPROBE */
 452         LOCK_INIT_CLEAR(&t->t_lock);
 453 
 454         /*
 455          * Callers who give us a NULL proc must do their own
 456          * stack initialization, e.g. lwp_create().
 457          */
 458         if (proc != NULL) {
 459                 t->t_stk = thread_stk_init(t->t_stk);
 460                 thread_load(t, proc, arg, len);
 461         }
 462 
 463         /*
 464          * Put a hold on project0. If this thread is actually in a
 465          * different project, then t_proj will be changed later in
 466          * lwp_create().  All kernel-only threads must be in project 0.
 467          */
 468         t->t_proj = project_hold(proj0p);
 469 
 470         lgrp_affinity_init(&t->t_lgrp_affinity);
 471 
 472         mutex_enter(&pidlock);
 473         nthread++;
 474         t->t_did = next_t_id++;
 475         t->t_prev = curthread->t_prev;
 476         t->t_next = curthread;
 477 
 478         /*
 479          * Add the thread to the list of all threads, and initialize
 480          * its t_cpu pointer.  We need to block preemption since
 481          * cpu_offline walks the thread list looking for threads
 482          * with t_cpu pointing to the CPU being offlined.  We want
 483          * to make sure that the list is consistent and that if t_cpu
 484          * is set, the thread is on the list.
 485          */
 486         kpreempt_disable();
 487         curthread->t_prev->t_next = t;
 488         curthread->t_prev = t;
 489 
 490         /*
 491          * We'll always create in the default partition since that's where
 492          * kernel threads go (we'll change this later if needed, in
 493          * lwp_create()).
 494          */
 495         t->t_cpupart = &cp_default;
 496 
 497         /*
 498          * For now, affiliate this thread with the root lgroup.
 499          * Since the kernel does not (presently) allocate its memory
 500          * in a locality aware fashion, the root is an appropriate home.
 501          * If this thread is later associated with an lwp, it will have
 502          * its lgroup re-assigned at that time.
 503          */
 504         lgrp_move_thread(t, &cp_default.cp_lgrploads[LGRP_ROOTID], 1);
 505 
 506         /*
 507          * If the current CPU is in the default cpupart, use it.  Otherwise,
 508          * pick one that is; before entering the dispatcher code, we'll
 509          * make sure to keep the invariant that ->t_cpu is set.  (In fact, we
 510          * rely on this, in ht_should_run(), in the call tree of
 511          * disp_lowpri_cpu().)
 512          */
 513         if (CPU->cpu_part == &cp_default) {
 514                 t->t_cpu = CPU;
 515         } else {
 516                 t->t_cpu = cp_default.cp_cpulist;
 517                 t->t_cpu = disp_lowpri_cpu(t->t_cpu, t, t->t_pri);
 518         }
 519 
 520         t->t_disp_queue = t->t_cpu->cpu_disp;
 521         kpreempt_enable();
 522 
 523         /*
 524          * Initialize thread state and the dispatcher lock pointer.
 525          * Need to hold onto pidlock to block allthreads walkers until
 526          * the state is set.
 527          */
 528         switch (state) {
 529         case TS_RUN:
 530                 curthread->t_oldspl = splhigh();     /* get dispatcher spl */
 531                 THREAD_SET_STATE(t, TS_STOPPED, &transition_lock);
 532                 CL_SETRUN(t);
 533                 thread_unlock(t);
 534                 break;
 535 
 536         case TS_ONPROC:
 537                 THREAD_ONPROC(t, t->t_cpu);
 538                 break;
 539 
 540         case TS_FREE:
 541                 /*
 542                  * Free state will be used for intr threads.
 543                  * The interrupt routine must set the thread dispatcher
 544                  * lock pointer (t_lockp) if starting on a CPU
 545                  * other than the current one.
 546                  */
 547                 THREAD_FREEINTR(t, CPU);
 548                 break;
 549 
 550         case TS_STOPPED:
 551                 THREAD_SET_STATE(t, TS_STOPPED, &stop_lock);
 552                 break;
 553 
 554         default:                        /* TS_SLEEP, TS_ZOMB or TS_TRANS */
 555                 cmn_err(CE_PANIC, "thread_create: invalid state %d", state);
 556         }
 557         mutex_exit(&pidlock);
 558         return (t);
 559 }
 560 
 561 /*
 562  * Move thread to project0 and take care of project reference counters.
 563  */
 564 void
 565 thread_rele(kthread_t *t)
 566 {
 567         kproject_t *kpj;
 568 
 569         thread_lock(t);
 570 
 571         ASSERT(t == curthread || t->t_state == TS_FREE || t->t_procp == &p0);
 572         kpj = ttoproj(t);
 573         t->t_proj = proj0p;
 574 
 575         thread_unlock(t);
 576 
 577         if (kpj != proj0p) {
 578                 project_rele(kpj);
 579                 (void) project_hold(proj0p);
 580         }
 581 }
 582 
 583 void
 584 thread_exit(void)
 585 {
 586         kthread_t *t = curthread;
 587 
 588         if ((t->t_proc_flag & TP_ZTHREAD) != 0)
 589                 cmn_err(CE_PANIC, "thread_exit: zthread_exit() not called");
 590 
 591         tsd_exit();             /* Clean up this thread's TSD */
 592 
 593         kcpc_passivate();       /* clean up performance counter state */
 594 
 595         /*
 596          * No kernel thread should have called poll() without arranging
 597          * to call pollcleanup() here.
 598          */
 599         ASSERT(t->t_pollstate == NULL);
 600         ASSERT(t->t_schedctl == NULL);
 601         if (t->t_door)
 602                 door_slam();    /* in case thread did an upcall */
 603 
 604 #ifndef NPROBE
 605         /* Kernel probe */
 606         if (t->t_tnf_tpdp)
 607                 tnf_thread_exit();
 608 #endif /* NPROBE */
 609 
 610         thread_rele(t);
 611         t->t_preempt++;
 612 
 613         /*
 614          * remove thread from the all threads list so that
 615          * death-row can use the same pointers.
 616          */
 617         mutex_enter(&pidlock);
 618         t->t_next->t_prev = t->t_prev;
 619         t->t_prev->t_next = t->t_next;
 620         ASSERT(allthreads != t);        /* t0 never exits */
 621         cv_broadcast(&t->t_joincv);      /* wake up anyone in thread_join */
 622         mutex_exit(&pidlock);
 623 
 624         if (t->t_ctx != NULL)
 625                 exitctx(t);
 626         if (t->t_procp->p_pctx != NULL)
 627                 exitpctx(t->t_procp);
 628 
 629         if (kmem_stackinfo != 0) {
 630                 stkinfo_end(t);
 631         }
 632 
 633         t->t_state = TS_ZOMB;        /* set zombie thread */
 634 
 635         swtch_from_zombie();    /* give up the CPU */
 636         /* NOTREACHED */
 637 }
 638 
 639 /*
 640  * Check to see if the specified thread is active (defined as being on
 641  * the thread list).  This is certainly a slow way to do this; if there's
 642  * ever a reason to speed it up, we could maintain a hash table of active
 643  * threads indexed by their t_did.
 644  */
 645 static kthread_t *
 646 did_to_thread(kt_did_t tid)
 647 {
 648         kthread_t *t;
 649 
 650         ASSERT(MUTEX_HELD(&pidlock));
 651         for (t = curthread->t_next; t != curthread; t = t->t_next) {
 652                 if (t->t_did == tid)
 653                         break;
 654         }
 655         if (t->t_did == tid)
 656                 return (t);
 657         else
 658                 return (NULL);
 659 }
 660 
 661 /*
 662  * Wait for specified thread to exit.  Returns immediately if the thread
 663  * could not be found, meaning that it has either already exited or never
 664  * existed.
 665  */
 666 void
 667 thread_join(kt_did_t tid)
 668 {
 669         kthread_t *t;
 670 
 671         ASSERT(tid != curthread->t_did);
 672         ASSERT(tid != t0.t_did);
 673 
 674         mutex_enter(&pidlock);
 675         /*
 676          * Make sure we check that the thread is on the thread list
 677          * before blocking on it; otherwise we could end up blocking on
 678          * a cv that's already been freed.  In other words, don't cache
 679          * the thread pointer across calls to cv_wait.
 680          *
 681          * The choice of loop invariant means that whenever a thread
 682          * is taken off the allthreads list, a cv_broadcast must be
 683          * performed on that thread's t_joincv to wake up any waiters.
 684          * The broadcast doesn't have to happen right away, but it
 685          * shouldn't be postponed indefinitely (e.g., by doing it in
 686          * thread_free(), which may only be executed when the deathrow
 687          * queue is processed).
 688          */
 689         while (t = did_to_thread(tid))
 690                 cv_wait(&t->t_joincv, &pidlock);
 691         mutex_exit(&pidlock);
 692 }
 693 
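     /*
      * thread_free_prevent() and thread_free_allow() bracket a region in
      * which the given thread must not be thread_free()d.  They do this by
      * holding the per-hash free lock that thread_free() passes through in
      * thread_free_barrier() below; the tick accounting code uses this to
      * keep a thread from disappearing while it is being examined.
      */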
 694 void
 695 thread_free_prevent(kthread_t *t)
 696 {
 697         kmutex_t *lp;
 698 
 699         lp = &thread_free_lock[THREAD_FREE_HASH(t)].tf_lock;
 700         mutex_enter(lp);
 701 }
 702 
 703 void
 704 thread_free_allow(kthread_t *t)
 705 {
 706         kmutex_t *lp;
 707 
 708         lp = &thread_free_lock[THREAD_FREE_HASH(t)].tf_lock;
 709         mutex_exit(lp);
 710 }
 711 
 712 static void
 713 thread_free_barrier(kthread_t *t)
 714 {
 715         kmutex_t *lp;
 716 
 717         lp = &thread_free_lock[THREAD_FREE_HASH(t)].tf_lock;
 718         mutex_enter(lp);
 719         mutex_exit(lp);
 720 }
 721 
 722 void
 723 thread_free(kthread_t *t)
 724 {
 725         boolean_t allocstk = (t->t_flag & T_TALLOCSTK);
 726         klwp_t *lwp = t->t_lwp;
 727         caddr_t swap = t->t_swap;
 728 
 729         ASSERT(t != &t0 && t->t_state == TS_FREE);
 730         ASSERT(t->t_door == NULL);
 731         ASSERT(t->t_schedctl == NULL);
 732         ASSERT(t->t_pollstate == NULL);
 733 
 734         t->t_pri = 0;
 735         t->t_pc = 0;
 736         t->t_sp = 0;
 737         t->t_wchan0 = NULL;
 738         t->t_wchan = NULL;
 739         if (t->t_cred != NULL) {
 740                 crfree(t->t_cred);
 741                 t->t_cred = 0;
 742         }
 743         if (t->t_pdmsg) {
 744                 kmem_free(t->t_pdmsg, strlen(t->t_pdmsg) + 1);
 745                 t->t_pdmsg = NULL;
 746         }
 747         if (audit_active)
 748                 audit_thread_free(t);
 749 #ifndef NPROBE
 750         if (t->t_tnf_tpdp)
 751                 tnf_thread_free(t);
 752 #endif /* NPROBE */
 753         if (t->t_cldata) {
 754                 CL_EXITCLASS(t->t_cid, (caddr_t *)t->t_cldata);
 755         }
 756         if (t->t_rprof != NULL) {
 757                 kmem_free(t->t_rprof, sizeof (*t->t_rprof));
 758                 t->t_rprof = NULL;
 759         }
 760         t->t_lockp = NULL;   /* nothing should try to lock this thread now */
 761         if (lwp)
 762                 lwp_freeregs(lwp, 0);
 763         if (t->t_ctx)
 764                 freectx(t, 0);
 765         t->t_stk = NULL;
 766         if (lwp)
 767                 lwp_stk_fini(lwp);
 768         lock_clear(&t->t_lock);
 769 
 770         if (t->t_ts->ts_waiters > 0)
 771                 panic("thread_free: turnstile still active");
 772 
 773         kmem_cache_free(turnstile_cache, t->t_ts);
 774 
 775         free_afd(&t->t_activefd);
 776 
 777         /*
 778          * Barrier for the tick accounting code.  The tick accounting code
 779          * holds this lock to keep the thread from going away while it's
 780          * looking at it.
 781          */
 782         thread_free_barrier(t);
 783 
 784         ASSERT(ttoproj(t) == proj0p);
 785         project_rele(ttoproj(t));
 786 
 787         lgrp_affinity_free(&t->t_lgrp_affinity);
 788 
 789         mutex_enter(&pidlock);
 790         nthread--;
 791         mutex_exit(&pidlock);
 792 
 793         if (t->t_name != NULL) {
 794                 kmem_free(t->t_name, THREAD_NAME_MAX);
 795                 t->t_name = NULL;
 796         }
 797 
 798         /*
 799          * Free thread, lwp and stack.  This needs to be done carefully, since
 800          * if T_TALLOCSTK is set, the thread is part of the stack.
 801          */
 802         t->t_lwp = NULL;
 803         t->t_swap = NULL;
 804 
 805         if (swap) {
 806                 segkp_release(segkp, swap);
 807         }
 808         if (lwp) {
 809                 kmem_cache_free(lwp_cache, lwp);
 810         }
 811         if (!allocstk) {
 812                 kmem_cache_free(thread_cache, t);
 813         }
 814 }
 815 
 816 /*
 817  * Removes threads associated with the given zone from a deathrow queue.
 818  * tp is a pointer to the head of the deathrow queue, and countp is a
 819  * pointer to the current deathrow count.  Returns a linked list of
 820  * threads removed from the list.
 821  */
 822 static kthread_t *
 823 thread_zone_cleanup(kthread_t **tp, int *countp, zoneid_t zoneid)
 824 {
 825         kthread_t *tmp, *list = NULL;
 826         cred_t *cr;
 827 
 828         ASSERT(MUTEX_HELD(&reaplock));
 829         while (*tp != NULL) {
 830                 if ((cr = (*tp)->t_cred) != NULL && crgetzoneid(cr) == zoneid) {
 831                         tmp = *tp;
 832                         *tp = tmp->t_forw;
 833                         tmp->t_forw = list;
 834                         list = tmp;
 835                         (*countp)--;
 836                 } else {
 837                         tp = &(*tp)->t_forw;
 838                 }
 839         }
 840         return (list);
 841 }
 842 
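     /*
      * Free every thread on the given reap list, following the t_forw
      * linkage used to chain deathrow entries.
      */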
 843 static void
 844 thread_reap_list(kthread_t *t)
 845 {
 846         kthread_t *next;
 847 
 848         while (t != NULL) {
 849                 next = t->t_forw;
 850                 thread_free(t);
 851                 t = next;
 852         }
 853 }
 854 
 855 /* ARGSUSED */
 856 static void
 857 thread_zone_destroy(zoneid_t zoneid, void *unused)
 858 {
 859         kthread_t *t, *l;
 860 
 861         mutex_enter(&reaplock);
 862         /*
 863          * Pull threads and lwps associated with zone off deathrow lists.
 864          */
 865         t = thread_zone_cleanup(&thread_deathrow, &thread_reapcnt, zoneid);
 866         l = thread_zone_cleanup(&lwp_deathrow, &lwp_reapcnt, zoneid);
 867         mutex_exit(&reaplock);
 868 
 869         /*
 870          * Guard against race condition in mutex_owner_running:
 871          *      thread=owner(mutex)
 872          *      <interrupt>
 873          *                              thread exits mutex
 874          *                              thread exits
 875          *                              thread reaped
 876          *                              thread struct freed
 877          * cpu = thread->t_cpu <- BAD POINTER DEREFERENCE.
 878          * A cross call to all cpus will cause the interrupt handler
 879          * to reset the PC if it is in mutex_owner_running, refreshing
 880          * stale thread pointers.
 881          */
 882         mutex_sync();   /* sync with mutex code */
 883 
 884         /*
 885          * Reap threads
 886          */
 887         thread_reap_list(t);
 888 
 889         /*
 890          * Reap lwps
 891          */
 892         thread_reap_list(l);
 893 }
 894 
 895 /*
 896  * Clean up zombie threads that are on deathrow.
 897  */
 898 void
 899 thread_reaper()
 900 {
 901         kthread_t *t, *l;
 902         callb_cpr_t cprinfo;
 903 
 904         /*
 905          * Register callback to clean up threads when zone is destroyed.
 906          */
 907         zone_key_create(&zone_thread_key, NULL, NULL, thread_zone_destroy);
 908 
 909         CALLB_CPR_INIT(&cprinfo, &reaplock, callb_generic_cpr, "t_reaper");
 910         for (;;) {
 911                 mutex_enter(&reaplock);
 912                 while (thread_deathrow == NULL && lwp_deathrow == NULL) {
 913                         CALLB_CPR_SAFE_BEGIN(&cprinfo);
 914                         cv_wait(&reaper_cv, &reaplock);
 915                         CALLB_CPR_SAFE_END(&cprinfo, &reaplock);
 916                 }
 917                 /*
 918                  * mutex_sync() needs to be called when reaping, but
 919                  * not too often.  We limit reaping rate to once
 920                  * per second.  Reaplimit is max rate at which threads can
 921                  * be freed. Does not impact thread destruction/creation.
 922                  */
 923                 t = thread_deathrow;
 924                 l = lwp_deathrow;
 925                 thread_deathrow = NULL;
 926                 lwp_deathrow = NULL;
 927                 thread_reapcnt = 0;
 928                 lwp_reapcnt = 0;
 929                 mutex_exit(&reaplock);
 930 
 931                 /*
 932                  * Guard against race condition in mutex_owner_running:
 933                  *      thread=owner(mutex)
 934                  *      <interrupt>
 935                  *                              thread exits mutex
 936                  *                              thread exits
 937                  *                              thread reaped
 938                  *                              thread struct freed
 939                  * cpu = thread->t_cpu <- BAD POINTER DEREFERENCE.
 940                  * A cross call to all cpus will cause the interrupt handler
 941                  * to reset the PC if it is in mutex_owner_running, refreshing
 942                  * stale thread pointers.
 943                  */
 944                 mutex_sync();   /* sync with mutex code */
 945                 /*
 946                  * Reap threads
 947                  */
 948                 thread_reap_list(t);
 949 
 950                 /*
 951                  * Reap lwps
 952                  */
 953                 thread_reap_list(l);
 954                 delay(hz);
 955         }
 956 }
 957 
 958 /*
 959  * This is called by lwp_create(), etc., to put an lwp_deathrow thread onto
 960  * thread_deathrow. The thread's state has already been changed to TS_FREE to
 961  * indicate that it is reapable. The caller already holds the reaplock, and the
 962  * thread has already been freed.
 963  */
 964 void
 965 reapq_move_lq_to_tq(kthread_t *t)
 966 {
 967         ASSERT(t->t_state == TS_FREE);
 968         ASSERT(MUTEX_HELD(&reaplock));
 969         t->t_forw = thread_deathrow;
 970         thread_deathrow = t;
 971         thread_reapcnt++;
 972         if (lwp_reapcnt + thread_reapcnt > reaplimit)
 973                 cv_signal(&reaper_cv);  /* wake the reaper */
 974 }
 975 
 976 /*
 977  * This is called by resume() to put a zombie thread onto deathrow.
 978  * The thread's state is changed to TS_FREE to indicate that it is reapable.
 979  * This is called from the idle thread so it must not block - just spin.
 980  */
 981 void
 982 reapq_add(kthread_t *t)
 983 {
 984         mutex_enter(&reaplock);
 985 
 986         /*
 987          * lwp_deathrow contains threads with lwp linkage and
 988          * swappable thread stacks which have the default stacksize.
 989          * These threads' lwps and stacks may be reused by lwp_create().
 990          *
 991          * Anything else goes on thread_deathrow(), where it will eventually
 992          * be thread_free()d.
 993          */
 994         if (t->t_flag & T_LWPREUSE) {
 995                 ASSERT(ttolwp(t) != NULL);
 996                 t->t_forw = lwp_deathrow;
 997                 lwp_deathrow = t;
 998                 lwp_reapcnt++;
 999         } else {
1000                 t->t_forw = thread_deathrow;
1001                 thread_deathrow = t;
1002                 thread_reapcnt++;
1003         }
1004         if (lwp_reapcnt + thread_reapcnt > reaplimit)
1005                 cv_signal(&reaper_cv);      /* wake the reaper */
1006         t->t_state = TS_FREE;
1007         lock_clear(&t->t_lock);
1008 
1009         /*
1010          * Before we return, we need to grab and drop the thread lock for
1011          * the dead thread.  At this point, the current thread is the idle
1012          * thread, and the dead thread's CPU lock points to the current
1013          * CPU -- and we must grab and drop the lock to synchronize with
1014          * a racing thread walking a blocking chain that the zombie thread
1015          * was recently in.  By this point, that blocking chain is (by
1016          * definition) stale:  the dead thread is not holding any locks, and
1017          * is therefore not in any blocking chains -- but if we do not regrab
1018          * our lock before freeing the dead thread's data structures, the
1019          * thread walking the (stale) blocking chain will die on memory
1020          * corruption when it attempts to drop the dead thread's lock.  We
1021          * only need do this once because there is no way for the dead thread
1022          * to ever again be on a blocking chain:  once we have grabbed and
1023          * dropped the thread lock, we are guaranteed that anyone that could
1024          * have seen this thread in a blocking chain can no longer see it.
1025          */
1026         thread_lock(t);
1027         thread_unlock(t);
1028 
1029         mutex_exit(&reaplock);
1030 }
1031 
1032 /*
1033  * Provide an allocation function for callers of installctx() that, for
1034  * reasons of incomplete context-op initialization, must call installctx()
1035  * in a kpreempt_disable() block.  The caller, therefore, must call this
1036  * without being in such a block.
1037  */
1038 struct ctxop *
1039 installctx_preallocate(void)
1040 {
1041         /*
1042          * NOTE: We could ASSERT/VERIFY that we are not in a place where
1043          * a KM_SLEEP allocation could block indefinitely.
1044          *
1045          * ASSERT(curthread->t_preempt == 0);
1046          */
1047 
1048         return (kmem_alloc(sizeof (struct ctxop), KM_SLEEP));
1049 }
1050 
1051 /*
1052  * Install thread context ops for the current thread.
1053  * The caller can pass in a preallocated struct ctxop, eliminating the
1054  * requirement of entering with kernel preemption still enabled.
1055  */
1056 void
1057 installctx(
1058         kthread_t *t,
1059         void    *arg,
1060         void    (*save)(void *),
1061         void    (*restore)(void *),
1062         void    (*fork)(void *, void *),
1063         void    (*lwp_create)(void *, void *),
1064         void    (*exit)(void *),
1065         void    (*free)(void *, int),
1066         struct ctxop *ctx)
1067 {
1068         if (ctx == NULL)
1069                 ctx = kmem_alloc(sizeof (struct ctxop), KM_SLEEP);
1070 
1071         ctx->save_op = save;
1072         ctx->restore_op = restore;
1073         ctx->fork_op = fork;
1074         ctx->lwp_create_op = lwp_create;
1075         ctx->exit_op = exit;
1076         ctx->free_op = free;
1077         ctx->arg = arg;
1078         ctx->save_ts = 0;
1079         ctx->restore_ts = 0;
1080 
1081         /*
1082          * Keep ctxops in a doubly-linked list to allow traversal in both
1083          * directions.  Using only the newest-to-oldest ordering was adequate
1084          * previously, but reversing the order for restore_op actions is
1085          * necessary if later-added ctxops depend on earlier ones.
1086          *
1087          * One example of such a dependency:  Hypervisor software handling the
1088          * guest FPU expects to save its FPU state prior to host FPU handling
1089          * and, consequently, to handle the guest logic _after_ the host FPU has
1090          * been restored.
1091          *
1092          * The t_ctx member points to the most recently added ctxop or is NULL
1093          * if no ctxops are associated with the thread.  The 'next' pointers
1094          * form a loop of the ctxops in newest-to-oldest order.  The 'prev'
1095          * pointers form a loop in the reverse direction, where t_ctx->prev is
1096          * the oldest entry associated with the thread.
1097          *
1098          * The protection of kpreempt_disable is required to safely perform the
1099          * list insertion, since there are inconsistent states between some of
1100          * the pointer assignments.
1101          */
1102         kpreempt_disable();
1103         if (t->t_ctx == NULL) {
1104                 ctx->next = ctx;
1105                 ctx->prev = ctx;
1106         } else {
1107                 struct ctxop *head = t->t_ctx, *tail = t->t_ctx->prev;
1108 
1109                 ctx->next = head;
1110                 ctx->prev = tail;
1111                 head->prev = ctx;
1112                 tail->next = ctx;
1113         }
1114         t->t_ctx = ctx;
1115         kpreempt_enable();
1116 }
1117 
1118 /*
1119  * Remove the thread context ops from a thread.
1120  */
1121 int
1122 removectx(
1123         kthread_t *t,
1124         void    *arg,
1125         void    (*save)(void *),
1126         void    (*restore)(void *),
1127         void    (*fork)(void *, void *),
1128         void    (*lwp_create)(void *, void *),
1129         void    (*exit)(void *),
1130         void    (*free)(void *, int))
1131 {
1132         struct ctxop *ctx, *head;
1133 
1134         /*
1135          * The incoming kthread_t (which is the thread for which the
1136          * context ops will be removed) should be one of the following:
1137          *
1138          * a) the current thread,
1139          *
1140          * b) a thread of a process that's being forked (SIDL),
1141          *
1142          * c) a thread that belongs to the same process as the current
1143          *    thread and for which the current thread is the agent thread,
1144          *
1145          * d) a thread that is TS_STOPPED, which indicates (when curthread
1146          *    is not an agent) a thread being created as part of an lwp
1147          *    creation.
1148          */
1149         ASSERT(t == curthread || ttoproc(t)->p_stat == SIDL ||
1150             ttoproc(t)->p_agenttp == curthread || t->t_state == TS_STOPPED);
1151 
1152         /*
1153          * Serialize modifications to t->t_ctx to prevent the agent thread
1154          * and the target thread from racing with each other during lwp exit.
1155          */
1156         mutex_enter(&t->t_ctx_lock);
1157         kpreempt_disable();
1158 
1159         if (t->t_ctx == NULL) {
1160                 mutex_exit(&t->t_ctx_lock);
1161                 kpreempt_enable();
1162                 return (0);
1163         }
1164 
1165         ctx = head = t->t_ctx;
1166         do {
1167                 if (ctx->save_op == save && ctx->restore_op == restore &&
1168                     ctx->fork_op == fork && ctx->lwp_create_op == lwp_create &&
1169                     ctx->exit_op == exit && ctx->free_op == free &&
1170                     ctx->arg == arg) {
1171                         ctx->prev->next = ctx->next;
1172                         ctx->next->prev = ctx->prev;
1173                         if (ctx->next == ctx) {
1174                                 /* last remaining item */
1175                                 t->t_ctx = NULL;
1176                         } else if (ctx == t->t_ctx) {
1177                                 /* fix up head of list */
1178                                 t->t_ctx = ctx->next;
1179                         }
1180                         ctx->next = ctx->prev = NULL;
1181 
1182                         mutex_exit(&t->t_ctx_lock);
1183                         if (ctx->free_op != NULL)
1184                                 (ctx->free_op)(ctx->arg, 0);
1185                         kmem_free(ctx, sizeof (struct ctxop));
1186                         kpreempt_enable();
1187                         return (1);
1188                 }
1189 
1190                 ctx = ctx->next;
1191         } while (ctx != head);
1192 
1193         mutex_exit(&t->t_ctx_lock);
1194         kpreempt_enable();
1195         return (0);
1196 }
1197 
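     /*
      * savectx() runs the save_op of every context op on the current thread
      * in newest-to-oldest order; restorectx() below walks the list in the
      * opposite (oldest-to-newest) direction, per the ordering discussion in
      * installctx().
      */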
1198 void
1199 savectx(kthread_t *t)
1200 {
1201         ASSERT(t == curthread);
1202 
1203         if (t->t_ctx != NULL) {
1204                 struct ctxop *ctx, *head;
1205 
1206                 /* Forward traversal */
1207                 ctx = head = t->t_ctx;
1208                 do {
1209                         if (ctx->save_op != NULL) {
1210                                 ctx->save_ts = gethrtime_unscaled();
1211                                 (ctx->save_op)(ctx->arg);
1212                         }
1213                         ctx = ctx->next;
1214                 } while (ctx != head);
1215         }
1216 }
1217 
1218 void
1219 restorectx(kthread_t *t)
1220 {
1221         ASSERT(t == curthread);
1222 
1223         if (t->t_ctx != NULL) {
1224                 struct ctxop *ctx, *tail;
1225 
1226                 /* Backward traversal (starting at the tail) */
1227                 ctx = tail = t->t_ctx->prev;
1228                 do {
1229                         if (ctx->restore_op != NULL) {
1230                                 ctx->restore_ts = gethrtime_unscaled();
1231                                 (ctx->restore_op)(ctx->arg);
1232                         }
1233                         ctx = ctx->prev;
1234                 } while (ctx != tail);
1235         }
1236 }
1237 
1238 void
1239 forkctx(kthread_t *t, kthread_t *ct)
1240 {
1241         if (t->t_ctx != NULL) {
1242                 struct ctxop *ctx, *head;
1243 
1244                 /* Forward traversal */
1245                 ctx = head = t->t_ctx;
1246                 do {
1247                         if (ctx->fork_op != NULL) {
1248                                 (ctx->fork_op)(t, ct);
1249                         }
1250                         ctx = ctx->next;
1251                 } while (ctx != head);
1252         }
1253 }
1254 
1255 /*
1256  * Note that this operator is only invoked via the _lwp_create
1257  * system call.  The system may have other reasons to create lwps
1258  * e.g. the agent lwp or the doors unreferenced lwp.
1259  */
1260 void
1261 lwp_createctx(kthread_t *t, kthread_t *ct)
1262 {
1263         if (t->t_ctx != NULL) {
1264                 struct ctxop *ctx, *head;
1265 
1266                 /* Forward traversal */
1267                 ctx = head = t->t_ctx;
1268                 do {
1269                         if (ctx->lwp_create_op != NULL) {
1270                                 (ctx->lwp_create_op)(t, ct);
1271                         }
1272                         ctx = ctx->next;
1273                 } while (ctx != head);
1274         }
1275 }
1276 
1277 /*
1278  * exitctx is called from thread_exit() and lwp_exit() to perform any actions
1279  * needed when the thread/LWP leaves the processor for the last time. This
1280  * routine is not intended to deal with freeing memory; freectx() is used for
1281  * that purpose during thread_free(). This routine is provided to allow for
1282  * clean-up that can't wait until thread_free().
1283  */
1284 void
1285 exitctx(kthread_t *t)
1286 {
1287         if (t->t_ctx != NULL) {
1288                 struct ctxop *ctx, *head;
1289 
1290                 /* Forward traversal */
1291                 ctx = head = t->t_ctx;
1292                 do {
1293                         if (ctx->exit_op != NULL) {
1294                                 (ctx->exit_op)(t);
1295                         }
1296                         ctx = ctx->next;
1297                 } while (ctx != head);
1298         }
1299 }
1300 
1301 /*
1302  * freectx is called from thread_free() and exec() to get
1303  * rid of old thread context ops.
1304  */
1305 void
1306 freectx(kthread_t *t, int isexec)
1307 {
1308         kpreempt_disable();
1309         if (t->t_ctx != NULL) {
1310                 struct ctxop *ctx, *head;
1311 
1312                 ctx = head = t->t_ctx;
1313                 t->t_ctx = NULL;
1314                 do {
1315                         struct ctxop *next = ctx->next;
1316 
1317                         if (ctx->free_op != NULL) {
1318                                 (ctx->free_op)(ctx->arg, isexec);
1319                         }
1320                         kmem_free(ctx, sizeof (struct ctxop));
1321                         ctx = next;
1322                 } while (ctx != head);
1323         }
1324         kpreempt_enable();
1325 }
1326 
1327 /*
1328  * freectx_ctx is called from lwp_create() when an lwp is reused from
1329  * lwp_deathrow and its thread structure is added to thread_deathrow.
1330  * The thread structure to which this ctx was attached may already have
1331  * been freed by the thread reaper, so free_op implementations shouldn't
1332  * rely on that thread structure still being around.
1333  */
1334 void
1335 freectx_ctx(struct ctxop *ctx)
1336 {
1337         struct ctxop *head = ctx;
1338 
1339         ASSERT(ctx != NULL);
1340 
1341         kpreempt_disable();
1342 
1343         head = ctx;
1344         do {
1345                 struct ctxop *next = ctx->next;
1346 
1347                 if (ctx->free_op != NULL) {
1348                         (ctx->free_op)(ctx->arg, 0);
1349                 }
1350                 kmem_free(ctx, sizeof (struct ctxop));
1351                 ctx = next;
1352         } while (ctx != head);
1353         kpreempt_enable();
1354 }
1355 
1356 /*
1357  * Set the thread running; arrange for it to be swapped in if necessary.
1358  */
1359 void
1360 setrun_locked(kthread_t *t)
1361 {
1362         ASSERT(THREAD_LOCK_HELD(t));
1363         if (t->t_state == TS_SLEEP) {
1364                 /*
1365                  * Take off sleep queue.
1366                  */
1367                 SOBJ_UNSLEEP(t->t_sobj_ops, t);
1368         } else if (t->t_state & (TS_RUN | TS_ONPROC)) {
1369                 /*
1370                  * Already on dispatcher queue.
1371                  */
1372                 return;
1373         } else if (t->t_state == TS_WAIT) {
1374                 waitq_setrun(t);
1375         } else if (t->t_state == TS_STOPPED) {
1376                 /*
1377                  * All of the sending of SIGCONT (TC_XSTART) and /proc
1378                  * (TC_PSTART) and lwp_continue() (TC_CSTART) must have
1379                  * requested that the thread be run.
1380                  * Just calling setrun() is not sufficient to set a stopped
1381                  * thread running.  TP_TXSTART is always set if the thread
1382                  * is not stopped by a jobcontrol stop signal.
1383                  * TP_TPSTART is always set if /proc is not controlling it.
1384                  * TP_TCSTART is always set if lwp_suspend() didn't stop it.
1385                  * The thread won't be stopped unless one of these
1386                  * three mechanisms did it.
1387                  *
1388                  * These flags must be set before calling setrun_locked(t).
1389                  * They can't be passed as arguments because the streams
1390                  * code calls setrun() indirectly and the mechanism for
1391                  * doing so admits only one argument.  Note that the
1392                  * thread must be locked in order to change t_schedflags.
1393                  */
1394                 if ((t->t_schedflag & TS_ALLSTART) != TS_ALLSTART)
1395                         return;
1396                 /*
1397                  * Process is no longer stopped (a thread is running).
1398                  */
1399                 t->t_whystop = 0;
1400                 t->t_whatstop = 0;
1401                 /*
1402                  * Strictly speaking, we do not have to clear these
1403                  * flags here; they are cleared on entry to stop().
1404                  * However, they are confusing when doing kernel
1405                  * debugging or when they are revealed by ps(1).
1406                  */
1407                 t->t_schedflag &= ~TS_ALLSTART;
1408                 THREAD_TRANSITION(t);   /* drop stopped-thread lock */
1409                 ASSERT(t->t_lockp == &transition_lock);
1410                 ASSERT(t->t_wchan0 == NULL && t->t_wchan == NULL);
1411                 /*
1412                  * Let the class put the process on the dispatcher queue.
1413                  */
1414                 CL_SETRUN(t);
1415         }
1416 }
1417 
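     /*
      * Lock the thread, set it running, and drop the thread lock.
      */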
1418 void
1419 setrun(kthread_t *t)
1420 {
1421         thread_lock(t);
1422         setrun_locked(t);
1423         thread_unlock(t);
1424 }
1425 
1426 /*
1427  * Unpin an interrupted thread.
1428  *      When an interrupt occurs, the interrupt is handled on the stack
1429  *      of an interrupt thread, taken from a pool linked to the CPU structure.
1430  *
1431  *      When swtch() is switching away from an interrupt thread because it
1432  *      blocked or was preempted, this routine is called to complete the
1433  *      saving of the interrupted thread state, and returns the interrupted
1434  *      thread pointer so it may be resumed.
1435  *
1436  *      Called by swtch() only at high spl.
1437  */
1438 kthread_t *
1439 thread_unpin()
1440 {
1441         kthread_t       *t = curthread; /* current thread */
1442         kthread_t       *itp;           /* interrupted thread */
1443         int             i;              /* interrupt level */
1444         extern int      intr_passivate();
1445 
1446         ASSERT(t->t_intr != NULL);
1447 
1448         itp = t->t_intr;             /* interrupted thread */
1449         t->t_intr = NULL;            /* clear interrupt ptr */
1450 
1451         smt_end_intr();
1452 
1453         /*
1454          * Get state from interrupt thread for the one
1455          * it interrupted.
1456          */
1457 
1458         i = intr_passivate(t, itp);
1459 
1460         TRACE_5(TR_FAC_INTR, TR_INTR_PASSIVATE,
1461             "intr_passivate:level %d curthread %p (%T) ithread %p (%T)",
1462             i, t, t, itp, itp);
1463 
1464         /*
1465          * Dissociate the current thread from the interrupted thread's LWP.
1466          */
1467         t->t_lwp = NULL;
1468 
1469         /*
1470          * Interrupt handlers above the level that spinlocks block must
1471          * not block.
1472          */
1473 #if DEBUG
1474         if (i < 0 || i > LOCK_LEVEL)
1475                 cmn_err(CE_PANIC, "thread_unpin: ipl out of range %x", i);
1476 #endif
1477 
1478         /*
1479          * Compute the CPU's base interrupt level based on the active
1480          * interrupts.
1481          */
1482         ASSERT(CPU->cpu_intr_actv & (1 << i));
1483         set_base_spl();
1484 
1485         return (itp);
1486 }
1487 
1488 /*
1489  * Create and initialize an interrupt thread.
1490  *      Never fails: thread_create() blocks for memory if necessary.
1491  *      Called at spl7() or better.
1492  */
1493 void
1494 thread_create_intr(struct cpu *cp)
1495 {
1496         kthread_t *tp;
1497 
1498         tp = thread_create(NULL, 0,
1499             (void (*)())thread_create_intr, NULL, 0, &p0, TS_ONPROC, 0);
1500 
1501         /*
1502          * Set the thread in the TS_FREE state.  The state will change
1503          * to TS_ONPROC only while the interrupt is active.  Think of these
1504          * as being on a private free list for the CPU.  Being TS_FREE keeps
1505          * inactive interrupt threads out of debugger thread lists.
1506          *
1507          * We cannot call thread_create with TS_FREE because of the current
1508          * checks there for ONPROC.  Fix this when thread_create takes flags.
1509          */
1510         THREAD_FREEINTR(tp, cp);
1511 
1512         /*
1513          * Nobody should ever reference the credentials of an interrupt
1514          * thread so make it NULL to catch any such references.
1515          */
1516         tp->t_cred = NULL;
1517         tp->t_flag |= T_INTR_THREAD;
1518         tp->t_cpu = cp;
1519         tp->t_bound_cpu = cp;
1520         tp->t_disp_queue = cp->cpu_disp;
1521         tp->t_affinitycnt = 1;
1522         tp->t_preempt = 1;
1523 
1524         /*
1525          * Don't make a user-requested binding on this thread so that
1526          * the processor can be offlined.
1527          */
1528         tp->t_bind_cpu = PBIND_NONE; /* no USER-requested binding */
1529         tp->t_bind_pset = PS_NONE;
1530 
1531 #if defined(__i386) || defined(__amd64)
1532         tp->t_stk -= STACK_ALIGN;
1533         *(tp->t_stk) = 0;            /* terminate intr thread stack */
1534 #endif
1535 
1536         /*
1537          * Link onto CPU's interrupt pool.
1538          */
1539         tp->t_link = cp->cpu_intr_thread;
1540         cp->cpu_intr_thread = tp;
1541 }
1542 
1543 /*
1544  * TSD -- THREAD SPECIFIC DATA
1545  */
1546 static kmutex_t         tsd_mutex;       /* protects keys and tsd_list */
1547 static uint_t           tsd_nkeys;       /* size of destructor array */
1548 /* per-key destructor funcs */
1549 static void             (**tsd_destructor)(void *);
1550 /* list of tsd_thread's */
1551 static struct tsd_thread        *tsd_list;
1552 
1553 /*
1554  * Default destructor
1555  *      Needed because NULL destructor means that the key is unused
1556  */
1557 /* ARGSUSED */
1558 void
1559 tsd_defaultdestructor(void *value)
1560 {}
1561 
1562 /*
1563  * Create a key (index into per thread array)
1564  *      Locks out tsd_create, tsd_destroy, and tsd_exit
1565  *      May allocate memory with lock held
1566  */
1567 void
1568 tsd_create(uint_t *keyp, void (*destructor)(void *))
1569 {
1570         int     i;
1571         uint_t  nkeys;
1572 
1573         /*
1574          * if key is allocated, do nothing
1575          */
1576         mutex_enter(&tsd_mutex);
1577         if (*keyp) {
1578                 mutex_exit(&tsd_mutex);
1579                 return;
1580         }
1581         /*
1582          * find an unused key
1583          */
1584         if (destructor == NULL)
1585                 destructor = tsd_defaultdestructor;
1586 
1587         for (i = 0; i < tsd_nkeys; ++i)
1588                 if (tsd_destructor[i] == NULL)
1589                         break;
1590 
1591         /*
1592          * if no unused keys, increase the size of the destructor array
1593          */
1594         if (i == tsd_nkeys) {
1595                 if ((nkeys = (tsd_nkeys << 1)) == 0)
1596                         nkeys = 1;
1597                 tsd_destructor =
1598                     (void (**)(void *))tsd_realloc((void *)tsd_destructor,
1599                     (size_t)(tsd_nkeys * sizeof (void (*)(void *))),
1600                     (size_t)(nkeys * sizeof (void (*)(void *))));
1601                 tsd_nkeys = nkeys;
1602         }
1603 
1604         /*
1605          * allocate the next available unused key
1606          */
1607         tsd_destructor[i] = destructor;
1608         *keyp = i + 1;
1609         mutex_exit(&tsd_mutex);
1610 }
1611 
1612 /*
1613  * Destroy a key -- this is for unloadable modules
1614  *
1615  * Assumes that the caller is preventing tsd_set and tsd_get
1616  * Locks out tsd_create, tsd_destroy, and tsd_exit
1617  * May free memory with lock held
1618  */
1619 void
1620 tsd_destroy(uint_t *keyp)
1621 {
1622         uint_t key;
1623         struct tsd_thread *tsd;
1624 
1625         /*
1626          * protect the key namespace and our destructor lists
1627          */
1628         mutex_enter(&tsd_mutex);
1629         key = *keyp;
1630         *keyp = 0;
1631 
1632         ASSERT(key <= tsd_nkeys);
1633 
1634         /*
1635          * if the key is valid
1636          */
1637         if (key != 0) {
1638                 uint_t k = key - 1;
1639                 /*
1640                  * for every thread with TSD, call key's destructor
1641                  */
1642                 for (tsd = tsd_list; tsd; tsd = tsd->ts_next) {
1643                         /*
1644                          * no TSD for key in this thread
1645                          */
1646                         if (key > tsd->ts_nkeys)
1647                                 continue;
1648                         /*
1649                          * call destructor for key
1650                          */
1651                         if (tsd->ts_value[k] && tsd_destructor[k])
1652                                 (*tsd_destructor[k])(tsd->ts_value[k]);
1653                         /*
1654                          * reset value for key
1655                          */
1656                         tsd->ts_value[k] = NULL;
1657                 }
1658                 /*
1659                  * actually free the key (NULL destructor == unused)
1660                  */
1661                 tsd_destructor[k] = NULL;
1662         }
1663 
1664         mutex_exit(&tsd_mutex);
1665 }
1666 
1667 /*
1668  * Quickly return the per thread value that was stored with the specified key
1669  * Assumes the caller is protecting key from tsd_create and tsd_destroy
1670  */
1671 void *
1672 tsd_get(uint_t key)
1673 {
1674         return (tsd_agent_get(curthread, key));
1675 }
1676 
1677 /*
1678  * Set a per thread value indexed with the specified key
1679  */
1680 int
1681 tsd_set(uint_t key, void *value)
1682 {
1683         return (tsd_agent_set(curthread, key, value));
1684 }
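
/*
 * A minimal usage sketch of the TSD interfaces above (the my_key, my_state_t,
 * my_destructor and related names are hypothetical, not part of this file):
 *
 *	static uint_t my_key;			(zero means "not yet created")
 *
 *	tsd_create(&my_key, my_destructor);	(once, e.g. at module load)
 *	...
 *	my_state_t *sp = kmem_zalloc(sizeof (*sp), KM_SLEEP);
 *	(void) tsd_set(my_key, sp);		(stored for the current thread)
 *	...
 *	sp = tsd_get(my_key);			(later, on the same thread)
 *	...
 *	tsd_destroy(&my_key);			(at module unload; runs the
 *						 destructor for every thread
 *						 that still has a value)
 */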
1685 
1686 /*
1687  * Like tsd_get(), except that the agent lwp can get the tsd of another
1688  * thread in the same process (the agent thread only runs when the process
1689  * is completely stopped by /proc), or while syslwp is creating a new lwp.
1690  */
1691 void *
1692 tsd_agent_get(kthread_t *t, uint_t key)
1693 {
1694         struct tsd_thread *tsd = t->t_tsd;
1695 
1696         ASSERT(t == curthread ||
1697             ttoproc(t)->p_agenttp == curthread || t->t_state == TS_STOPPED);
1698 
1699         if (key && tsd != NULL && key <= tsd->ts_nkeys)
1700                 return (tsd->ts_value[key - 1]);
1701         return (NULL);
1702 }
1703 
1704 /*
1705  * Like tsd_set(), except that the agent lwp can set the tsd of
1706  * another thread in the same process, or syslwp can set the tsd
1707  * of a thread it's in the middle of creating.
1708  *
1709  * Assumes the caller is protecting key from tsd_create and tsd_destroy
1710  * May lock out tsd_destroy (and tsd_create), may allocate memory with
1711  * lock held
1712  */
1713 int
1714 tsd_agent_set(kthread_t *t, uint_t key, void *value)
1715 {
1716         struct tsd_thread *tsd = t->t_tsd;
1717 
1718         ASSERT(t == curthread ||
1719             ttoproc(t)->p_agenttp == curthread || t->t_state == TS_STOPPED);
1720 
1721         if (key == 0)
1722                 return (EINVAL);
1723         if (tsd == NULL)
1724                 tsd = t->t_tsd = kmem_zalloc(sizeof (*tsd), KM_SLEEP);
1725         if (key <= tsd->ts_nkeys) {
1726                 tsd->ts_value[key - 1] = value;
1727                 return (0);
1728         }
1729 
1730         ASSERT(key <= tsd_nkeys);
1731 
1732         /*
1733          * lock out tsd_destroy()
1734          */
1735         mutex_enter(&tsd_mutex);
1736         if (tsd->ts_nkeys == 0) {
1737                 /*
1738                  * Link onto list of threads with TSD
1739                  */
1740                 if ((tsd->ts_next = tsd_list) != NULL)
1741                         tsd_list->ts_prev = tsd;
1742                 tsd_list = tsd;
1743         }
1744 
1745         /*
1746          * Allocate thread local storage and set the value for key
1747          */
1748         tsd->ts_value = tsd_realloc(tsd->ts_value,
1749             tsd->ts_nkeys * sizeof (void *),
1750             key * sizeof (void *));
1751         tsd->ts_nkeys = key;
1752         tsd->ts_value[key - 1] = value;
1753         mutex_exit(&tsd_mutex);
1754 
1755         return (0);
1756 }
1757 
1758 
1759 /*
1760  * Return the per thread value that was stored with the specified key
1761  *      If necessary, create the key and the value
1762  *      Assumes the caller is protecting *keyp from tsd_destroy
1763  */
1764 void *
1765 tsd_getcreate(uint_t *keyp, void (*destroy)(void *), void *(*allocate)(void))
1766 {
1767         void *value;
1768         uint_t key = *keyp;
1769         struct tsd_thread *tsd = curthread->t_tsd;
1770 
1771         if (tsd == NULL)
1772                 tsd = curthread->t_tsd = kmem_zalloc(sizeof (*tsd), KM_SLEEP);
1773         if (key && key <= tsd->ts_nkeys && (value = tsd->ts_value[key - 1]))
1774                 return (value);
1775         if (key == 0)
1776                 tsd_create(keyp, destroy);
1777         (void) tsd_set(*keyp, value = (*allocate)());
1778 
1779         return (value);
1780 }
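
/*
 * A minimal sketch (hypothetical names): tsd_getcreate() is the lazy form of
 * the interfaces above, creating both the key and the per-thread value on
 * first use:
 *
 *	my_state_t *sp = tsd_getcreate(&my_key, my_destructor, my_allocator);
 */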
1781 
1782 /*
1783  * Called from thread_exit() to run the destructor function for each tsd
1784  *      Locks out tsd_create and tsd_destroy
1785  *      Assumes that the destructor *DOES NOT* use tsd
1786  */
1787 void
1788 tsd_exit(void)
1789 {
1790         int i;
1791         struct tsd_thread *tsd = curthread->t_tsd;
1792 
1793         if (tsd == NULL)
1794                 return;
1795 
1796         if (tsd->ts_nkeys == 0) {
1797                 kmem_free(tsd, sizeof (*tsd));
1798                 curthread->t_tsd = NULL;
1799                 return;
1800         }
1801 
1802         /*
1803          * lock out tsd_create and tsd_destroy, call
1804          * the destructor, and mark the value as destroyed.
1805          */
1806         mutex_enter(&tsd_mutex);
1807 
1808         for (i = 0; i < tsd->ts_nkeys; i++) {
1809                 if (tsd->ts_value[i] && tsd_destructor[i])
1810                         (*tsd_destructor[i])(tsd->ts_value[i]);
1811                 tsd->ts_value[i] = NULL;
1812         }
1813 
1814         /*
1815          * remove from linked list of threads with TSD
1816          */
1817         if (tsd->ts_next)
1818                 tsd->ts_next->ts_prev = tsd->ts_prev;
1819         if (tsd->ts_prev)
1820                 tsd->ts_prev->ts_next = tsd->ts_next;
1821         if (tsd_list == tsd)
1822                 tsd_list = tsd->ts_next;
1823 
1824         mutex_exit(&tsd_mutex);
1825 
1826         /*
1827          * free up the TSD
1828          */
1829         kmem_free(tsd->ts_value, tsd->ts_nkeys * sizeof (void *));
1830         kmem_free(tsd, sizeof (struct tsd_thread));
1831         curthread->t_tsd = NULL;
1832 }
1833 
1834 /*
1835  * Zeroing realloc: copy into a larger zeroed buffer, free the old one.
1836  */
1837 static void *
1838 tsd_realloc(void *old, size_t osize, size_t nsize)
1839 {
1840         void *new;
1841 
1842         new = kmem_zalloc(nsize, KM_SLEEP);
1843         if (old) {
1844                 bcopy(old, new, osize);
1845                 kmem_free(old, osize);
1846         }
1847         return (new);
1848 }
1849 
1850 /*
1851  * Return non-zero if an interrupt is being serviced.
1852  */
1853 int
1854 servicing_interrupt()
1855 {
1856         int onintr = 0;
1857 
1858         /* Are we an interrupt thread? */
1859         if (curthread->t_flag & T_INTR_THREAD)
1860                 return (1);
1861         /* Are we servicing a high level interrupt? */
1862         if (CPU_ON_INTR(CPU)) {
1863                 kpreempt_disable();
1864                 onintr = CPU_ON_INTR(CPU);
1865                 kpreempt_enable();
1866         }
1867         return (onintr);
1868 }
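
/*
 * A minimal caller sketch (not from this file): code that can be reached from
 * both thread and interrupt context may use this to choose a non-blocking
 * allocation policy when running in interrupt context.
 *
 *	int kmflag = servicing_interrupt() ? KM_NOSLEEP : KM_SLEEP;
 *	buf = kmem_alloc(size, kmflag);
 */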
1869 
1870 
1871 /*
1872  * Change the dispatch priority of a thread in the system.
1873  * Used when raising or lowering a thread's priority.
1874  * (E.g., priority inheritance)
1875  *
1876  * Since threads are queued according to their priority, we
1877  * must check the thread's state to determine whether it
1878  * is on a queue somewhere. If it is, we've got to:
1879  *
1880  *      o Dequeue the thread.
1881  *      o Change its effective priority.
1882  *      o Enqueue the thread.
1883  *
1884  * Assumptions: The thread whose priority we wish to change
1885  * must be locked before we call thread_change_(e)pri().
1886  * The thread_change_(e)pri() functions don't drop the thread
1887  * lock--that must be done by their caller.
1888  */
1889 void
1890 thread_change_epri(kthread_t *t, pri_t disp_pri)
1891 {
1892         uint_t  state;
1893 
1894         ASSERT(THREAD_LOCK_HELD(t));
1895 
1896         /*
1897          * If the inherited priority hasn't actually changed,
1898          * just return.
1899          */
1900         if (t->t_epri == disp_pri)
1901                 return;
1902 
1903         state = t->t_state;
1904 
1905         /*
1906          * If it's not on a queue, change the priority with impunity.
1907          */
1908         if ((state & (TS_SLEEP | TS_RUN | TS_WAIT)) == 0) {
1909                 t->t_epri = disp_pri;
1910                 if (state == TS_ONPROC) {
1911                         cpu_t *cp = t->t_disp_queue->disp_cpu;
1912 
1913                         if (t == cp->cpu_dispthread)
1914                                 cp->cpu_dispatch_pri = DISP_PRIO(t);
1915                 }
1916         } else if (state == TS_SLEEP) {
1917                 /*
1918                  * Take the thread out of its sleep queue.
1919                  * Change the inherited priority.
1920                  * Re-enqueue the thread.
1921                  * Each synchronization object exports a function
1922                  * to do this in an appropriate manner.
1923                  */
1924                 SOBJ_CHANGE_EPRI(t->t_sobj_ops, t, disp_pri);
1925         } else if (state == TS_WAIT) {
1926                 /*
1927                  * Re-enqueue a thread on the wait queue if its
1928                  * effective priority needs to change.
1929                  */
1930                 if (disp_pri != t->t_epri)
1931                         waitq_change_pri(t, disp_pri);
1932         } else {
1933                 /*
1934                  * The thread is on a run queue.
1935                  * Note: setbackdq() may not put the thread
1936                  * back on the same run queue where it originally
1937                  * resided.
1938                  */
1939                 (void) dispdeq(t);
1940                 t->t_epri = disp_pri;
1941                 setbackdq(t);
1942         }
1943         schedctl_set_cidpri(t);
1944 }
1945 
1946 /*
1947  * Function: Change the t_pri field of a thread.
1948  * Side Effects: Adjust the thread ordering on a run queue
1949  *               or sleep queue, if necessary.
1950  * Returns: 1 if the thread was on a run queue, else 0.
1951  */
1952 int
1953 thread_change_pri(kthread_t *t, pri_t disp_pri, int front)
1954 {
1955         uint_t  state;
1956         int     on_rq = 0;
1957 
1958         ASSERT(THREAD_LOCK_HELD(t));
1959 
1960         state = t->t_state;
1961         THREAD_WILLCHANGE_PRI(t, disp_pri);
1962 
1963         /*
1964          * If it's not on a queue, change the priority with impunity.
1965          */
1966         if ((state & (TS_SLEEP | TS_RUN | TS_WAIT)) == 0) {
1967                 t->t_pri = disp_pri;
1968 
1969                 if (state == TS_ONPROC) {
1970                         cpu_t *cp = t->t_disp_queue->disp_cpu;
1971 
1972                         if (t == cp->cpu_dispthread)
1973                                 cp->cpu_dispatch_pri = DISP_PRIO(t);
1974                 }
1975         } else if (state == TS_SLEEP) {
1976                 /*
1977                  * If the priority has changed, take the thread out of
1978                  * its sleep queue and change the priority.
1979                  * Re-enqueue the thread.
1980                  * Each synchronization object exports a function
1981                  * to do this in an appropriate manner.
1982                  */
1983                 if (disp_pri != t->t_pri)
1984                         SOBJ_CHANGE_PRI(t->t_sobj_ops, t, disp_pri);
1985         } else if (state == TS_WAIT) {
1986                 /*
1987                  * Re-enqueue a thread on the wait queue if its
1988                  * priority needs to change.
1989                  */
1990                 if (disp_pri != t->t_pri)
1991                         waitq_change_pri(t, disp_pri);
1992         } else {
1993                 /*
1994                  * The thread is on a run queue.
1995                  * Note: setbackdq() may not put the thread
1996                  * back on the same run queue where it originally
1997                  * resided.
1998                  *
1999                  * We still requeue the thread even if the priority
2000                  * is unchanged to preserve round-robin (and other)
2001                  * effects between threads of the same priority.
2002                  */
2003                 on_rq = dispdeq(t);
2004                 ASSERT(on_rq);
2005                 t->t_pri = disp_pri;
2006                 if (front) {
2007                         setfrontdq(t);
2008                 } else {
2009                         setbackdq(t);
2010                 }
2011         }
2012         schedctl_set_cidpri(t);
2013         return (on_rq);
2014 }
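
/*
 * A minimal caller sketch (hypothetical): per the assumptions above, the
 * thread lock is acquired by the caller, held across the priority change,
 * and released by the caller.
 *
 *	thread_lock(t);
 *	(void) thread_change_pri(t, new_pri, 0);
 *	thread_unlock(t);
 */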
2015 
2016 /*
2017  * When the kmem_stackinfo tunable is set, fill the kernel thread stack
2018  * with a specific pattern so that actual stack usage can be measured later.
2019  */
2020 static void
2021 stkinfo_begin(kthread_t *t)
2022 {
2023         caddr_t start;  /* stack start */
2024         caddr_t end;    /* stack end  */
2025         uint64_t *ptr;  /* pattern pointer */
2026 
2027         /*
2028          * The stack grows up or down (see thread_create());
2029          * compute the stack memory area start and end (start < end).
2030          */
2031         if (t->t_stk > t->t_stkbase) {
2032                 /* stack grows down */
2033                 start = t->t_stkbase;
2034                 end = t->t_stk;
2035         } else {
2036                 /* stack grows up */
2037                 start = t->t_stk;
2038                 end = t->t_stkbase;
2039         }
2040 
2041         /*
2042          * The stackinfo pattern size is 8 bytes. Ensure proper 8-byte
2043          * alignment of start and end within the stack area boundaries
2044          * (protection against corrupt t_stkbase/t_stk data).
2045          */
2046         if ((((uintptr_t)start) & 0x7) != 0) {
2047                 start = (caddr_t)((((uintptr_t)start) & (~0x7)) + 8);
2048         }
2049         end = (caddr_t)(((uintptr_t)end) & (~0x7));
2050 
2051         if ((end <= start) || (end - start) > (1024 * 1024)) {
2052                 /* negative or stack size > 1 meg, assume bogus */
2053                 return;
2054         }
2055 
2056         /* fill stack area with a pattern (instead of zeros) */
2057         ptr = (uint64_t *)((void *)start);
2058         while (ptr < (uint64_t *)((void *)end)) {
2059                 *ptr++ = KMEM_STKINFO_PATTERN;
2060         }
2061 }
2062 
2063 
2064 /*
2065  * When the kmem_stackinfo tunable is set, create the stackinfo log if it
2066  * doesn't already exist, compute the percentage of kernel stack actually
2067  * used, and record it in the log if it is among the highest values seen.
2068  */
2069 static void
2070 stkinfo_end(kthread_t *t)
2071 {
2072         caddr_t start;  /* stack start */
2073         caddr_t end;    /* stack end  */
2074         uint64_t *ptr;  /* pattern pointer */
2075         size_t stksz;   /* stack size */
2076         size_t smallest = 0;
2077         size_t percent = 0;
2078         uint_t index = 0;
2079         uint_t i;
2080         static size_t smallest_percent = (size_t)-1;
2081         static uint_t full = 0;
2082 
2083         /* create the stackinfo log, if doesn't already exist */
2084         mutex_enter(&kmem_stkinfo_lock);
2085         if (kmem_stkinfo_log == NULL) {
2086                 kmem_stkinfo_log = (kmem_stkinfo_t *)
2087                     kmem_zalloc(KMEM_STKINFO_LOG_SIZE *
2088                     (sizeof (kmem_stkinfo_t)), KM_NOSLEEP);
2089                 if (kmem_stkinfo_log == NULL) {
2090                         mutex_exit(&kmem_stkinfo_lock);
2091                         return;
2092                 }
2093         }
2094         mutex_exit(&kmem_stkinfo_lock);
2095 
2096         /*
2097          * The stack grows up or down (see thread_create());
2098          * compute the stack memory area start and end (start < end).
2099          */
2100         if (t->t_stk > t->t_stkbase) {
2101                 /* stack grows down */
2102                 start = t->t_stkbase;
2103                 end = t->t_stk;
2104         } else {
2105                 /* stack grows up */
2106                 start = t->t_stk;
2107                 end = t->t_stkbase;
2108         }
2109 
2110         /* stack size as found in kthread_t */
2111         stksz = end - start;
2112 
2113         /*
2114          * The stackinfo pattern size is 8 bytes. Ensure proper 8-byte
2115          * alignment of start and end within the stack area boundaries
2116          * (protection against corrupt t_stkbase/t_stk data).
2117          */
2118         if ((((uintptr_t)start) & 0x7) != 0) {
2119                 start = (caddr_t)((((uintptr_t)start) & (~0x7)) + 8);
2120         }
2121         end = (caddr_t)(((uintptr_t)end) & (~0x7));
2122 
2123         if ((end <= start) || (end - start) > (1024 * 1024)) {
2124                 /* negative or stack size > 1 meg, assume bogus */
2125                 return;
2126         }
2127 
2128         /* search until no pattern in the stack */
2129         if (t->t_stk > t->t_stkbase) {
2130                 /* stack grows down */
2131 #if defined(__i386) || defined(__amd64)
2132                 /*
2133                  * 6 longs are pushed on the stack, see thread_load(). Skip
2134                  * them, so if the kthread has never run, percent is zero.
2135                  * 8-byte alignment is preserved for a 32-bit kernel:
2136                  * 6 x 4 = 24, and 24 is a multiple of 8.
2137                  *
2138                  */
2139                 end -= (6 * sizeof (long));
2140 #endif
2141                 ptr = (uint64_t *)((void *)start);
2142                 while (ptr < (uint64_t *)((void *)end)) {
2143                         if (*ptr != KMEM_STKINFO_PATTERN) {
2144                                 percent = stkinfo_percent(end,
2145                                     start, (caddr_t)ptr);
2146                                 break;
2147                         }
2148                         ptr++;
2149                 }
2150         } else {
2151                 /* stack grows up */
2152                 ptr = (uint64_t *)((void *)end);
2153                 ptr--;
2154                 while (ptr >= (uint64_t *)((void *)start)) {
2155                         if (*ptr != KMEM_STKINFO_PATTERN) {
2156                                 percent = stkinfo_percent(start,
2157                                     end, (caddr_t)ptr);
2158                                 break;
2159                         }
2160                         ptr--;
2161                 }
2162         }
2163 
2164         DTRACE_PROBE3(stack__usage, kthread_t *, t,
2165             size_t, stksz, size_t, percent);
2166 
2167         if (percent == 0) {
2168                 return;
2169         }
2170 
2171         mutex_enter(&kmem_stkinfo_lock);
2172         if (full == KMEM_STKINFO_LOG_SIZE && percent < smallest_percent) {
2173                 /*
2174                  * The log is full and already contains the highest values
2175                  */
2176                 mutex_exit(&kmem_stkinfo_lock);
2177                 return;
2178         }
2179 
2180         /* keep a log of the highest used stack */
2181         for (i = 0; i < KMEM_STKINFO_LOG_SIZE; i++) {
2182                 if (kmem_stkinfo_log[i].percent == 0) {
2183                         index = i;
2184                         full++;
2185                         break;
2186                 }
2187                 if (smallest == 0) {
2188                         smallest = kmem_stkinfo_log[i].percent;
2189                         index = i;
2190                         continue;
2191                 }
2192                 if (kmem_stkinfo_log[i].percent < smallest) {
2193                         smallest = kmem_stkinfo_log[i].percent;
2194                         index = i;
2195                 }
2196         }
2197 
2198         if (percent >= kmem_stkinfo_log[index].percent) {
2199                 kmem_stkinfo_log[index].kthread = (caddr_t)t;
2200                 kmem_stkinfo_log[index].t_startpc = (caddr_t)t->t_startpc;
2201                 kmem_stkinfo_log[index].start = start;
2202                 kmem_stkinfo_log[index].stksz = stksz;
2203                 kmem_stkinfo_log[index].percent = percent;
2204                 kmem_stkinfo_log[index].t_tid = t->t_tid;
2205                 kmem_stkinfo_log[index].cmd[0] = '\0';
2206                 if (t->t_tid != 0) {
2207                         stksz = strlen((t->t_procp)->p_user.u_comm);
2208                         if (stksz >= KMEM_STKINFO_STR_SIZE) {
2209                                 stksz = KMEM_STKINFO_STR_SIZE - 1;
2210                                 kmem_stkinfo_log[index].cmd[stksz] = '\0';
2211                         } else {
2212                                 stksz += 1;
2213                         }
2214                         (void) memcpy(kmem_stkinfo_log[index].cmd,
2215                             (t->t_procp)->p_user.u_comm, stksz);
2216                 }
2217                 if (percent < smallest_percent) {
2218                         smallest_percent = percent;
2219                 }
2220         }
2221         mutex_exit(&kmem_stkinfo_lock);
2222 }
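
/*
 * Usage note (a sketch): stack usage logging is enabled by setting the
 * kmem_stackinfo tunable to a non-zero value, e.g. in /etc/system, before
 * the threads of interest are created:
 *
 *	set kmem_stackinfo = 1
 *
 * The resulting kmem_stkinfo_log array can then be inspected with a
 * kernel debugger.
 */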
2223 
2224 /*
2225  * When the kmem_stackinfo tunable is set, compute the stack usage percentage.
2226  */
2227 static size_t
2228 stkinfo_percent(caddr_t t_stk, caddr_t t_stkbase, caddr_t sp)
2229 {
2230         size_t percent;
2231         size_t s;
2232 
2233         if (t_stk > t_stkbase) {
2234                 /* stack grows down */
2235                 if (sp > t_stk) {
2236                         return (0);
2237                 }
2238                 if (sp < t_stkbase) {
2239                         return (100);
2240                 }
2241                 percent = t_stk - sp + 1;
2242                 s = t_stk - t_stkbase + 1;
2243         } else {
2244                 /* stack grows up */
2245                 if (sp < t_stk) {
2246                         return (0);
2247                 }
2248                 if (sp > t_stkbase) {
2249                         return (100);
2250                 }
2251                 percent = sp - t_stk + 1;
2252                 s = t_stkbase - t_stk + 1;
2253         }
2254         percent = ((100 * percent) / s) + 1;
2255         if (percent > 100) {
2256                 percent = 100;
2257         }
2258         return (percent);
2259 }
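
/*
 * Worked example (hypothetical values, downward-growing stack): with
 * t_stk = 0x2000, t_stkbase = 0x1000 and the deepest dirtied word at
 * sp = 0x1400, percent = (100 * (0x2000 - 0x1400 + 1)) /
 * (0x2000 - 0x1000 + 1) + 1 = 76, i.e. about three quarters of the
 * stack was written.
 */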
2260 
2261 /*
2262  * NOTE: This will silently truncate a name > THREAD_NAME_MAX - 1 characters
2263  * long.  It is expected that callers (acting on behalf of userland clients)
2264  * will perform any required checks to return the correct error semantics.
2265  * It is also expected that callers acting on behalf of userland clients
2266  * have performed any necessary permission checks.
2267  */
2268 int
2269 thread_setname(kthread_t *t, const char *name)
2270 {
2271         char *buf = NULL;
2272 
2273         /*
2274          * We optimistically assume that a thread's name will only be set
2275          * once and so allocate memory in preparation of setting t_name.
2276          * If it turns out a name has already been set, we just discard (free)
2277          * the buffer we just allocated and reuse the current buffer
2278          * (as all should be THREAD_NAME_MAX large).
2279          *
2280          * Such an arrangement means over the lifetime of a kthread_t, t_name
2281          * is either NULL or has one value (the address of the buffer holding
2282          * the current thread name).   The assumption is that most kthread_t
2283          * instances will not have a name assigned, so dynamically allocating
2284          * the memory should minimize the footprint of this feature, while
2285          * having the buffer persist for the life of the thread simplifies
2286          * its use in highly constrained situations (e.g. dtrace).
2287          */
2288         if (name != NULL && name[0] != '\0') {
2289                 for (size_t i = 0; name[i] != '\0'; i++) {
2290                         if (!isprint(name[i]))
2291                                 return (EINVAL);
2292                 }
2293 
2294                 buf = kmem_zalloc(THREAD_NAME_MAX, KM_SLEEP);
2295                 (void) strlcpy(buf, name, THREAD_NAME_MAX);
2296         }
2297 
2298         mutex_enter(&ttoproc(t)->p_lock);
2299         if (t->t_name == NULL) {
2300                 t->t_name = buf;
2301         } else {
2302                 if (buf != NULL) {
2303                         (void) strlcpy(t->t_name, name, THREAD_NAME_MAX);
2304                         kmem_free(buf, THREAD_NAME_MAX);
2305                 } else {
2306                         bzero(t->t_name, THREAD_NAME_MAX);
2307                 }
2308         }
2309         mutex_exit(&ttoproc(t)->p_lock);
2310         return (0);
2311 }
2312 
2313 int
2314 thread_vsetname(kthread_t *t, const char *fmt, ...)
2315 {
2316         char name[THREAD_NAME_MAX];
2317         va_list va;
2318         int rc;
2319 
2320         va_start(va, fmt);
2321         rc = vsnprintf(name, sizeof (name), fmt, va);
2322         va_end(va);
2323 
2324         if (rc < 0)
2325                 return (EINVAL);
2326 
2327         if (rc >= sizeof (name))
2328                 return (ENAMETOOLONG);
2329 
2330         return (thread_setname(t, name));
2331 }
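
/*
 * A minimal usage sketch (the driver/instance names are hypothetical):
 *
 *	(void) thread_vsetname(curthread, "mydrv_worker_%d", instance);
 *
 * Unlike thread_setname(), a formatted name that does not fit in
 * THREAD_NAME_MAX - 1 characters is rejected with ENAMETOOLONG rather
 * than silently truncated.
 */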