1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 1991, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright 2019 Joyent, Inc.
25 */
26
27 #include <sys/types.h>
28 #include <sys/param.h>
29 #include <sys/sysmacros.h>
30 #include <sys/signal.h>
31 #include <sys/stack.h>
32 #include <sys/pcb.h>
33 #include <sys/user.h>
34 #include <sys/systm.h>
35 #include <sys/sysinfo.h>
36 #include <sys/errno.h>
37 #include <sys/cmn_err.h>
38 #include <sys/cred.h>
39 #include <sys/resource.h>
40 #include <sys/task.h>
41 #include <sys/project.h>
42 #include <sys/proc.h>
43 #include <sys/debug.h>
44 #include <sys/disp.h>
45 #include <sys/class.h>
46 #include <vm/seg_kmem.h>
47 #include <vm/seg_kp.h>
48 #include <sys/machlock.h>
49 #include <sys/kmem.h>
50 #include <sys/varargs.h>
51 #include <sys/turnstile.h>
52 #include <sys/poll.h>
53 #include <sys/vtrace.h>
54 #include <sys/callb.h>
55 #include <c2/audit.h>
56 #include <sys/tnf.h>
57 #include <sys/sobject.h>
58 #include <sys/cpupart.h>
59 #include <sys/pset.h>
60 #include <sys/door.h>
61 #include <sys/spl.h>
62 #include <sys/copyops.h>
63 #include <sys/rctl.h>
64 #include <sys/brand.h>
65 #include <sys/pool.h>
66 #include <sys/zone.h>
67 #include <sys/tsol/label.h>
68 #include <sys/tsol/tndb.h>
69 #include <sys/cpc_impl.h>
70 #include <sys/sdt.h>
71 #include <sys/reboot.h>
72 #include <sys/kdi.h>
73 #include <sys/schedctl.h>
74 #include <sys/waitq.h>
75 #include <sys/cpucaps.h>
76 #include <sys/kiconv.h>
77 #include <sys/ctype.h>
78 #include <sys/smt.h>
79
80 struct kmem_cache *thread_cache; /* cache of free threads */
81 struct kmem_cache *lwp_cache; /* cache of free lwps */
82 struct kmem_cache *turnstile_cache; /* cache of free turnstiles */
83
84 /*
85 * allthreads is only for use by kmem_readers. All kernel loops can use
86 * the current thread as a start/end point.
87 */
88 kthread_t *allthreads = &t0; /* circular list of all threads */
89
90 static kcondvar_t reaper_cv; /* synchronization var */
91 kthread_t *thread_deathrow; /* circular list of reapable threads */
92 kthread_t *lwp_deathrow; /* circular list of reapable threads */
93 kmutex_t reaplock; /* protects lwp and thread deathrows */
94 int thread_reapcnt = 0; /* number of threads on deathrow */
95 int lwp_reapcnt = 0; /* number of lwps on deathrow */
96 int reaplimit = 16; /* delay reaping until reaplimit */
97
98 thread_free_lock_t *thread_free_lock;
99 /* protects tick thread from reaper */
100
101 extern int nthread;
102
103 /* System Scheduling classes. */
104 id_t syscid; /* system scheduling class ID */
105 id_t sysdccid = CLASS_UNUSED; /* reset when SDC loads */
106
107 void *segkp_thread; /* cookie for segkp pool */
108
109 int lwp_cache_sz = 32;
110 int t_cache_sz = 8;
111 static kt_did_t next_t_id = 1;
112
113 /* Default mode for thread binding to CPUs and processor sets */
114 int default_binding_mode = TB_ALLHARD;
115
116 /*
117 * Min/Max stack sizes for stack size parameters
118 */
119 #define MAX_STKSIZE (32 * DEFAULTSTKSZ)
120 #define MIN_STKSIZE DEFAULTSTKSZ
121
122 /*
123 * default_stksize overrides lwp_default_stksize if it is set.
124 */
125 int default_stksize;
126 int lwp_default_stksize;
127
128 static zone_key_t zone_thread_key;
129
130 unsigned int kmem_stackinfo; /* stackinfo feature on-off */
131 kmem_stkinfo_t *kmem_stkinfo_log; /* stackinfo circular log */
132 static kmutex_t kmem_stkinfo_lock; /* protects kmem_stkinfo_log */
133
134 /*
135 * forward declarations for internal thread specific data (tsd)
136 */
137 static void *tsd_realloc(void *, size_t, size_t);
138
139 void thread_reaper(void);
140
141 /* forward declarations for stackinfo feature */
142 static void stkinfo_begin(kthread_t *);
143 static void stkinfo_end(kthread_t *);
144 static size_t stkinfo_percent(caddr_t, caddr_t, caddr_t);
145
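/*
 * kmem cache constructor and destructor for turnstile_cache.  The
 * constructor simply zeroes a new turnstile; the destructor asserts that a
 * turnstile being freed has no free-list linkage, no waiters, no inheritor
 * and empty sleep queues.
 */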
146 /*ARGSUSED*/
147 static int
148 turnstile_constructor(void *buf, void *cdrarg, int kmflags)
149 {
150 bzero(buf, sizeof (turnstile_t));
151 return (0);
152 }
153
154 /*ARGSUSED*/
155 static void
156 turnstile_destructor(void *buf, void *cdrarg)
157 {
158 turnstile_t *ts = buf;
159
160 ASSERT(ts->ts_free == NULL);
161 ASSERT(ts->ts_waiters == 0);
162 ASSERT(ts->ts_inheritor == NULL);
163 ASSERT(ts->ts_sleepq[0].sq_first == NULL);
164 ASSERT(ts->ts_sleepq[1].sq_first == NULL);
165 }
166
167 void
168 thread_init(void)
169 {
170 kthread_t *tp;
171 extern char sys_name[];
172 extern void idle();
173 struct cpu *cpu = CPU;
174 int i;
175 kmutex_t *lp;
176
177 mutex_init(&reaplock, NULL, MUTEX_SPIN, (void *)ipltospl(DISP_LEVEL));
178 thread_free_lock =
179 kmem_alloc(sizeof (thread_free_lock_t) * THREAD_FREE_NUM, KM_SLEEP);
180 for (i = 0; i < THREAD_FREE_NUM; i++) {
181 lp = &thread_free_lock[i].tf_lock;
182 mutex_init(lp, NULL, MUTEX_DEFAULT, NULL);
183 }
184
185 #if defined(__i386) || defined(__amd64)
186 thread_cache = kmem_cache_create("thread_cache", sizeof (kthread_t),
187 PTR24_ALIGN, NULL, NULL, NULL, NULL, NULL, 0);
188
189 /*
190 * "struct _klwp" includes a "struct pcb", which includes a
191 * "struct fpu", which needs to be 64-byte aligned on amd64
192 * (and even on i386) for xsave/xrstor.
193 */
194 lwp_cache = kmem_cache_create("lwp_cache", sizeof (klwp_t),
195 64, NULL, NULL, NULL, NULL, NULL, 0);
196 #else
197 /*
198 * Allocate thread structures from static_arena. This prevents
199 * issues where a thread tries to relocate its own thread
200 * structure and touches it after the mapping has been suspended.
201 */
202 thread_cache = kmem_cache_create("thread_cache", sizeof (kthread_t),
203 PTR24_ALIGN, NULL, NULL, NULL, NULL, static_arena, 0);
204
205 lwp_stk_cache_init();
206
207 lwp_cache = kmem_cache_create("lwp_cache", sizeof (klwp_t),
208 0, NULL, NULL, NULL, NULL, NULL, 0);
209 #endif
210
211 turnstile_cache = kmem_cache_create("turnstile_cache",
212 sizeof (turnstile_t), 0,
213 turnstile_constructor, turnstile_destructor, NULL, NULL, NULL, 0);
214
215 label_init();
216 cred_init();
217
218 /*
219 * Initialize various resource management facilities.
220 */
221 rctl_init();
222 cpucaps_init();
223 /*
	 * zone_init() should be called before project_init() so that the
	 * project ID for the first project is initialized correctly.
226 */
227 zone_init();
228 project_init();
229 brand_init();
230 kiconv_init();
231 task_init();
232 tcache_init();
233 pool_init();
234
235 curthread->t_ts = kmem_cache_alloc(turnstile_cache, KM_SLEEP);
236
237 /*
238 * Originally, we had two parameters to set default stack
239 * size: one for lwp's (lwp_default_stksize), and one for
240 * kernel-only threads (DEFAULTSTKSZ, a.k.a. _defaultstksz).
241 * Now we have a third parameter that overrides both if it is
242 * set to a legal stack size, called default_stksize.
243 */
244
245 if (default_stksize == 0) {
246 default_stksize = DEFAULTSTKSZ;
247 } else if (default_stksize % PAGESIZE != 0 ||
248 default_stksize > MAX_STKSIZE ||
249 default_stksize < MIN_STKSIZE) {
250 cmn_err(CE_WARN, "Illegal stack size. Using %d",
251 (int)DEFAULTSTKSZ);
252 default_stksize = DEFAULTSTKSZ;
253 } else {
254 lwp_default_stksize = default_stksize;
255 }
256
257 if (lwp_default_stksize == 0) {
258 lwp_default_stksize = default_stksize;
259 } else if (lwp_default_stksize % PAGESIZE != 0 ||
260 lwp_default_stksize > MAX_STKSIZE ||
261 lwp_default_stksize < MIN_STKSIZE) {
262 cmn_err(CE_WARN, "Illegal stack size. Using %d",
263 default_stksize);
264 lwp_default_stksize = default_stksize;
265 }
266
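	/*
	 * Pre-create segkp caches of default-sized stacks: segkp_lwp for lwp
	 * stacks and segkp_thread for kernel-only thread stacks.
	 * thread_create() below draws default-sized stacks from segkp_thread.
	 */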
267 segkp_lwp = segkp_cache_init(segkp, lwp_cache_sz,
268 lwp_default_stksize,
269 (KPD_NOWAIT | KPD_HASREDZONE | KPD_LOCKED));
270
271 segkp_thread = segkp_cache_init(segkp, t_cache_sz,
272 default_stksize, KPD_HASREDZONE | KPD_LOCKED | KPD_NO_ANON);
273
274 (void) getcid(sys_name, &syscid);
275 curthread->t_cid = syscid; /* current thread is t0 */
276
277 /*
278 * Set up the first CPU's idle thread.
279 * It runs whenever the CPU has nothing worthwhile to do.
280 */
281 tp = thread_create(NULL, 0, idle, NULL, 0, &p0, TS_STOPPED, -1);
282 cpu->cpu_idle_thread = tp;
283 tp->t_preempt = 1;
284 tp->t_disp_queue = cpu->cpu_disp;
285 ASSERT(tp->t_disp_queue != NULL);
286 tp->t_bound_cpu = cpu;
287 tp->t_affinitycnt = 1;
288
289 /*
	 * Registering a thread in the callback table is usually
	 * done in the thread's own initialization code. In this
	 * case, we do it right after thread creation so that the
	 * idle thread does not block while registering itself. It
	 * also avoids the possibility of re-registration should a
	 * CPU restart its idle thread.
296 */
297 CALLB_CPR_INIT_SAFE(tp, "idle");
298
299 /*
300 * Create the thread_reaper daemon. From this point on, exited
301 * threads will get reaped.
302 */
303 (void) thread_create(NULL, 0, (void (*)())thread_reaper,
304 NULL, 0, &p0, TS_RUN, minclsyspri);
305
306 /*
307 * Finish initializing the kernel memory allocator now that
308 * thread_create() is available.
309 */
310 kmem_thread_init();
311
312 if (boothowto & RB_DEBUG)
313 kdi_dvec_thravail();
314 }
315
316 /*
317 * Create a thread.
318 *
319 * thread_create() blocks for memory if necessary. It never fails.
320 *
321 * If stk is NULL, the thread is created at the base of the stack
322 * and cannot be swapped.
323 */
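/*
 * Illustrative use (a sketch; my_daemon is a hypothetical function): create
 * a kernel daemon thread in p0, runnable at minclsyspri, much as thread_init()
 * above does for the thread reaper:
 *
 *	t = thread_create(NULL, 0, my_daemon, NULL, 0, &p0, TS_RUN,
 *	    minclsyspri);
 */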
324 kthread_t *
325 thread_create(
326 caddr_t stk,
327 size_t stksize,
328 void (*proc)(),
329 void *arg,
330 size_t len,
331 proc_t *pp,
332 int state,
333 pri_t pri)
334 {
335 kthread_t *t;
336 extern struct classfuncs sys_classfuncs;
337 turnstile_t *ts;
338
339 /*
340 * Every thread keeps a turnstile around in case it needs to block.
341 * The only reason the turnstile is not simply part of the thread
342 * structure is that we may have to break the association whenever
343 * more than one thread blocks on a given synchronization object.
344 * From a memory-management standpoint, turnstiles are like the
345 * "attached mblks" that hang off dblks in the streams allocator.
346 */
347 ts = kmem_cache_alloc(turnstile_cache, KM_SLEEP);
348
349 if (stk == NULL) {
350 /*
351 * alloc both thread and stack in segkp chunk
352 */
353
354 if (stksize < default_stksize)
355 stksize = default_stksize;
356
357 if (stksize == default_stksize) {
358 stk = (caddr_t)segkp_cache_get(segkp_thread);
359 } else {
360 stksize = roundup(stksize, PAGESIZE);
361 stk = (caddr_t)segkp_get(segkp, stksize,
362 (KPD_HASREDZONE | KPD_NO_ANON | KPD_LOCKED));
363 }
364
365 ASSERT(stk != NULL);
366
367 /*
		 * The machine-dependent mutex code may require that
		 * thread pointers (since they may be used as mutex owner
		 * fields) satisfy certain alignment constraints.
		 * PTR24_ALIGN is the size of the alignment quantum.
372 * XXX - assumes stack grows toward low addresses.
373 */
374 if (stksize <= sizeof (kthread_t) + PTR24_ALIGN)
375 cmn_err(CE_PANIC, "thread_create: proposed stack size"
376 " too small to hold thread.");
377 #ifdef STACK_GROWTH_DOWN
378 stksize -= SA(sizeof (kthread_t) + PTR24_ALIGN - 1);
379 stksize &= -PTR24_ALIGN; /* make thread aligned */
380 t = (kthread_t *)(stk + stksize);
381 bzero(t, sizeof (kthread_t));
382 if (audit_active)
383 audit_thread_create(t);
384 t->t_stk = stk + stksize;
385 t->t_stkbase = stk;
386 #else /* stack grows to larger addresses */
387 stksize -= SA(sizeof (kthread_t));
388 t = (kthread_t *)(stk);
389 bzero(t, sizeof (kthread_t));
390 t->t_stk = stk + sizeof (kthread_t);
391 t->t_stkbase = stk + stksize + sizeof (kthread_t);
392 #endif /* STACK_GROWTH_DOWN */
393 t->t_flag |= T_TALLOCSTK;
394 t->t_swap = stk;
395 } else {
396 t = kmem_cache_alloc(thread_cache, KM_SLEEP);
397 bzero(t, sizeof (kthread_t));
398 ASSERT(((uintptr_t)t & (PTR24_ALIGN - 1)) == 0);
399 if (audit_active)
400 audit_thread_create(t);
401 /*
402 * Initialize t_stk to the kernel stack pointer to use
403 * upon entry to the kernel
404 */
405 #ifdef STACK_GROWTH_DOWN
406 t->t_stk = stk + stksize;
407 t->t_stkbase = stk;
408 #else
409 t->t_stk = stk; /* 3b2-like */
410 t->t_stkbase = stk + stksize;
411 #endif /* STACK_GROWTH_DOWN */
412 }
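
	/*
	 * At this point t_stk is the initial kernel stack pointer to use on
	 * entry to the kernel and t_stkbase marks the other end of the stack;
	 * when T_TALLOCSTK is set, the kthread_t itself was carved out of the
	 * same segkp allocation as the stack.
	 */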
413
414 if (kmem_stackinfo != 0) {
415 stkinfo_begin(t);
416 }
417
418 t->t_ts = ts;
419
420 /*
	 * p_cred could be NULL if thread_create() is called before cred_init()
	 * is called in main().
423 */
424 mutex_enter(&pp->p_crlock);
425 if (pp->p_cred)
426 crhold(t->t_cred = pp->p_cred);
427 mutex_exit(&pp->p_crlock);
428 t->t_start = gethrestime_sec();
429 t->t_startpc = proc;
430 t->t_procp = pp;
431 t->t_clfuncs = &sys_classfuncs.thread;
432 t->t_cid = syscid;
433 t->t_pri = pri;
434 t->t_stime = ddi_get_lbolt();
435 t->t_schedflag = TS_LOAD | TS_DONT_SWAP;
436 t->t_bind_cpu = PBIND_NONE;
437 t->t_bindflag = (uchar_t)default_binding_mode;
438 t->t_bind_pset = PS_NONE;
439 t->t_plockp = &pp->p_lock;
440 t->t_copyops = NULL;
441 t->t_taskq = NULL;
442 t->t_anttime = 0;
443 t->t_hatdepth = 0;
444
445 t->t_dtrace_vtime = 1; /* assure vtimestamp is always non-zero */
446
447 CPU_STATS_ADDQ(CPU, sys, nthreads, 1);
448 #ifndef NPROBE
449 /* Kernel probe */
450 tnf_thread_create(t);
451 #endif /* NPROBE */
452 LOCK_INIT_CLEAR(&t->t_lock);
453
454 /*
455 * Callers who give us a NULL proc must do their own
456 * stack initialization. e.g. lwp_create()
457 */
458 if (proc != NULL) {
459 t->t_stk = thread_stk_init(t->t_stk);
460 thread_load(t, proc, arg, len);
461 }
462
463 /*
464 * Put a hold on project0. If this thread is actually in a
465 * different project, then t_proj will be changed later in
466 * lwp_create(). All kernel-only threads must be in project 0.
467 */
468 t->t_proj = project_hold(proj0p);
469
470 lgrp_affinity_init(&t->t_lgrp_affinity);
471
472 mutex_enter(&pidlock);
473 nthread++;
474 t->t_did = next_t_id++;
475 t->t_prev = curthread->t_prev;
476 t->t_next = curthread;
477
478 /*
479 * Add the thread to the list of all threads, and initialize
480 * its t_cpu pointer. We need to block preemption since
481 * cpu_offline walks the thread list looking for threads
482 * with t_cpu pointing to the CPU being offlined. We want
483 * to make sure that the list is consistent and that if t_cpu
484 * is set, the thread is on the list.
485 */
486 kpreempt_disable();
487 curthread->t_prev->t_next = t;
488 curthread->t_prev = t;
489
490 /*
491 * We'll always create in the default partition since that's where
492 * kernel threads go (we'll change this later if needed, in
493 * lwp_create()).
494 */
495 t->t_cpupart = &cp_default;
496
497 /*
498 * For now, affiliate this thread with the root lgroup.
499 * Since the kernel does not (presently) allocate its memory
500 * in a locality aware fashion, the root is an appropriate home.
501 * If this thread is later associated with an lwp, it will have
502 * its lgroup re-assigned at that time.
503 */
504 lgrp_move_thread(t, &cp_default.cp_lgrploads[LGRP_ROOTID], 1);
505
506 /*
507 * If the current CPU is in the default cpupart, use it. Otherwise,
508 * pick one that is; before entering the dispatcher code, we'll
509 * make sure to keep the invariant that ->t_cpu is set. (In fact, we
	 * rely on this, in smt_should_run(), in the call tree of
511 * disp_lowpri_cpu().)
512 */
513 if (CPU->cpu_part == &cp_default) {
514 t->t_cpu = CPU;
515 } else {
516 t->t_cpu = cp_default.cp_cpulist;
517 t->t_cpu = disp_lowpri_cpu(t->t_cpu, t, t->t_pri);
518 }
519
520 t->t_disp_queue = t->t_cpu->cpu_disp;
521 kpreempt_enable();
522
523 /*
524 * Initialize thread state and the dispatcher lock pointer.
525 * Need to hold onto pidlock to block allthreads walkers until
526 * the state is set.
527 */
528 switch (state) {
529 case TS_RUN:
530 curthread->t_oldspl = splhigh(); /* get dispatcher spl */
531 THREAD_SET_STATE(t, TS_STOPPED, &transition_lock);
532 CL_SETRUN(t);
533 thread_unlock(t);
534 break;
535
536 case TS_ONPROC:
537 THREAD_ONPROC(t, t->t_cpu);
538 break;
539
540 case TS_FREE:
541 /*
542 * Free state will be used for intr threads.
543 * The interrupt routine must set the thread dispatcher
544 * lock pointer (t_lockp) if starting on a CPU
545 * other than the current one.
546 */
547 THREAD_FREEINTR(t, CPU);
548 break;
549
550 case TS_STOPPED:
551 THREAD_SET_STATE(t, TS_STOPPED, &stop_lock);
552 break;
553
554 default: /* TS_SLEEP, TS_ZOMB or TS_TRANS */
555 cmn_err(CE_PANIC, "thread_create: invalid state %d", state);
556 }
557 mutex_exit(&pidlock);
558 return (t);
559 }
560
561 /*
562 * Move thread to project0 and take care of project reference counters.
563 */
564 void
565 thread_rele(kthread_t *t)
566 {
567 kproject_t *kpj;
568
569 thread_lock(t);
570
571 ASSERT(t == curthread || t->t_state == TS_FREE || t->t_procp == &p0);
572 kpj = ttoproj(t);
573 t->t_proj = proj0p;
574
575 thread_unlock(t);
576
577 if (kpj != proj0p) {
578 project_rele(kpj);
579 (void) project_hold(proj0p);
580 }
581 }
582
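/*
 * Terminate the calling kernel thread: tear down its TSD and performance
 * counter state, move it back to project 0, remove it from the allthreads
 * list, wake any thread_join() waiters, mark it TS_ZOMB and switch away for
 * the last time via swtch_from_zombie().  This function does not return.
 */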
583 void
584 thread_exit(void)
585 {
586 kthread_t *t = curthread;
587
588 if ((t->t_proc_flag & TP_ZTHREAD) != 0)
589 cmn_err(CE_PANIC, "thread_exit: zthread_exit() not called");
590
591 tsd_exit(); /* Clean up this thread's TSD */
592
593 kcpc_passivate(); /* clean up performance counter state */
594
595 /*
	 * No kernel thread should have called poll() without arranging
	 * for pollcleanup() to be called here.
598 */
599 ASSERT(t->t_pollstate == NULL);
600 ASSERT(t->t_schedctl == NULL);
601 if (t->t_door)
602 door_slam(); /* in case thread did an upcall */
603
604 #ifndef NPROBE
605 /* Kernel probe */
606 if (t->t_tnf_tpdp)
607 tnf_thread_exit();
608 #endif /* NPROBE */
609
610 thread_rele(t);
611 t->t_preempt++;
612
613 /*
614 * remove thread from the all threads list so that
615 * death-row can use the same pointers.
616 */
617 mutex_enter(&pidlock);
618 t->t_next->t_prev = t->t_prev;
619 t->t_prev->t_next = t->t_next;
620 ASSERT(allthreads != t); /* t0 never exits */
621 cv_broadcast(&t->t_joincv); /* wake up anyone in thread_join */
622 mutex_exit(&pidlock);
623
624 if (t->t_ctx != NULL)
625 exitctx(t);
626 if (t->t_procp->p_pctx != NULL)
627 exitpctx(t->t_procp);
628
629 if (kmem_stackinfo != 0) {
630 stkinfo_end(t);
631 }
632
633 t->t_state = TS_ZOMB; /* set zombie thread */
634
635 swtch_from_zombie(); /* give up the CPU */
636 /* NOTREACHED */
637 }
638
639 /*
640 * Check to see if the specified thread is active (defined as being on
641 * the thread list). This is certainly a slow way to do this; if there's
642 * ever a reason to speed it up, we could maintain a hash table of active
643 * threads indexed by their t_did.
644 */
645 static kthread_t *
646 did_to_thread(kt_did_t tid)
647 {
648 kthread_t *t;
649
650 ASSERT(MUTEX_HELD(&pidlock));
651 for (t = curthread->t_next; t != curthread; t = t->t_next) {
652 if (t->t_did == tid)
653 break;
654 }
655 if (t->t_did == tid)
656 return (t);
657 else
658 return (NULL);
659 }
660
661 /*
662 * Wait for specified thread to exit. Returns immediately if the thread
663 * could not be found, meaning that it has either already exited or never
664 * existed.
665 */
666 void
667 thread_join(kt_did_t tid)
668 {
669 kthread_t *t;
670
671 ASSERT(tid != curthread->t_did);
672 ASSERT(tid != t0.t_did);
673
674 mutex_enter(&pidlock);
675 /*
676 * Make sure we check that the thread is on the thread list
677 * before blocking on it; otherwise we could end up blocking on
678 * a cv that's already been freed. In other words, don't cache
679 * the thread pointer across calls to cv_wait.
680 *
681 * The choice of loop invariant means that whenever a thread
682 * is taken off the allthreads list, a cv_broadcast must be
683 * performed on that thread's t_joincv to wake up any waiters.
684 * The broadcast doesn't have to happen right away, but it
685 * shouldn't be postponed indefinitely (e.g., by doing it in
686 * thread_free which may only be executed when the deathrow
	 * queue is processed).
688 */
	while ((t = did_to_thread(tid)) != NULL)
690 cv_wait(&t->t_joincv, &pidlock);
691 mutex_exit(&pidlock);
692 }
693
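/*
 * thread_free_prevent() and thread_free_allow() bracket code that must keep
 * a thread from being freed while it is examined (e.g. the tick accounting
 * code).  They take and drop the hashed tf_lock for the thread;
 * thread_free() passes through the same lock in thread_free_barrier() before
 * tearing the thread down.
 */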
694 void
695 thread_free_prevent(kthread_t *t)
696 {
697 kmutex_t *lp;
698
699 lp = &thread_free_lock[THREAD_FREE_HASH(t)].tf_lock;
700 mutex_enter(lp);
701 }
702
703 void
704 thread_free_allow(kthread_t *t)
705 {
706 kmutex_t *lp;
707
708 lp = &thread_free_lock[THREAD_FREE_HASH(t)].tf_lock;
709 mutex_exit(lp);
710 }
711
712 static void
713 thread_free_barrier(kthread_t *t)
714 {
715 kmutex_t *lp;
716
717 lp = &thread_free_lock[THREAD_FREE_HASH(t)].tf_lock;
718 mutex_enter(lp);
719 mutex_exit(lp);
720 }
721
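/*
 * Final teardown of an exited (TS_FREE) thread: free its credentials,
 * scheduling-class data, turnstile and project hold, then its segkp stack,
 * lwp and, unless the kthread_t was carved out of the stack (T_TALLOCSTK),
 * the thread structure itself.  Called via thread_reap_list() from the
 * reaper thread and from zone destruction.
 */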
722 void
723 thread_free(kthread_t *t)
724 {
725 boolean_t allocstk = (t->t_flag & T_TALLOCSTK);
726 klwp_t *lwp = t->t_lwp;
727 caddr_t swap = t->t_swap;
728
729 ASSERT(t != &t0 && t->t_state == TS_FREE);
730 ASSERT(t->t_door == NULL);
731 ASSERT(t->t_schedctl == NULL);
732 ASSERT(t->t_pollstate == NULL);
733
734 t->t_pri = 0;
735 t->t_pc = 0;
736 t->t_sp = 0;
737 t->t_wchan0 = NULL;
738 t->t_wchan = NULL;
739 if (t->t_cred != NULL) {
740 crfree(t->t_cred);
741 t->t_cred = 0;
742 }
743 if (t->t_pdmsg) {
744 kmem_free(t->t_pdmsg, strlen(t->t_pdmsg) + 1);
745 t->t_pdmsg = NULL;
746 }
747 if (audit_active)
748 audit_thread_free(t);
749 #ifndef NPROBE
750 if (t->t_tnf_tpdp)
751 tnf_thread_free(t);
752 #endif /* NPROBE */
753 if (t->t_cldata) {
754 CL_EXITCLASS(t->t_cid, (caddr_t *)t->t_cldata);
755 }
756 if (t->t_rprof != NULL) {
757 kmem_free(t->t_rprof, sizeof (*t->t_rprof));
758 t->t_rprof = NULL;
759 }
760 t->t_lockp = NULL; /* nothing should try to lock this thread now */
761 if (lwp)
762 lwp_freeregs(lwp, 0);
763 if (t->t_ctx)
764 freectx(t, 0);
765 t->t_stk = NULL;
766 if (lwp)
767 lwp_stk_fini(lwp);
768 lock_clear(&t->t_lock);
769
770 if (t->t_ts->ts_waiters > 0)
771 panic("thread_free: turnstile still active");
772
773 kmem_cache_free(turnstile_cache, t->t_ts);
774
775 free_afd(&t->t_activefd);
776
777 /*
778 * Barrier for the tick accounting code. The tick accounting code
779 * holds this lock to keep the thread from going away while it's
780 * looking at it.
781 */
782 thread_free_barrier(t);
783
784 ASSERT(ttoproj(t) == proj0p);
785 project_rele(ttoproj(t));
786
787 lgrp_affinity_free(&t->t_lgrp_affinity);
788
789 mutex_enter(&pidlock);
790 nthread--;
791 mutex_exit(&pidlock);
792
793 if (t->t_name != NULL) {
794 kmem_free(t->t_name, THREAD_NAME_MAX);
795 t->t_name = NULL;
796 }
797
798 /*
799 * Free thread, lwp and stack. This needs to be done carefully, since
800 * if T_TALLOCSTK is set, the thread is part of the stack.
801 */
802 t->t_lwp = NULL;
803 t->t_swap = NULL;
804
805 if (swap) {
806 segkp_release(segkp, swap);
807 }
808 if (lwp) {
809 kmem_cache_free(lwp_cache, lwp);
810 }
811 if (!allocstk) {
812 kmem_cache_free(thread_cache, t);
813 }
814 }
815
816 /*
817 * Removes threads associated with the given zone from a deathrow queue.
818 * tp is a pointer to the head of the deathrow queue, and countp is a
819 * pointer to the current deathrow count. Returns a linked list of
820 * threads removed from the list.
821 */
822 static kthread_t *
823 thread_zone_cleanup(kthread_t **tp, int *countp, zoneid_t zoneid)
824 {
825 kthread_t *tmp, *list = NULL;
826 cred_t *cr;
827
828 ASSERT(MUTEX_HELD(&reaplock));
829 while (*tp != NULL) {
830 if ((cr = (*tp)->t_cred) != NULL && crgetzoneid(cr) == zoneid) {
831 tmp = *tp;
832 *tp = tmp->t_forw;
833 tmp->t_forw = list;
834 list = tmp;
835 (*countp)--;
836 } else {
837 tp = &(*tp)->t_forw;
838 }
839 }
840 return (list);
841 }
842
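/*
 * Free every thread on the t_forw-linked list passed in.
 */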
843 static void
844 thread_reap_list(kthread_t *t)
845 {
846 kthread_t *next;
847
848 while (t != NULL) {
849 next = t->t_forw;
850 thread_free(t);
851 t = next;
852 }
853 }
854
855 /* ARGSUSED */
856 static void
857 thread_zone_destroy(zoneid_t zoneid, void *unused)
858 {
859 kthread_t *t, *l;
860
861 mutex_enter(&reaplock);
862 /*
863 * Pull threads and lwps associated with zone off deathrow lists.
864 */
865 t = thread_zone_cleanup(&thread_deathrow, &thread_reapcnt, zoneid);
866 l = thread_zone_cleanup(&lwp_deathrow, &lwp_reapcnt, zoneid);
867 mutex_exit(&reaplock);
868
869 /*
870 * Guard against race condition in mutex_owner_running:
871 * thread=owner(mutex)
872 * <interrupt>
873 * thread exits mutex
874 * thread exits
875 * thread reaped
876 * thread struct freed
877 * cpu = thread->t_cpu <- BAD POINTER DEREFERENCE.
878 * A cross call to all cpus will cause the interrupt handler
879 * to reset the PC if it is in mutex_owner_running, refreshing
880 * stale thread pointers.
881 */
882 mutex_sync(); /* sync with mutex code */
883
884 /*
885 * Reap threads
886 */
887 thread_reap_list(t);
888
889 /*
890 * Reap lwps
891 */
892 thread_reap_list(l);
893 }
894
895 /*
 * Clean up zombie threads that are on deathrow.
897 */
898 void
899 thread_reaper()
900 {
901 kthread_t *t, *l;
902 callb_cpr_t cprinfo;
903
904 /*
905 * Register callback to clean up threads when zone is destroyed.
906 */
907 zone_key_create(&zone_thread_key, NULL, NULL, thread_zone_destroy);
908
909 CALLB_CPR_INIT(&cprinfo, &reaplock, callb_generic_cpr, "t_reaper");
910 for (;;) {
911 mutex_enter(&reaplock);
912 while (thread_deathrow == NULL && lwp_deathrow == NULL) {
913 CALLB_CPR_SAFE_BEGIN(&cprinfo);
914 cv_wait(&reaper_cv, &reaplock);
915 CALLB_CPR_SAFE_END(&cprinfo, &reaplock);
916 }
917 /*
		 * mutex_sync() needs to be called when reaping, but
		 * not too often.  We limit the reaping rate to once
		 * per second.  reaplimit is the number of threads that
		 * may accumulate on deathrow before the reaper is woken;
		 * it does not affect thread destruction or creation.
922 */
923 t = thread_deathrow;
924 l = lwp_deathrow;
925 thread_deathrow = NULL;
926 lwp_deathrow = NULL;
927 thread_reapcnt = 0;
928 lwp_reapcnt = 0;
929 mutex_exit(&reaplock);
930
931 /*
932 * Guard against race condition in mutex_owner_running:
933 * thread=owner(mutex)
934 * <interrupt>
935 * thread exits mutex
936 * thread exits
937 * thread reaped
938 * thread struct freed
939 * cpu = thread->t_cpu <- BAD POINTER DEREFERENCE.
940 * A cross call to all cpus will cause the interrupt handler
941 * to reset the PC if it is in mutex_owner_running, refreshing
942 * stale thread pointers.
943 */
944 mutex_sync(); /* sync with mutex code */
945 /*
946 * Reap threads
947 */
948 thread_reap_list(t);
949
950 /*
951 * Reap lwps
952 */
953 thread_reap_list(l);
954 delay(hz);
955 }
956 }
957
958 /*
 * This is called by lwp_create(), etc., to put an lwp_deathrow thread onto
 * thread_deathrow.  The thread's state has already been changed to TS_FREE,
 * indicating that it is reapable.  The caller already holds the reaplock, and
 * the thread has already been freed.
963 */
964 void
965 reapq_move_lq_to_tq(kthread_t *t)
966 {
967 ASSERT(t->t_state == TS_FREE);
968 ASSERT(MUTEX_HELD(&reaplock));
969 t->t_forw = thread_deathrow;
970 thread_deathrow = t;
971 thread_reapcnt++;
972 if (lwp_reapcnt + thread_reapcnt > reaplimit)
973 cv_signal(&reaper_cv); /* wake the reaper */
974 }
975
976 /*
977 * This is called by resume() to put a zombie thread onto deathrow.
 * The thread's state is changed to TS_FREE to indicate that it is reapable.
979 * This is called from the idle thread so it must not block - just spin.
980 */
981 void
982 reapq_add(kthread_t *t)
983 {
984 mutex_enter(&reaplock);
985
986 /*
987 * lwp_deathrow contains threads with lwp linkage and
988 * swappable thread stacks which have the default stacksize.
989 * These threads' lwps and stacks may be reused by lwp_create().
990 *
	 * Anything else goes on thread_deathrow, where it will eventually
992 * be thread_free()d.
993 */
994 if (t->t_flag & T_LWPREUSE) {
995 ASSERT(ttolwp(t) != NULL);
996 t->t_forw = lwp_deathrow;
997 lwp_deathrow = t;
998 lwp_reapcnt++;
999 } else {
1000 t->t_forw = thread_deathrow;
1001 thread_deathrow = t;
1002 thread_reapcnt++;
1003 }
1004 if (lwp_reapcnt + thread_reapcnt > reaplimit)
1005 cv_signal(&reaper_cv); /* wake the reaper */
1006 t->t_state = TS_FREE;
1007 lock_clear(&t->t_lock);
1008
1009 /*
1010 * Before we return, we need to grab and drop the thread lock for
1011 * the dead thread. At this point, the current thread is the idle
1012 * thread, and the dead thread's CPU lock points to the current
1013 * CPU -- and we must grab and drop the lock to synchronize with
1014 * a racing thread walking a blocking chain that the zombie thread
1015 * was recently in. By this point, that blocking chain is (by
1016 * definition) stale: the dead thread is not holding any locks, and
1017 * is therefore not in any blocking chains -- but if we do not regrab
1018 * our lock before freeing the dead thread's data structures, the
1019 * thread walking the (stale) blocking chain will die on memory
1020 * corruption when it attempts to drop the dead thread's lock. We
1021 * only need do this once because there is no way for the dead thread
1022 * to ever again be on a blocking chain: once we have grabbed and
1023 * dropped the thread lock, we are guaranteed that anyone that could
1024 * have seen this thread in a blocking chain can no longer see it.
1025 */
1026 thread_lock(t);
1027 thread_unlock(t);
1028
1029 mutex_exit(&reaplock);
1030 }
1031
1032 /*
1033 * Install thread context ops for the current thread.
1034 */
1035 void
1036 installctx(
1037 kthread_t *t,
1038 void *arg,
1039 void (*save)(void *),
1040 void (*restore)(void *),
1041 void (*fork)(void *, void *),
1042 void (*lwp_create)(void *, void *),
1043 void (*exit)(void *),
1044 void (*free)(void *, int))
1045 {
1046 struct ctxop *ctx;
1047
1048 ctx = kmem_alloc(sizeof (struct ctxop), KM_SLEEP);
1049 ctx->save_op = save;
1050 ctx->restore_op = restore;
1051 ctx->fork_op = fork;
1052 ctx->lwp_create_op = lwp_create;
1053 ctx->exit_op = exit;
1054 ctx->free_op = free;
1055 ctx->arg = arg;
1056 ctx->save_ts = 0;
1057 ctx->restore_ts = 0;
1058
1059 /*
1060 * Keep ctxops in a doubly-linked list to allow traversal in both
1061 * directions. Using only the newest-to-oldest ordering was adequate
1062 * previously, but reversing the order for restore_op actions is
	 * necessary if later-added ctxops depend on earlier ones.
	 *
	 * One example of such a dependency: hypervisor software handling the
	 * guest FPU expects to save guest FPU state prior to host FPU handling
	 * and, consequently, to handle the guest logic _after_ the host FPU
	 * has been restored.
1069 *
1070 * The t_ctx member points to the most recently added ctxop or is NULL
1071 * if no ctxops are associated with the thread. The 'next' pointers
1072 * form a loop of the ctxops in newest-to-oldest order. The 'prev'
1073 * pointers form a loop in the reverse direction, where t_ctx->prev is
1074 * the oldest entry associated with the thread.
1075 *
1076 * The protection of kpreempt_disable is required to safely perform the
1077 * list insertion, since there are inconsistent states between some of
1078 * the pointer assignments.
1079 */
1080 kpreempt_disable();
1081 if (t->t_ctx == NULL) {
1082 ctx->next = ctx;
1083 ctx->prev = ctx;
1084 } else {
1085 struct ctxop *head = t->t_ctx, *tail = t->t_ctx->prev;
1086
1087 ctx->next = head;
1088 ctx->prev = tail;
1089 head->prev = ctx;
1090 tail->next = ctx;
1091 }
1092 t->t_ctx = ctx;
1093 kpreempt_enable();
1094 }
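
/*
 * Illustrative pairing of installctx()/removectx() (a sketch; my_state and
 * the my_* handlers are hypothetical): a facility with per-thread hardware
 * state installs matching save/restore ops on the current thread and later
 * removes them with the identical argument tuple so the matching ctxop can
 * be found:
 *
 *	installctx(curthread, my_state, my_save, my_restore,
 *	    NULL, NULL, my_exit, my_free);
 *	...
 *	(void) removectx(curthread, my_state, my_save, my_restore,
 *	    NULL, NULL, my_exit, my_free);
 */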
1095
1096 /*
1097 * Remove the thread context ops from a thread.
1098 */
1099 int
1100 removectx(
1101 kthread_t *t,
1102 void *arg,
1103 void (*save)(void *),
1104 void (*restore)(void *),
1105 void (*fork)(void *, void *),
1106 void (*lwp_create)(void *, void *),
1107 void (*exit)(void *),
1108 void (*free)(void *, int))
1109 {
1110 struct ctxop *ctx, *head;
1111
1112 /*
1113 * The incoming kthread_t (which is the thread for which the
1114 * context ops will be removed) should be one of the following:
1115 *
1116 * a) the current thread,
1117 *
1118 * b) a thread of a process that's being forked (SIDL),
1119 *
1120 * c) a thread that belongs to the same process as the current
1121 * thread and for which the current thread is the agent thread,
1122 *
1123 * d) a thread that is TS_STOPPED which is indicative of it
1124 * being (if curthread is not an agent) a thread being created
1125 * as part of an lwp creation.
1126 */
1127 ASSERT(t == curthread || ttoproc(t)->p_stat == SIDL ||
1128 ttoproc(t)->p_agenttp == curthread || t->t_state == TS_STOPPED);
1129
1130 /*
1131 * Serialize modifications to t->t_ctx to prevent the agent thread
1132 * and the target thread from racing with each other during lwp exit.
1133 */
1134 mutex_enter(&t->t_ctx_lock);
1135 kpreempt_disable();
1136
1137 if (t->t_ctx == NULL) {
1138 mutex_exit(&t->t_ctx_lock);
1139 kpreempt_enable();
1140 return (0);
1141 }
1142
1143 ctx = head = t->t_ctx;
1144 do {
1145 if (ctx->save_op == save && ctx->restore_op == restore &&
1146 ctx->fork_op == fork && ctx->lwp_create_op == lwp_create &&
1147 ctx->exit_op == exit && ctx->free_op == free &&
1148 ctx->arg == arg) {
1149 ctx->prev->next = ctx->next;
1150 ctx->next->prev = ctx->prev;
1151 if (ctx->next == ctx) {
1152 /* last remaining item */
1153 t->t_ctx = NULL;
1154 } else if (ctx == t->t_ctx) {
1155 /* fix up head of list */
1156 t->t_ctx = ctx->next;
1157 }
1158 ctx->next = ctx->prev = NULL;
1159
1160 mutex_exit(&t->t_ctx_lock);
1161 if (ctx->free_op != NULL)
1162 (ctx->free_op)(ctx->arg, 0);
1163 kmem_free(ctx, sizeof (struct ctxop));
1164 kpreempt_enable();
1165 return (1);
1166 }
1167
1168 ctx = ctx->next;
1169 } while (ctx != head);
1170
1171 mutex_exit(&t->t_ctx_lock);
1172 kpreempt_enable();
1173 return (0);
1174 }
1175
1176 void
1177 savectx(kthread_t *t)
1178 {
1179 ASSERT(t == curthread);
1180
1181 if (t->t_ctx != NULL) {
1182 struct ctxop *ctx, *head;
1183
1184 /* Forward traversal */
1185 ctx = head = t->t_ctx;
1186 do {
1187 if (ctx->save_op != NULL) {
1188 ctx->save_ts = gethrtime_unscaled();
1189 (ctx->save_op)(ctx->arg);
1190 }
1191 ctx = ctx->next;
1192 } while (ctx != head);
1193 }
1194 }
1195
1196 void
1197 restorectx(kthread_t *t)
1198 {
1199 ASSERT(t == curthread);
1200
1201 if (t->t_ctx != NULL) {
1202 struct ctxop *ctx, *tail;
1203
1204 /* Backward traversal (starting at the tail) */
1205 ctx = tail = t->t_ctx->prev;
1206 do {
1207 if (ctx->restore_op != NULL) {
1208 ctx->restore_ts = gethrtime_unscaled();
1209 (ctx->restore_op)(ctx->arg);
1210 }
1211 ctx = ctx->prev;
1212 } while (ctx != tail);
1213 }
1214 }
1215
1216 void
1217 forkctx(kthread_t *t, kthread_t *ct)
1218 {
1219 if (t->t_ctx != NULL) {
1220 struct ctxop *ctx, *head;
1221
1222 /* Forward traversal */
1223 ctx = head = t->t_ctx;
1224 do {
1225 if (ctx->fork_op != NULL) {
1226 (ctx->fork_op)(t, ct);
1227 }
1228 ctx = ctx->next;
1229 } while (ctx != head);
1230 }
1231 }
1232
1233 /*
1234 * Note that this operator is only invoked via the _lwp_create
1235 * system call. The system may have other reasons to create lwps
1236 * e.g. the agent lwp or the doors unreferenced lwp.
1237 */
1238 void
1239 lwp_createctx(kthread_t *t, kthread_t *ct)
1240 {
1241 if (t->t_ctx != NULL) {
1242 struct ctxop *ctx, *head;
1243
1244 /* Forward traversal */
1245 ctx = head = t->t_ctx;
1246 do {
1247 if (ctx->lwp_create_op != NULL) {
1248 (ctx->lwp_create_op)(t, ct);
1249 }
1250 ctx = ctx->next;
1251 } while (ctx != head);
1252 }
1253 }
1254
1255 /*
1256 * exitctx is called from thread_exit() and lwp_exit() to perform any actions
1257 * needed when the thread/LWP leaves the processor for the last time. This
1258 * routine is not intended to deal with freeing memory; freectx() is used for
1259 * that purpose during thread_free(). This routine is provided to allow for
1260 * clean-up that can't wait until thread_free().
1261 */
1262 void
1263 exitctx(kthread_t *t)
1264 {
1265 if (t->t_ctx != NULL) {
1266 struct ctxop *ctx, *head;
1267
1268 /* Forward traversal */
1269 ctx = head = t->t_ctx;
1270 do {
1271 if (ctx->exit_op != NULL) {
1272 (ctx->exit_op)(t);
1273 }
1274 ctx = ctx->next;
1275 } while (ctx != head);
1276 }
1277 }
1278
1279 /*
1280 * freectx is called from thread_free() and exec() to get
1281 * rid of old thread context ops.
1282 */
1283 void
1284 freectx(kthread_t *t, int isexec)
1285 {
1286 kpreempt_disable();
1287 if (t->t_ctx != NULL) {
1288 struct ctxop *ctx, *head;
1289
1290 ctx = head = t->t_ctx;
1291 t->t_ctx = NULL;
1292 do {
1293 struct ctxop *next = ctx->next;
1294
1295 if (ctx->free_op != NULL) {
1296 (ctx->free_op)(ctx->arg, isexec);
1297 }
1298 kmem_free(ctx, sizeof (struct ctxop));
1299 ctx = next;
1300 } while (ctx != head);
1301 }
1302 kpreempt_enable();
1303 }
1304
1305 /*
 * freectx_ctx is called from lwp_create() when an lwp is reused from
 * lwp_deathrow and its thread structure is added to thread_deathrow.
 * The thread structure to which this ctx was attached may already have
 * been freed by the thread reaper, so free_op implementations shouldn't
 * rely on that thread structure still being around.
1311 */
1312 void
1313 freectx_ctx(struct ctxop *ctx)
1314 {
1315 struct ctxop *head = ctx;
1316
1317 ASSERT(ctx != NULL);
1318
1319 kpreempt_disable();
1322 do {
1323 struct ctxop *next = ctx->next;
1324
1325 if (ctx->free_op != NULL) {
1326 (ctx->free_op)(ctx->arg, 0);
1327 }
1328 kmem_free(ctx, sizeof (struct ctxop));
1329 ctx = next;
1330 } while (ctx != head);
1331 kpreempt_enable();
1332 }
1333
1334 /*
1335 * Set the thread running; arrange for it to be swapped in if necessary.
1336 */
1337 void
1338 setrun_locked(kthread_t *t)
1339 {
1340 ASSERT(THREAD_LOCK_HELD(t));
1341 if (t->t_state == TS_SLEEP) {
1342 /*
1343 * Take off sleep queue.
1344 */
1345 SOBJ_UNSLEEP(t->t_sobj_ops, t);
1346 } else if (t->t_state & (TS_RUN | TS_ONPROC)) {
1347 /*
1348 * Already on dispatcher queue.
1349 */
1350 return;
1351 } else if (t->t_state == TS_WAIT) {
1352 waitq_setrun(t);
1353 } else if (t->t_state == TS_STOPPED) {
1354 /*
1355 * All of the sending of SIGCONT (TC_XSTART) and /proc
1356 * (TC_PSTART) and lwp_continue() (TC_CSTART) must have
1357 * requested that the thread be run.
1358 * Just calling setrun() is not sufficient to set a stopped
1359 * thread running. TP_TXSTART is always set if the thread
1360 * is not stopped by a jobcontrol stop signal.
1361 * TP_TPSTART is always set if /proc is not controlling it.
1362 * TP_TCSTART is always set if lwp_suspend() didn't stop it.
1363 * The thread won't be stopped unless one of these
1364 * three mechanisms did it.
1365 *
1366 * These flags must be set before calling setrun_locked(t).
1367 * They can't be passed as arguments because the streams
1368 * code calls setrun() indirectly and the mechanism for
1369 * doing so admits only one argument. Note that the
1370 * thread must be locked in order to change t_schedflags.
1371 */
1372 if ((t->t_schedflag & TS_ALLSTART) != TS_ALLSTART)
1373 return;
1374 /*
1375 * Process is no longer stopped (a thread is running).
1376 */
1377 t->t_whystop = 0;
1378 t->t_whatstop = 0;
1379 /*
1380 * Strictly speaking, we do not have to clear these
1381 * flags here; they are cleared on entry to stop().
1382 * However, they are confusing when doing kernel
1383 * debugging or when they are revealed by ps(1).
1384 */
1385 t->t_schedflag &= ~TS_ALLSTART;
1386 THREAD_TRANSITION(t); /* drop stopped-thread lock */
1387 ASSERT(t->t_lockp == &transition_lock);
1388 ASSERT(t->t_wchan0 == NULL && t->t_wchan == NULL);
1389 /*
1390 * Let the class put the process on the dispatcher queue.
1391 */
1392 CL_SETRUN(t);
1393 }
1394 }
1395
1396 void
1397 setrun(kthread_t *t)
1398 {
1399 thread_lock(t);
1400 setrun_locked(t);
1401 thread_unlock(t);
1402 }
1403
1404 /*
1405 * Unpin an interrupted thread.
1406 * When an interrupt occurs, the interrupt is handled on the stack
1407 * of an interrupt thread, taken from a pool linked to the CPU structure.
1408 *
1409 * When swtch() is switching away from an interrupt thread because it
1410 * blocked or was preempted, this routine is called to complete the
1411 * saving of the interrupted thread state, and returns the interrupted
1412 * thread pointer so it may be resumed.
1413 *
1414 * Called by swtch() only at high spl.
1415 */
1416 kthread_t *
1417 thread_unpin()
1418 {
1419 kthread_t *t = curthread; /* current thread */
1420 kthread_t *itp; /* interrupted thread */
1421 int i; /* interrupt level */
1422 extern int intr_passivate();
1423
1424 ASSERT(t->t_intr != NULL);
1425
1426 itp = t->t_intr; /* interrupted thread */
1427 t->t_intr = NULL; /* clear interrupt ptr */
1428
1429 smt_end_intr();
1430
1431 /*
1432 * Get state from interrupt thread for the one
1433 * it interrupted.
1434 */
1435
1436 i = intr_passivate(t, itp);
1437
1438 TRACE_5(TR_FAC_INTR, TR_INTR_PASSIVATE,
1439 "intr_passivate:level %d curthread %p (%T) ithread %p (%T)",
1440 i, t, t, itp, itp);
1441
1442 /*
1443 * Dissociate the current thread from the interrupted thread's LWP.
1444 */
1445 t->t_lwp = NULL;
1446
1447 /*
1448 * Interrupt handlers above the level that spinlocks block must
1449 * not block.
1450 */
1451 #if DEBUG
1452 if (i < 0 || i > LOCK_LEVEL)
1453 cmn_err(CE_PANIC, "thread_unpin: ipl out of range %x", i);
1454 #endif
1455
1456 /*
1457 * Compute the CPU's base interrupt level based on the active
1458 * interrupts.
1459 */
1460 ASSERT(CPU->cpu_intr_actv & (1 << i));
1461 set_base_spl();
1462
1463 return (itp);
1464 }
1465
1466 /*
1467 * Create and initialize an interrupt thread.
1469 * Called at spl7() or better.
1470 */
1471 void
1472 thread_create_intr(struct cpu *cp)
1473 {
1474 kthread_t *tp;
1475
1476 tp = thread_create(NULL, 0,
1477 (void (*)())thread_create_intr, NULL, 0, &p0, TS_ONPROC, 0);
1478
1479 /*
1480 * Set the thread in the TS_FREE state. The state will change
1481 * to TS_ONPROC only while the interrupt is active. Think of these
1482 * as being on a private free list for the CPU. Being TS_FREE keeps
1483 * inactive interrupt threads out of debugger thread lists.
1484 *
1485 * We cannot call thread_create with TS_FREE because of the current
1486 * checks there for ONPROC. Fix this when thread_create takes flags.
1487 */
1488 THREAD_FREEINTR(tp, cp);
1489
1490 /*
1491 * Nobody should ever reference the credentials of an interrupt
1492 * thread so make it NULL to catch any such references.
1493 */
1494 tp->t_cred = NULL;
1495 tp->t_flag |= T_INTR_THREAD;
1496 tp->t_cpu = cp;
1497 tp->t_bound_cpu = cp;
1498 tp->t_disp_queue = cp->cpu_disp;
1499 tp->t_affinitycnt = 1;
1500 tp->t_preempt = 1;
1501
1502 /*
1503 * Don't make a user-requested binding on this thread so that
1504 * the processor can be offlined.
1505 */
1506 tp->t_bind_cpu = PBIND_NONE; /* no USER-requested binding */
1507 tp->t_bind_pset = PS_NONE;
1508
1509 #if defined(__i386) || defined(__amd64)
1510 tp->t_stk -= STACK_ALIGN;
1511 *(tp->t_stk) = 0; /* terminate intr thread stack */
1512 #endif
1513
1514 /*
1515 * Link onto CPU's interrupt pool.
1516 */
1517 tp->t_link = cp->cpu_intr_thread;
1518 cp->cpu_intr_thread = tp;
1519 }
1520
1521 /*
1522 * TSD -- THREAD SPECIFIC DATA
1523 */
1524 static kmutex_t tsd_mutex; /* linked list spin lock */
1525 static uint_t tsd_nkeys; /* size of destructor array */
1526 /* per-key destructor funcs */
1527 static void (**tsd_destructor)(void *);
1528 /* list of tsd_thread's */
1529 static struct tsd_thread *tsd_list;
1530
1531 /*
1532 * Default destructor
1533 * Needed because NULL destructor means that the key is unused
1534 */
1535 /* ARGSUSED */
1536 void
1537 tsd_defaultdestructor(void *value)
1538 {}
1539
1540 /*
1541 * Create a key (index into per thread array)
1542 * Locks out tsd_create, tsd_destroy, and tsd_exit
1543 * May allocate memory with lock held
1544 */
1545 void
1546 tsd_create(uint_t *keyp, void (*destructor)(void *))
1547 {
1548 int i;
1549 uint_t nkeys;
1550
1551 /*
1552 * if key is allocated, do nothing
1553 */
1554 mutex_enter(&tsd_mutex);
1555 if (*keyp) {
1556 mutex_exit(&tsd_mutex);
1557 return;
1558 }
1559 /*
1560 * find an unused key
1561 */
1562 if (destructor == NULL)
1563 destructor = tsd_defaultdestructor;
1564
1565 for (i = 0; i < tsd_nkeys; ++i)
1566 if (tsd_destructor[i] == NULL)
1567 break;
1568
1569 /*
1570 * if no unused keys, increase the size of the destructor array
1571 */
1572 if (i == tsd_nkeys) {
1573 if ((nkeys = (tsd_nkeys << 1)) == 0)
1574 nkeys = 1;
1575 tsd_destructor =
1576 (void (**)(void *))tsd_realloc((void *)tsd_destructor,
1577 (size_t)(tsd_nkeys * sizeof (void (*)(void *))),
1578 (size_t)(nkeys * sizeof (void (*)(void *))));
1579 tsd_nkeys = nkeys;
1580 }
1581
1582 /*
1583 * allocate the next available unused key
1584 */
1585 tsd_destructor[i] = destructor;
1586 *keyp = i + 1;
1587 mutex_exit(&tsd_mutex);
1588 }
1589
1590 /*
1591 * Destroy a key -- this is for unloadable modules
1592 *
1593 * Assumes that the caller is preventing tsd_set and tsd_get
1594 * Locks out tsd_create, tsd_destroy, and tsd_exit
1595 * May free memory with lock held
1596 */
1597 void
1598 tsd_destroy(uint_t *keyp)
1599 {
1600 uint_t key;
1601 struct tsd_thread *tsd;
1602
1603 /*
1604 * protect the key namespace and our destructor lists
1605 */
1606 mutex_enter(&tsd_mutex);
1607 key = *keyp;
1608 *keyp = 0;
1609
1610 ASSERT(key <= tsd_nkeys);
1611
1612 /*
1613 * if the key is valid
1614 */
1615 if (key != 0) {
1616 uint_t k = key - 1;
1617 /*
1618 * for every thread with TSD, call key's destructor
1619 */
1620 for (tsd = tsd_list; tsd; tsd = tsd->ts_next) {
1621 /*
1622 * no TSD for key in this thread
1623 */
1624 if (key > tsd->ts_nkeys)
1625 continue;
1626 /*
1627 * call destructor for key
1628 */
1629 if (tsd->ts_value[k] && tsd_destructor[k])
1630 (*tsd_destructor[k])(tsd->ts_value[k]);
1631 /*
1632 * reset value for key
1633 */
1634 tsd->ts_value[k] = NULL;
1635 }
1636 /*
1637 * actually free the key (NULL destructor == unused)
1638 */
1639 tsd_destructor[k] = NULL;
1640 }
1641
1642 mutex_exit(&tsd_mutex);
1643 }
1644
1645 /*
1646 * Quickly return the per thread value that was stored with the specified key
1647 * Assumes the caller is protecting key from tsd_create and tsd_destroy
1648 */
1649 void *
1650 tsd_get(uint_t key)
1651 {
1652 return (tsd_agent_get(curthread, key));
1653 }
1654
1655 /*
1656 * Set a per thread value indexed with the specified key
1657 */
1658 int
1659 tsd_set(uint_t key, void *value)
1660 {
1661 return (tsd_agent_set(curthread, key, value));
1662 }
1663
1664 /*
1665 * Like tsd_get(), except that the agent lwp can get the tsd of
1666 * another thread in the same process (the agent thread only runs when the
1667 * process is completely stopped by /proc), or syslwp is creating a new lwp.
1668 */
1669 void *
1670 tsd_agent_get(kthread_t *t, uint_t key)
1671 {
1672 struct tsd_thread *tsd = t->t_tsd;
1673
1674 ASSERT(t == curthread ||
1675 ttoproc(t)->p_agenttp == curthread || t->t_state == TS_STOPPED);
1676
1677 if (key && tsd != NULL && key <= tsd->ts_nkeys)
1678 return (tsd->ts_value[key - 1]);
1679 return (NULL);
1680 }
1681
1682 /*
1683 * Like tsd_set(), except that the agent lwp can set the tsd of
1684 * another thread in the same process, or syslwp can set the tsd
1685 * of a thread it's in the middle of creating.
1686 *
1687 * Assumes the caller is protecting key from tsd_create and tsd_destroy
1688 * May lock out tsd_destroy (and tsd_create), may allocate memory with
1689 * lock held
1690 */
1691 int
1692 tsd_agent_set(kthread_t *t, uint_t key, void *value)
1693 {
1694 struct tsd_thread *tsd = t->t_tsd;
1695
1696 ASSERT(t == curthread ||
1697 ttoproc(t)->p_agenttp == curthread || t->t_state == TS_STOPPED);
1698
1699 if (key == 0)
1700 return (EINVAL);
1701 if (tsd == NULL)
1702 tsd = t->t_tsd = kmem_zalloc(sizeof (*tsd), KM_SLEEP);
1703 if (key <= tsd->ts_nkeys) {
1704 tsd->ts_value[key - 1] = value;
1705 return (0);
1706 }
1707
1708 ASSERT(key <= tsd_nkeys);
1709
1710 /*
1711 * lock out tsd_destroy()
1712 */
1713 mutex_enter(&tsd_mutex);
1714 if (tsd->ts_nkeys == 0) {
1715 /*
1716 * Link onto list of threads with TSD
1717 */
1718 if ((tsd->ts_next = tsd_list) != NULL)
1719 tsd_list->ts_prev = tsd;
1720 tsd_list = tsd;
1721 }
1722
1723 /*
1724 * Allocate thread local storage and set the value for key
1725 */
1726 tsd->ts_value = tsd_realloc(tsd->ts_value,
1727 tsd->ts_nkeys * sizeof (void *),
1728 key * sizeof (void *));
1729 tsd->ts_nkeys = key;
1730 tsd->ts_value[key - 1] = value;
1731 mutex_exit(&tsd_mutex);
1732
1733 return (0);
1734 }
1735
1736
1737 /*
1738 * Return the per thread value that was stored with the specified key
1739 * If necessary, create the key and the value
1740 * Assumes the caller is protecting *keyp from tsd_destroy
1741 */
1742 void *
1743 tsd_getcreate(uint_t *keyp, void (*destroy)(void *), void *(*allocate)(void))
1744 {
1745 void *value;
1746 uint_t key = *keyp;
1747 struct tsd_thread *tsd = curthread->t_tsd;
1748
1749 if (tsd == NULL)
1750 tsd = curthread->t_tsd = kmem_zalloc(sizeof (*tsd), KM_SLEEP);
1751 if (key && key <= tsd->ts_nkeys && (value = tsd->ts_value[key - 1]))
1752 return (value);
1753 if (key == 0)
1754 tsd_create(keyp, destroy);
1755 (void) tsd_set(*keyp, value = (*allocate)());
1756
1757 return (value);
1758 }
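
/*
 * Illustrative use of the TSD interfaces (a sketch; my_key, my_destructor and
 * my_data are hypothetical):
 *
 *	static uint_t my_key;
 *	...
 *	tsd_create(&my_key, my_destructor);
 *	(void) tsd_set(my_key, my_data);
 *	my_data = tsd_get(my_key);
 *	...
 *	tsd_destroy(&my_key);
 *
 * A key value of zero means "unallocated", which is why tsd_create() hands
 * out keys starting at 1 and tsd_destroy() resets *keyp to 0.
 */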
1759
1760 /*
1761 * Called from thread_exit() to run the destructor function for each tsd
1762 * Locks out tsd_create and tsd_destroy
1763 * Assumes that the destructor *DOES NOT* use tsd
1764 */
1765 void
1766 tsd_exit(void)
1767 {
1768 int i;
1769 struct tsd_thread *tsd = curthread->t_tsd;
1770
1771 if (tsd == NULL)
1772 return;
1773
1774 if (tsd->ts_nkeys == 0) {
1775 kmem_free(tsd, sizeof (*tsd));
1776 curthread->t_tsd = NULL;
1777 return;
1778 }
1779
1780 /*
1781 * lock out tsd_create and tsd_destroy, call
1782 * the destructor, and mark the value as destroyed.
1783 */
1784 mutex_enter(&tsd_mutex);
1785
1786 for (i = 0; i < tsd->ts_nkeys; i++) {
1787 if (tsd->ts_value[i] && tsd_destructor[i])
1788 (*tsd_destructor[i])(tsd->ts_value[i]);
1789 tsd->ts_value[i] = NULL;
1790 }
1791
1792 /*
1793 * remove from linked list of threads with TSD
1794 */
1795 if (tsd->ts_next)
1796 tsd->ts_next->ts_prev = tsd->ts_prev;
1797 if (tsd->ts_prev)
1798 tsd->ts_prev->ts_next = tsd->ts_next;
1799 if (tsd_list == tsd)
1800 tsd_list = tsd->ts_next;
1801
1802 mutex_exit(&tsd_mutex);
1803
1804 /*
1805 * free up the TSD
1806 */
1807 kmem_free(tsd->ts_value, tsd->ts_nkeys * sizeof (void *));
1808 kmem_free(tsd, sizeof (struct tsd_thread));
1809 curthread->t_tsd = NULL;
1810 }
1811
1812 /*
1813 * realloc
1814 */
1815 static void *
1816 tsd_realloc(void *old, size_t osize, size_t nsize)
1817 {
1818 void *new;
1819
1820 new = kmem_zalloc(nsize, KM_SLEEP);
1821 if (old) {
1822 bcopy(old, new, osize);
1823 kmem_free(old, osize);
1824 }
1825 return (new);
1826 }
1827
1828 /*
1829 * Return non-zero if an interrupt is being serviced.
1830 */
1831 int
1832 servicing_interrupt()
1833 {
1834 int onintr = 0;
1835
1836 /* Are we an interrupt thread */
1837 if (curthread->t_flag & T_INTR_THREAD)
1838 return (1);
1839 /* Are we servicing a high level interrupt? */
1840 if (CPU_ON_INTR(CPU)) {
1841 kpreempt_disable();
1842 onintr = CPU_ON_INTR(CPU);
1843 kpreempt_enable();
1844 }
1845 return (onintr);
1846 }
1847
1848
1849 /*
1850 * Change the dispatch priority of a thread in the system.
1851 * Used when raising or lowering a thread's priority.
1852 * (E.g., priority inheritance)
1853 *
 * Since threads are queued according to their priority, we
 * must check the thread's state to determine whether it
1856 * is on a queue somewhere. If it is, we've got to:
1857 *
1858 * o Dequeue the thread.
1859 * o Change its effective priority.
1860 * o Enqueue the thread.
1861 *
1862 * Assumptions: The thread whose priority we wish to change
1863 * must be locked before we call thread_change_(e)pri().
 * The thread_change_(e)pri() functions don't drop the thread
1865 * lock--that must be done by its caller.
1866 */
1867 void
1868 thread_change_epri(kthread_t *t, pri_t disp_pri)
1869 {
1870 uint_t state;
1871
1872 ASSERT(THREAD_LOCK_HELD(t));
1873
1874 /*
1875 * If the inherited priority hasn't actually changed,
1876 * just return.
1877 */
1878 if (t->t_epri == disp_pri)
1879 return;
1880
1881 state = t->t_state;
1882
1883 /*
1884 * If it's not on a queue, change the priority with impunity.
1885 */
1886 if ((state & (TS_SLEEP | TS_RUN | TS_WAIT)) == 0) {
1887 t->t_epri = disp_pri;
1888 if (state == TS_ONPROC) {
1889 cpu_t *cp = t->t_disp_queue->disp_cpu;
1890
1891 if (t == cp->cpu_dispthread)
1892 cp->cpu_dispatch_pri = DISP_PRIO(t);
1893 }
1894 } else if (state == TS_SLEEP) {
1895 /*
1896 * Take the thread out of its sleep queue.
1897 * Change the inherited priority.
1898 * Re-enqueue the thread.
1899 * Each synchronization object exports a function
1900 * to do this in an appropriate manner.
1901 */
1902 SOBJ_CHANGE_EPRI(t->t_sobj_ops, t, disp_pri);
1903 } else if (state == TS_WAIT) {
1904 /*
1905 * Re-enqueue a thread on the wait queue if its
1906 * effective priority needs to change.
1907 */
1908 if (disp_pri != t->t_epri)
1909 waitq_change_pri(t, disp_pri);
1910 } else {
1911 /*
1912 * The thread is on a run queue.
1913 * Note: setbackdq() may not put the thread
1914 * back on the same run queue where it originally
1915 * resided.
1916 */
1917 (void) dispdeq(t);
1918 t->t_epri = disp_pri;
1919 setbackdq(t);
1920 }
1921 schedctl_set_cidpri(t);
1922 }
1923
1924 /*
1925 * Function: Change the t_pri field of a thread.
1926 * Side Effects: Adjust the thread ordering on a run queue
1927 * or sleep queue, if necessary.
1928 * Returns: 1 if the thread was on a run queue, else 0.
1929 */
1930 int
1931 thread_change_pri(kthread_t *t, pri_t disp_pri, int front)
1932 {
1933 uint_t state;
1934 int on_rq = 0;
1935
1936 ASSERT(THREAD_LOCK_HELD(t));
1937
1938 state = t->t_state;
1939 THREAD_WILLCHANGE_PRI(t, disp_pri);
1940
1941 /*
1942 * If it's not on a queue, change the priority with impunity.
1943 */
1944 if ((state & (TS_SLEEP | TS_RUN | TS_WAIT)) == 0) {
1945 t->t_pri = disp_pri;
1946
1947 if (state == TS_ONPROC) {
1948 cpu_t *cp = t->t_disp_queue->disp_cpu;
1949
1950 if (t == cp->cpu_dispthread)
1951 cp->cpu_dispatch_pri = DISP_PRIO(t);
1952 }
1953 } else if (state == TS_SLEEP) {
1954 /*
1955 * If the priority has changed, take the thread out of
1956 * its sleep queue and change the priority.
1957 * Re-enqueue the thread.
1958 * Each synchronization object exports a function
1959 * to do this in an appropriate manner.
1960 */
1961 if (disp_pri != t->t_pri)
1962 SOBJ_CHANGE_PRI(t->t_sobj_ops, t, disp_pri);
1963 } else if (state == TS_WAIT) {
1964 /*
1965 * Re-enqueue a thread on the wait queue if its
1966 * priority needs to change.
1967 */
1968 if (disp_pri != t->t_pri)
1969 waitq_change_pri(t, disp_pri);
1970 } else {
1971 /*
1972 * The thread is on a run queue.
1973 * Note: setbackdq() may not put the thread
1974 * back on the same run queue where it originally
1975 * resided.
1976 *
1977 * We still requeue the thread even if the priority
1978 * is unchanged to preserve round-robin (and other)
1979 * effects between threads of the same priority.
1980 */
1981 on_rq = dispdeq(t);
1982 ASSERT(on_rq);
1983 t->t_pri = disp_pri;
1984 if (front) {
1985 setfrontdq(t);
1986 } else {
1987 setbackdq(t);
1988 }
1989 }
1990 schedctl_set_cidpri(t);
1991 return (on_rq);
1992 }
1993
1994 /*
 * When the kmem_stackinfo tunable is set, fill the kernel thread stack with a
 * specific pattern.
1997 */
1998 static void
1999 stkinfo_begin(kthread_t *t)
2000 {
2001 caddr_t start; /* stack start */
2002 caddr_t end; /* stack end */
2003 uint64_t *ptr; /* pattern pointer */
2004
2005 /*
2006 * Stack grows up or down, see thread_create(),
2007 * compute stack memory area start and end (start < end).
2008 */
2009 if (t->t_stk > t->t_stkbase) {
2010 /* stack grows down */
2011 start = t->t_stkbase;
2012 end = t->t_stk;
2013 } else {
2014 /* stack grows up */
2015 start = t->t_stk;
2016 end = t->t_stkbase;
2017 }
2018
2019 /*
	 * The stackinfo pattern size is 8 bytes.  Ensure proper 8-byte
	 * alignment for start and end within the stack area boundaries
	 * (protection against corrupt t_stkbase/t_stk data).
2023 */
2024 if ((((uintptr_t)start) & 0x7) != 0) {
2025 start = (caddr_t)((((uintptr_t)start) & (~0x7)) + 8);
2026 }
2027 end = (caddr_t)(((uintptr_t)end) & (~0x7));
2028
2029 if ((end <= start) || (end - start) > (1024 * 1024)) {
2030 /* negative or stack size > 1 meg, assume bogus */
2031 return;
2032 }
2033
2034 /* fill stack area with a pattern (instead of zeros) */
2035 ptr = (uint64_t *)((void *)start);
2036 while (ptr < (uint64_t *)((void *)end)) {
2037 *ptr++ = KMEM_STKINFO_PATTERN;
2038 }
2039 }
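
/*
 * Worked example of the alignment adjustment in stkinfo_begin() (the
 * addresses are hypothetical): with t_stkbase == 0x1003 and t_stk == 0x3000
 * on a downward-growing stack, start is rounded up to 0x1008 and end is
 * already 8-byte aligned, so the 64-bit words at 0x1008, 0x1010, ..., 0x2ff8
 * are each filled with KMEM_STKINFO_PATTERN.
 */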
2040
2041
/*
 * Called when the tunable kmem_stackinfo is set: create the stackinfo log if
 * it does not already exist, compute the percentage of the kernel stack that
 * was actually used, and record it in the log if it ranks among the highest
 * percentages seen so far.
 */
2047 static void
2048 stkinfo_end(kthread_t *t)
2049 {
2050 caddr_t start; /* stack start */
2051 caddr_t end; /* stack end */
2052 uint64_t *ptr; /* pattern pointer */
2053 size_t stksz; /* stack size */
2054 size_t smallest = 0;
2055 size_t percent = 0;
2056 uint_t index = 0;
2057 uint_t i;
2058 static size_t smallest_percent = (size_t)-1;
2059 static uint_t full = 0;
2060
	/* create the stackinfo log if it doesn't already exist */
2062 mutex_enter(&kmem_stkinfo_lock);
2063 if (kmem_stkinfo_log == NULL) {
2064 kmem_stkinfo_log = (kmem_stkinfo_t *)
2065 kmem_zalloc(KMEM_STKINFO_LOG_SIZE *
2066 (sizeof (kmem_stkinfo_t)), KM_NOSLEEP);
2067 if (kmem_stkinfo_log == NULL) {
2068 mutex_exit(&kmem_stkinfo_lock);
2069 return;
2070 }
2071 }
2072 mutex_exit(&kmem_stkinfo_lock);
2073
	/*
	 * The stack grows either up or down (see thread_create());
	 * compute the stack memory area start and end so that start < end.
	 */
2078 if (t->t_stk > t->t_stkbase) {
2079 /* stack grows down */
2080 start = t->t_stkbase;
2081 end = t->t_stk;
2082 } else {
2083 /* stack grows up */
2084 start = t->t_stk;
2085 end = t->t_stkbase;
2086 }
2087
2088 /* stack size as found in kthread_t */
2089 stksz = end - start;
2090
	/*
	 * The stackinfo pattern size is 8 bytes. Ensure proper 8-byte
	 * alignment of start and end within the stack area boundaries
	 * (protection against corrupt t_stkbase/t_stk data).
	 */
2096 if ((((uintptr_t)start) & 0x7) != 0) {
2097 start = (caddr_t)((((uintptr_t)start) & (~0x7)) + 8);
2098 }
2099 end = (caddr_t)(((uintptr_t)end) & (~0x7));
2100
2101 if ((end <= start) || (end - start) > (1024 * 1024)) {
		/* non-positive size or larger than 1MB: assume bogus bounds */
2103 return;
2104 }
2105
	/* scan until the first word that no longer holds the pattern */
2107 if (t->t_stk > t->t_stkbase) {
2108 /* stack grows down */
2109 #if defined(__i386) || defined(__amd64)
		/*
		 * Six longs are pushed on the stack by thread_load(); skip
		 * them, so that if the kthread has never run, percent is
		 * zero. 8-byte alignment is preserved on a 32-bit kernel as
		 * well, since 6 x 4 = 24 and 24 is a multiple of 8.
		 */
2117 end -= (6 * sizeof (long));
2118 #endif
2119 ptr = (uint64_t *)((void *)start);
2120 while (ptr < (uint64_t *)((void *)end)) {
2121 if (*ptr != KMEM_STKINFO_PATTERN) {
2122 percent = stkinfo_percent(end,
2123 start, (caddr_t)ptr);
2124 break;
2125 }
2126 ptr++;
2127 }
2128 } else {
2129 /* stack grows up */
2130 ptr = (uint64_t *)((void *)end);
2131 ptr--;
2132 while (ptr >= (uint64_t *)((void *)start)) {
2133 if (*ptr != KMEM_STKINFO_PATTERN) {
2134 percent = stkinfo_percent(start,
2135 end, (caddr_t)ptr);
2136 break;
2137 }
2138 ptr--;
2139 }
2140 }
2141
2142 DTRACE_PROBE3(stack__usage, kthread_t *, t,
2143 size_t, stksz, size_t, percent);
2144
2145 if (percent == 0) {
2146 return;
2147 }
2148
2149 mutex_enter(&kmem_stkinfo_lock);
2150 if (full == KMEM_STKINFO_LOG_SIZE && percent < smallest_percent) {
		/*
		 * The log is full and every entry already records a higher
		 * percentage than this one.
		 */
2154 mutex_exit(&kmem_stkinfo_lock);
2155 return;
2156 }
2157
	/* keep a log of the threads with the highest stack usage */
2159 for (i = 0; i < KMEM_STKINFO_LOG_SIZE; i++) {
2160 if (kmem_stkinfo_log[i].percent == 0) {
2161 index = i;
2162 full++;
2163 break;
2164 }
2165 if (smallest == 0) {
2166 smallest = kmem_stkinfo_log[i].percent;
2167 index = i;
2168 continue;
2169 }
2170 if (kmem_stkinfo_log[i].percent < smallest) {
2171 smallest = kmem_stkinfo_log[i].percent;
2172 index = i;
2173 }
2174 }
2175
2176 if (percent >= kmem_stkinfo_log[index].percent) {
2177 kmem_stkinfo_log[index].kthread = (caddr_t)t;
2178 kmem_stkinfo_log[index].t_startpc = (caddr_t)t->t_startpc;
2179 kmem_stkinfo_log[index].start = start;
2180 kmem_stkinfo_log[index].stksz = stksz;
2181 kmem_stkinfo_log[index].percent = percent;
2182 kmem_stkinfo_log[index].t_tid = t->t_tid;
2183 kmem_stkinfo_log[index].cmd[0] = '\0';
2184 if (t->t_tid != 0) {
2185 stksz = strlen((t->t_procp)->p_user.u_comm);
2186 if (stksz >= KMEM_STKINFO_STR_SIZE) {
2187 stksz = KMEM_STKINFO_STR_SIZE - 1;
2188 kmem_stkinfo_log[index].cmd[stksz] = '\0';
2189 } else {
2190 stksz += 1;
2191 }
2192 (void) memcpy(kmem_stkinfo_log[index].cmd,
2193 (t->t_procp)->p_user.u_comm, stksz);
2194 }
2195 if (percent < smallest_percent) {
2196 smallest_percent = percent;
2197 }
2198 }
2199 mutex_exit(&kmem_stkinfo_lock);
2200 }
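
/*
 * Example of the replacement policy in stkinfo_end() (the numbers are
 * hypothetical): once all KMEM_STKINFO_LOG_SIZE slots are in use and the
 * smallest recorded percentage is 40, a new sample of 35% is dropped
 * immediately, while a new sample of 55% overwrites the slot that currently
 * holds the smallest percentage.
 */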
2201
/*
 * Called when the tunable kmem_stackinfo is set: compute the stack
 * utilization percentage, given the stack bounds and the deepest address
 * (sp) found to have been written over the fill pattern.
 */
2205 static size_t
2206 stkinfo_percent(caddr_t t_stk, caddr_t t_stkbase, caddr_t sp)
2207 {
2208 size_t percent;
2209 size_t s;
2210
2211 if (t_stk > t_stkbase) {
2212 /* stack grows down */
2213 if (sp > t_stk) {
2214 return (0);
2215 }
2216 if (sp < t_stkbase) {
2217 return (100);
2218 }
2219 percent = t_stk - sp + 1;
2220 s = t_stk - t_stkbase + 1;
2221 } else {
2222 /* stack grows up */
2223 if (sp < t_stk) {
2224 return (0);
2225 }
2226 if (sp > t_stkbase) {
2227 return (100);
2228 }
2229 percent = sp - t_stk + 1;
2230 s = t_stkbase - t_stk + 1;
2231 }
2232 percent = ((100 * percent) / s) + 1;
2233 if (percent > 100) {
2234 percent = 100;
2235 }
2236 return (percent);
2237 }
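
/*
 * Worked example (hypothetical numbers) for a downward-growing stack: if
 * t_stk - t_stkbase is 8192 bytes and the deepest non-pattern word found is
 * 2048 bytes below t_stk, then percent = 2048 + 1 = 2049 and
 * s = 8192 + 1 = 8193, so the result is (100 * 2049) / 8193 + 1 == 26,
 * i.e. roughly a quarter of the stack was used.  The final +1 guarantees a
 * non-zero result for any detected use, and the value is capped at 100.
 */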
2238
/*
 * NOTE: This will silently truncate a name longer than THREAD_NAME_MAX - 1
 * characters. It is expected that callers (acting on behalf of userland
 * clients) will perform any required checks to return the correct error
 * semantics. It is also expected that callers acting on behalf of userland
 * clients have performed any necessary permission checks.
 */
2246 int
2247 thread_setname(kthread_t *t, const char *name)
2248 {
2249 char *buf = NULL;
2250
2251 /*
2252 * We optimistically assume that a thread's name will only be set
2253 * once and so allocate memory in preparation of setting t_name.
2254 * If it turns out a name has already been set, we just discard (free)
2255 * the buffer we just allocated and reuse the current buffer
2256 * (as all should be THREAD_NAME_MAX large).
2257 *
2258 * Such an arrangement means over the lifetime of a kthread_t, t_name
2259 * is either NULL or has one value (the address of the buffer holding
2260 * the current thread name). The assumption is that most kthread_t
2261 * instances will not have a name assigned, so dynamically allocating
2262 * the memory should minimize the footprint of this feature, but by
2263 * having the buffer persist for the life of the thread, it simplifies
2264 * usage in highly constrained situations (e.g. dtrace).
2265 */
2266 if (name != NULL && name[0] != '\0') {
2267 for (size_t i = 0; name[i] != '\0'; i++) {
2268 if (!isprint(name[i]))
2269 return (EINVAL);
2270 }
2271
2272 buf = kmem_zalloc(THREAD_NAME_MAX, KM_SLEEP);
2273 (void) strlcpy(buf, name, THREAD_NAME_MAX);
2274 }
2275
2276 mutex_enter(&ttoproc(t)->p_lock);
2277 if (t->t_name == NULL) {
2278 t->t_name = buf;
2279 } else {
2280 if (buf != NULL) {
2281 (void) strlcpy(t->t_name, name, THREAD_NAME_MAX);
2282 kmem_free(buf, THREAD_NAME_MAX);
2283 } else {
2284 bzero(t->t_name, THREAD_NAME_MAX);
2285 }
2286 }
2287 mutex_exit(&ttoproc(t)->p_lock);
2288 return (0);
2289 }
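
/*
 * Illustrative use of thread_setname() above (the caller and name are
 * hypothetical): a subsystem that wants a human-readable name on one of its
 * worker threads can simply do
 *
 *	(void) thread_setname(curthread, "mysubsys worker");
 *
 * A non-printable character in the name yields EINVAL; passing NULL or an
 * empty string clears any existing name while retaining the buffer.
 */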
2290
2291 int
2292 thread_vsetname(kthread_t *t, const char *fmt, ...)
2293 {
2294 char name[THREAD_NAME_MAX];
2295 va_list va;
2296 int rc;
2297
2298 va_start(va, fmt);
2299 rc = vsnprintf(name, sizeof (name), fmt, va);
2300 va_end(va);
2301
2302 if (rc < 0)
2303 return (EINVAL);
2304
2305 if (rc >= sizeof (name))
2306 return (ENAMETOOLONG);
2307
2308 return (thread_setname(t, name));
2309 }
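
/*
 * Illustrative use of the printf-style variant (the names below are
 * hypothetical): callers that name per-instance threads can format the
 * instance number directly, e.g.
 *
 *	(void) thread_vsetname(t, "mysubsys_worker_%d", instance);
 *
 * A formatted name that would not fit in THREAD_NAME_MAX bytes is rejected
 * with ENAMETOOLONG rather than being truncated.
 */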