Print this page
OS-8347 Update SmartOS to support illumos 13917
| Split |
Close |
| Expand all |
| Collapse all |
--- old/usr/src/uts/common/brand/lx/os/lx_misc.c
+++ new/usr/src/uts/common/brand/lx/os/lx_misc.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
|
↓ open down ↓ |
16 lines elided |
↑ open up ↑ |
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
23 23 * Use is subject to license terms.
24 24 */
25 25
26 26 /*
27 - * Copyright 2019 Joyent, Inc.
27 + * Copyright 2022 Joyent, Inc.
28 28 */
29 29
30 30 #include <sys/errno.h>
31 31 #include <sys/systm.h>
32 32 #include <sys/archsystm.h>
33 33 #include <sys/privregs.h>
34 34 #include <sys/exec.h>
35 35 #include <sys/lwp.h>
36 36 #include <sys/sem.h>
37 37 #include <sys/brand.h>
38 38 #include <sys/lx_brand.h>
39 39 #include <sys/lx_misc.h>
40 40 #include <sys/lx_siginfo.h>
41 41 #include <sys/lx_futex.h>
42 42 #include <lx_errno.h>
43 43 #include <sys/lx_userhz.h>
44 44 #include <sys/cmn_err.h>
45 45 #include <sys/siginfo.h>
46 46 #include <sys/contract/process_impl.h>
47 47 #include <sys/x86_archext.h>
48 48 #include <sys/sdt.h>
49 49 #include <lx_signum.h>
|
↓ open down ↓ |
12 lines elided |
↑ open up ↑ |
50 50 #include <lx_syscall.h>
51 51 #include <sys/proc.h>
52 52 #include <sys/procfs.h>
53 53 #include <net/if.h>
54 54 #include <inet/ip6.h>
55 55 #include <sys/sunddi.h>
56 56 #include <sys/dlpi.h>
57 57 #include <sys/sysmacros.h>
58 58
/* Linux specific functions and definitions */

/*
 * Context-switch hooks, installed via ctxop_install():
 *  - lx_save clears the brand TLS entries in the GDT when a Linux thread is
 *    switched off-CPU, and is also run at thread exit (ct_exit).
 *  - lx_restore reloads those TLS entries when the thread goes back on-CPU.
 * Both receive the klwp_t as an opaque void * argument.
 */
static void lx_save(void *);
static void lx_restore(void *);

/* Context op template. */
static const struct ctxop_template lx_ctxop_template = {
	.ct_rev = CTXOP_TPL_REV,
	.ct_save = lx_save,
	.ct_restore = lx_restore,
	.ct_exit = lx_save,
};

63 71 /*
64 72 * Set the return code for the forked child, always zero
65 73 */
66 74 /*ARGSUSED*/
67 75 void
68 76 lx_setrval(klwp_t *lwp, int v1, int v2)
69 77 {
70 78 lwptoregs(lwp)->r_r0 = 0;
71 79 }
72 80
/*
 * Reset process state on exec(2).
 *
 * Invoked as the brand b_exec hook for the current thread; clears brand
 * handlers, re-homes the thread's pids, notifies ptrace(2), and resets the
 * TLS/stack bookkeeping for the fresh process image.
 */
void
lx_exec()
{
	klwp_t *lwp = ttolwp(curthread);
	struct lx_lwp_data *lwpd = lwptolxlwp(lwp);
	proc_t *p = ttoproc(curthread);
	lx_proc_data_t *pd = ptolxproc(p);
	struct regs *rp = lwptoregs(lwp);

	/* b_exec is called without p_lock held */
	VERIFY(MUTEX_NOT_HELD(&p->p_lock));

	/*
	 * Any l_handler handlers set as a result of B_REGISTER are now
	 * invalid; clear them.
	 */
	pd->l_handler = (uintptr_t)NULL;

	/*
	 * If this was a multi-threaded Linux process and this lwp wasn't the
	 * main lwp, then we need to make its Illumos and Linux PIDs match.
	 */
	if (curthread->t_tid != 1) {
		lx_pid_reassign(curthread);
	}

	/*
	 * Inform ptrace(2) that we are processing an execve(2) call so that if
	 * we are traced we can post either the PTRACE_EVENT_EXEC event or the
	 * legacy SIGTRAP.
	 */
	(void) lx_ptrace_stop_for_option(LX_PTRACE_O_TRACEEXEC, B_FALSE, 0, 0);

	/* clear the fs/gsbase values until the app. can reinitialize them */
	lwpd->br_lx_fsbase = (uintptr_t)NULL;
	lwpd->br_ntv_fsbase = (uintptr_t)NULL;
	lwpd->br_lx_gsbase = (uintptr_t)NULL;
	lwpd->br_ntv_gsbase = (uintptr_t)NULL;

	/*
	 * Clear the native stack flags. This will be reinitialised by
	 * lx_init() in the new process image.
	 */
	lwpd->br_stack_mode = LX_STACK_MODE_PREINIT;
	lwpd->br_ntv_stack = 0;
	lwpd->br_ntv_stack_current = 0;

	/* (Re)install the brand save/restore context ops for this thread. */
	ctxop_install(lwptot(lwp), &lx_ctxop_template, lwp);

	/*
	 * clear out the tls array
	 */
	bzero(lwpd->br_tls, sizeof (lwpd->br_tls));

	/*
	 * reset the tls entries in the gdt
	 */
	kpreempt_disable();
	lx_restore(lwp);
	kpreempt_enable();

	/*
	 * The exec syscall doesn't return (so we don't call lx_syscall_return)
	 * but for our ptrace emulation we need to do this so that a tracer
	 * does not get out of sync. We know that by the time this lx_exec
	 * function is called that the exec has succeeded.
	 */
	rp->r_r0 = 0;
	(void) lx_ptrace_stop(LX_PR_SYSEXIT);
}
147 154
/*
 * Release per-lwp brand state that must be torn down while the process is
 * still intact: ptrace bookkeeping, the TP_KTHREAD flag, the robust futex
 * list, and the brand TLS entries in the GDT.  Safe to run more than once
 * for the same lwp (see comment at the bottom).
 */
static void
lx_cleanlwp(klwp_t *lwp, proc_t *p)
{
	struct lx_lwp_data *lwpd = lwptolxlwp(lwp);
	void *rb_list = NULL;

	VERIFY(lwpd != NULL);

	mutex_enter(&p->p_lock);
	if ((lwpd->br_ptrace_flags & LX_PTF_EXITING) == 0) {
		lx_ptrace_exit(p, lwp);
	}

	/*
	 * While we have p_lock, clear the TP_KTHREAD flag. This is needed
	 * to prevent races within lx procfs. It's fine for prchoose() to pick
	 * this thread now since it is exiting and no longer blocked in the
	 * kernel.
	 */
	lwptot(lwp)->t_proc_flag &= ~TP_KTHREAD;

	/*
	 * While we have p_lock, safely grab any robust_list references and
	 * clear the lwp field.
	 */
	sprlock_proc(p);
	rb_list = lwpd->br_robust_list;
	lwpd->br_robust_list = NULL;
	sprunlock(p);

	if (rb_list != NULL) {
		lx_futex_robust_exit((uintptr_t)rb_list, lwpd->br_pid);
	}

	/*
	 * We need to run our context exit operation (lx_save) here to ensure
	 * we don't leave any garbage around. This is necessary to handle the
	 * following calling sequence:
	 *    exit -> proc_exit -> lx_freelwp -> ctxop_remove
	 * That is, when our branded process exits, proc_exit will call our
	 * lx_freelwp brand hook which does call this function (lx_cleanlwp),
	 * but lx_freelwp also removes our context exit operation. The context
	 * exit functions are run by exitctx, which is called by either
	 * lwp_exit or thread_exit. The thread_exit function is called at the
	 * end of proc_exit when we'll swtch() to another thread, but by then
	 * our context exit function has been removed.
	 *
	 * It's ok if this function happens to be called more than once (for
	 * example, if we exec a native binary).
	 */
	kpreempt_disable();
	lx_save(lwp);
	kpreempt_enable();
}
202 209
/*
 * Brand hook run when an lwp exits: clean up brand state, wake any futex
 * waiters on the clear-child-tid address (CLONE_CHILD_CLEARTID emulation),
 * and deliver the clone() exit signal to the appropriate parent thread or
 * process.
 */
void
lx_exitlwp(klwp_t *lwp)
{
	struct lx_lwp_data *lwpd = lwptolxlwp(lwp);
	proc_t *p = lwptoproc(lwp);
	kthread_t *t;
	sigqueue_t *sqp = NULL;
	pid_t ppid;
	id_t ptid;

	VERIFY(MUTEX_NOT_HELD(&p->p_lock));

	if (lwpd == NULL) {
		/* second time thru' */
		return;
	}

	lx_cleanlwp(lwp, p);

	if (lwpd->br_clear_ctidp != NULL) {
		/* Zero the user-space tid word, then wake one futex waiter. */
		(void) suword32(lwpd->br_clear_ctidp, 0);
		(void) lx_futex((uintptr_t)lwpd->br_clear_ctidp, FUTEX_WAKE, 1,
		    (uintptr_t)NULL, (uintptr_t)NULL, 0);
		lwpd->br_clear_ctidp = NULL;
	}

	if (lwpd->br_signal != 0) {
		/*
		 * The first thread in a process doesn't cause a signal to
		 * be sent when it exits. It was created by a fork(), not
		 * a clone(), so the parent should get signalled when the
		 * process exits.
		 */
		if (lwpd->br_ptid == -1)
			goto free;

		sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);
		/*
		 * If br_ppid is 0, it means this is a CLONE_PARENT thread,
		 * so the signal goes to the parent process - not to a
		 * specific thread in this process.
		 */
		p = lwptoproc(lwp);
		if (lwpd->br_ppid == 0) {
			mutex_enter(&p->p_lock);
			ppid = p->p_ppid;
			t = NULL;
		} else {
			/*
			 * If we have been reparented to init or if our
			 * parent thread is gone, then nobody gets
			 * signaled.
			 */
			if ((lx_lwp_ppid(lwp, &ppid, &ptid) == 1) ||
			    (ptid == -1))
				goto free;

			/* Look up the parent under pidlock, then swap to
			 * its p_lock before dropping pidlock. */
			mutex_enter(&pidlock);
			if ((p = prfind(ppid)) == NULL || p->p_stat == SIDL) {
				mutex_exit(&pidlock);
				goto free;
			}
			mutex_enter(&p->p_lock);
			mutex_exit(&pidlock);

			if ((t = idtot(p, ptid)) == NULL) {
				mutex_exit(&p->p_lock);
				goto free;
			}
		}

		sqp->sq_info.si_signo = lwpd->br_signal;
		sqp->sq_info.si_code = lwpd->br_exitwhy;
		sqp->sq_info.si_status = lwpd->br_exitwhat;
		sqp->sq_info.si_pid = lwpd->br_pid;
		sqp->sq_info.si_uid = crgetruid(CRED());
		sigaddqa(p, t, sqp);
		mutex_exit(&p->p_lock);
		/* Ownership of sqp passed to sigaddqa(); don't free below. */
		sqp = NULL;
	}

free:
	if (lwpd->br_scall_args != NULL) {
		ASSERT(lwpd->br_args_size > 0);
		kmem_free(lwpd->br_scall_args, lwpd->br_args_size);
	}
	if (sqp)
		kmem_free(sqp, sizeof (sigqueue_t));
}
292 299
/*
 * Brand hook to free all per-lwp brand data.  Called at lwp destruction and
 * at de-branding; tears down cgroup accounting, the syscall interposer, the
 * brand context ops, the emulated pid, and the lx_lwp_data itself.
 */
void
lx_freelwp(klwp_t *lwp)
{
	struct lx_lwp_data *lwpd = lwptolxlwp(lwp);
	proc_t *p = lwptoproc(lwp);
	lx_zone_data_t *lxzdata;
	vfs_t *cgrp;

	VERIFY(MUTEX_NOT_HELD(&p->p_lock));

	if (lwpd == NULL) {
		/*
		 * There is one case where an LX branded process will possess
		 * LWPs which lack their own brand data. During the course of
		 * executing native binary, the process will be preemptively
		 * branded to allow hooks such as b_native_exec to function.
		 * If that process possesses multiple LWPS, they will _not_ be
		 * branded since they will exit if the exec succeeds. It's
		 * during this LWP exit that lx_freelwp would be called on an
		 * unbranded LWP. When that is the case, it is acceptable to
		 * bypass the hook.
		 */
		return;
	}

	/* cgroup integration */
	lxzdata = ztolxzd(p->p_zone);
	mutex_enter(&lxzdata->lxzd_lock);
	cgrp = lxzdata->lxzd_cgroup;
	if (cgrp != NULL) {
		/* Hold the cgroup vfs so it stays live after the lock drop. */
		VFS_HOLD(cgrp);
		mutex_exit(&lxzdata->lxzd_lock);
		ASSERT(lx_cgrp_freelwp != NULL);
		(*lx_cgrp_freelwp)(cgrp, lwpd->br_cgroupid, lwptot(lwp)->t_tid,
		    lwpd->br_pid);
		VFS_RELE(cgrp);
	} else {
		mutex_exit(&lxzdata->lxzd_lock);
	}

	/*
	 * It is possible for the lx_freelwp hook to be called without a prior
	 * call to lx_exitlwp being made. This happens as part of lwp
	 * de-branding when a native binary is executed from a branded process.
	 *
	 * To cover all cases, lx_cleanlwp is called from lx_exitlwp as well
	 * here in lx_freelwp. When the second call is redundant, the
	 * resources will already be freed and no work will be needed.
	 */
	lx_cleanlwp(lwp, p);

	/*
	 * Remove our system call interposer.
	 */
	lwp->lwp_brand_syscall = NULL;

	/*
	 * If this process is being de-branded during an exec(),
	 * the LX ctxops may have already been removed, so the result
	 * from ctxop_remove is irrelevant.
	 */
	(void) ctxop_remove(lwptot(lwp), &lx_ctxop_template, lwp);
	if (lwpd->br_pid != 0) {
		lx_pid_rele(lwptoproc(lwp)->p_pid, lwptot(lwp)->t_tid);
	}

	/*
	 * Discard the affinity mask.
	 */
	VERIFY(lwpd->br_affinitymask != NULL);
	cpuset_free(lwpd->br_affinitymask);
	lwpd->br_affinitymask = NULL;

	/*
	 * Ensure that lx_ptrace_exit() has been called to detach
	 * ptrace(2) tracers and tracees.
	 */
	VERIFY(lwpd->br_ptrace_tracer == NULL);
	VERIFY(lwpd->br_ptrace_accord == NULL);

	lwp->lwp_brand = NULL;
	kmem_free(lwpd, sizeof (struct lx_lwp_data));
}
372 383
/*
 * Pre-allocate the brand lwp data (and, for LWPs beyond the first, an
 * emulated Linux pid) before the new lwp is created.  Runs without p_lock
 * held; returns NULL if a pid cannot be allocated.  The result is later
 * consumed by lx_initlwp() or released via lx_lwpdata_free().
 */
void *
lx_lwpdata_alloc(proc_t *p)
{
	lx_lwp_data_t *lwpd;
	struct lx_pid *lpidp;
	cpuset_t *affmask;
	pid_t newpid = 0;
	struct pid *pidp = NULL;

	VERIFY(MUTEX_NOT_HELD(&p->p_lock));

	/*
	 * LWPs beyond the first will require a pid to be allocated to emulate
	 * Linux's goofy thread model. While this allocation may be
	 * unnecessary when a single-lwp process undergoes branding, it cannot
	 * be performed during b_initlwp due to p_lock being held.
	 */
	if (p->p_lwpcnt > 0) {
		if ((newpid = pid_allocate(p, 0, 0)) < 0) {
			/* pid table is full: fail the lwp creation */
			return (NULL);
		}
		pidp = pid_find(newpid);
	}

	lwpd = kmem_zalloc(sizeof (struct lx_lwp_data), KM_SLEEP);
	lpidp = kmem_zalloc(sizeof (struct lx_pid), KM_SLEEP);
	affmask = cpuset_alloc(KM_SLEEP);

	lpidp->lxp_lpid = newpid;
	lpidp->lxp_pidp = pidp;
	lwpd->br_lpid = lpidp;
	lwpd->br_affinitymask = affmask;

	return (lwpd);
}
408 419
409 420 /*
410 421 * Free lwp brand data if an error occurred during lwp_create.
411 422 * Otherwise, lx_freelwp will be used to free the resources after they're
412 423 * associated with the lwp via lx_initlwp.
413 424 */
414 425 void
415 426 lx_lwpdata_free(void *lwpbd)
416 427 {
417 428 lx_lwp_data_t *lwpd = (lx_lwp_data_t *)lwpbd;
418 429 VERIFY(lwpd != NULL);
419 430 VERIFY(lwpd->br_lpid != NULL);
420 431 VERIFY(lwpd->br_affinitymask != NULL);
421 432
422 433 cpuset_free(lwpd->br_affinitymask);
423 434 if (lwpd->br_lpid->lxp_pidp != NULL) {
424 435 (void) pid_rele(lwpd->br_lpid->lxp_pidp);
425 436 }
426 437 kmem_free(lwpd->br_lpid, sizeof (*lwpd->br_lpid));
427 438 kmem_free(lwpd, sizeof (*lwpd));
428 439 }
429 440
/*
 * Brand hook to initialize per-lwp brand data for a new lwp.  Called with
 * p_lock held; consumes the lx_lwp_data / lx_pid pre-allocated by
 * lx_lwpdata_alloc() and wires up context ops, syscall interposition, and
 * cgroup accounting.
 */
void
lx_initlwp(klwp_t *lwp, void *lwpbd)
{
	lx_lwp_data_t *lwpd = (lx_lwp_data_t *)lwpbd;
	lx_lwp_data_t *plwpd = ttolxlwp(curthread);
	kthread_t *tp = lwptot(lwp);
	proc_t *p = lwptoproc(lwp);
	lx_zone_data_t *lxzdata;
	vfs_t *cgrp;

	VERIFY(MUTEX_HELD(&p->p_lock));
	VERIFY(lwp->lwp_brand == NULL);

	lwpd->br_exitwhy = CLD_EXITED;
	lwpd->br_lwp = lwp;
	lwpd->br_clear_ctidp = NULL;
	lwpd->br_set_ctidp = NULL;
	lwpd->br_signal = 0;
	lwpd->br_stack_mode = LX_STACK_MODE_PREINIT;
	cpuset_all(lwpd->br_affinitymask);

	/*
	 * The first thread in a process has ppid set to the parent
	 * process's pid, and ptid set to -1. Subsequent threads in the
	 * process have their ppid set to the pid of the thread that
	 * created them, and their ptid to that thread's tid.
	 */
	if (tp->t_next == tp) {
		lwpd->br_ppid = tp->t_procp->p_ppid;
		lwpd->br_ptid = -1;
	} else if (plwpd != NULL) {
		bcopy(plwpd->br_tls, lwpd->br_tls, sizeof (lwpd->br_tls));
		lwpd->br_ppid = plwpd->br_pid;
		lwpd->br_ptid = curthread->t_tid;
		/* The child inherits the fs/gsbase values from the parent */
		lwpd->br_lx_fsbase = plwpd->br_lx_fsbase;
		lwpd->br_ntv_fsbase = plwpd->br_ntv_fsbase;
		lwpd->br_lx_gsbase = plwpd->br_lx_gsbase;
		lwpd->br_ntv_gsbase = plwpd->br_ntv_gsbase;
	} else {
		/*
		 * Oddball case: the parent thread isn't a Linux process.
		 */
		lwpd->br_ppid = 0;
		lwpd->br_ptid = -1;
	}
	lwp->lwp_brand = lwpd;

	/*
	 * When during lx_lwpdata_alloc, we must decide whether or not to
	 * allocate a new pid to associate with the lwp. Since p_lock is not
	 * held at that point, the only time we can guarantee a new pid isn't
	 * needed is when p_lwpcnt == 0. This is because other lwps won't be
	 * present to race with us with regards to pid allocation.
	 *
	 * This means that in all other cases (where p_lwpcnt > 0), we expect
	 * that lx_lwpdata_alloc will allocate a pid for us to use here, even
	 * if it is uneeded. If this process is undergoing an exec, for
	 * example, the single existing lwp will not need a new pid when it is
	 * rebranded. In that case, lx_pid_assign will free the uneeded pid.
	 */
	VERIFY(lwpd->br_lpid->lxp_pidp != NULL || p->p_lwpcnt == 0);

	lx_pid_assign(tp, lwpd->br_lpid);
	lwpd->br_tgid = lwpd->br_pid;
	/*
	 * Having performed the lx pid assignement, the lpid reference is no
	 * longer needed. The underlying data will be freed during lx_freelwp.
	 */
	lwpd->br_lpid = NULL;

	/* Install the brand TLS save/restore context ops. */
	ctxop_install(lwptot(lwp), &lx_ctxop_template, lwp);

	/*
	 * Install branded system call hooks for this LWP:
	 */
	lwp->lwp_brand_syscall = lx_syscall_enter;

	/*
	 * The new LWP inherits the parent LWP cgroup ID.
	 */
	if (plwpd != NULL) {
		lwpd->br_cgroupid = plwpd->br_cgroupid;
	}
	/*
	 * The new LWP inherits the parent LWP emulated scheduling info.
	 */
	if (plwpd != NULL) {
		lwpd->br_schd_class = plwpd->br_schd_class;
		lwpd->br_schd_pri = plwpd->br_schd_pri;
		lwpd->br_schd_flags = plwpd->br_schd_flags;
		lwpd->br_schd_runtime = plwpd->br_schd_runtime;
		lwpd->br_schd_deadline = plwpd->br_schd_deadline;
		lwpd->br_schd_period = plwpd->br_schd_period;
	}
	lxzdata = ztolxzd(p->p_zone);
	mutex_enter(&lxzdata->lxzd_lock);
	cgrp = lxzdata->lxzd_cgroup;
	if (cgrp != NULL) {
		VFS_HOLD(cgrp);
		mutex_exit(&lxzdata->lxzd_lock);
		ASSERT(lx_cgrp_initlwp != NULL);
		(*lx_cgrp_initlwp)(cgrp, lwpd->br_cgroupid, lwptot(lwp)->t_tid,
		    lwpd->br_pid);
		VFS_RELE(cgrp);
	} else {
		mutex_exit(&lxzdata->lxzd_lock);
	}
}
540 550
541 551 void
542 552 lx_initlwp_post(klwp_t *lwp)
543 553 {
544 554 lx_lwp_data_t *plwpd = ttolxlwp(curthread);
545 555 /*
546 556 * If the parent LWP has a ptrace(2) tracer, the new LWP may
547 557 * need to inherit that same tracer.
548 558 */
549 559 if (plwpd != NULL) {
550 560 lx_ptrace_inherit_tracer(plwpd, lwptolxlwp(lwp));
551 561 }
552 562 }
553 563
/*
 * There is no need to have any locking for either the source or
 * destination struct lx_lwp_data structs. This is always run in the
 * thread context of the source thread, and the destination thread is
 * always newly created and not referred to from anywhere else.
 */
void
lx_forklwp(klwp_t *srclwp, klwp_t *dstlwp)
{
	struct lx_lwp_data *src = srclwp->lwp_brand;
	struct lx_lwp_data *dst = dstlwp->lwp_brand;

	dst->br_ppid = src->br_pid;
	dst->br_ptid = lwptot(srclwp)->t_tid;
	bcopy(src->br_tls, dst->br_tls, sizeof (dst->br_tls));

	switch (src->br_stack_mode) {
	case LX_STACK_MODE_BRAND:
	case LX_STACK_MODE_NATIVE:
		/*
		 * The parent LWP has an alternate stack installed.
		 * The child LWP should have the same stack base and extent.
		 */
		dst->br_stack_mode = src->br_stack_mode;
		dst->br_ntv_stack = src->br_ntv_stack;
		dst->br_ntv_stack_current = src->br_ntv_stack_current;
		break;

	default:
		/*
		 * Otherwise, clear the stack data for this LWP.
		 */
		dst->br_stack_mode = LX_STACK_MODE_PREINIT;
		dst->br_ntv_stack = 0;
		dst->br_ntv_stack_current = 0;
	}

	/*
	 * copy only these flags
	 */
	dst->br_lwp_flags = src->br_lwp_flags & BR_CPU_BOUND;
	dst->br_scall_args = NULL;
	lx_affinity_forklwp(srclwp, dstlwp);

	/*
	 * Flag so child doesn't ptrace-stop on syscall exit.
	 */
	dst->br_ptrace_flags |= LX_PTF_NOSTOP;

	if (src->br_clone_grp_flags != 0) {
		lx_clone_grp_enter(src->br_clone_grp_flags, lwptoproc(srclwp),
		    lwptoproc(dstlwp));
		/* clone group no longer pending on this thread */
		src->br_clone_grp_flags = 0;
	}
}
610 620
611 621 /*
612 622 * When switching a Linux process off the CPU, clear its GDT entries.
613 623 */
614 624 /* ARGSUSED */
615 625 static void
616 -lx_save(klwp_t *t)
626 +lx_save(void *arg)
617 627 {
628 + klwp_t *t = (klwp_t *)arg;
618 629 int i;
619 630
620 631 #if defined(__amd64)
621 632 reset_sregs();
622 633 #endif
623 634 for (i = 0; i < LX_TLSNUM; i++)
624 635 gdt_update_usegd(GDT_TLSMIN + i, &null_udesc);
625 636 }
626 637
627 638 /*
628 639 * When switching a Linux process on the CPU, set its GDT entries.
629 640 *
630 641 * For 64-bit code we don't have to worry about explicitly setting the
631 642 * %fsbase via wrmsr(MSR_AMD_FSBASE) here. Instead, that should happen
632 643 * automatically in update_sregs if we are executing in user-land. If this
633 644 * is the case then pcb_rupdate should be set.
634 645 */
635 646 static void
636 -lx_restore(klwp_t *t)
647 +lx_restore(void *arg)
637 648 {
649 + klwp_t *t = (klwp_t *)arg;
638 650 struct lx_lwp_data *lwpd = lwptolxlwp(t);
639 651 user_desc_t *tls;
640 652 int i;
641 653
642 654 ASSERT(lwpd);
643 655
644 656 tls = lwpd->br_tls;
645 657 for (i = 0; i < LX_TLSNUM; i++)
646 658 gdt_update_usegd(GDT_TLSMIN + i, &tls[i]);
647 659 }
648 660
649 661 void
650 662 lx_set_gdt(int entry, user_desc_t *descrp)
651 663 {
652 664
653 665 gdt_update_usegd(entry, descrp);
654 666 }
655 667
/* Reset a GDT slot back to the null descriptor. */
void
lx_clear_gdt(int entry)
{
	gdt_update_usegd(entry, &null_udesc);
}
661 673
/* Stub for unimplemented emulated syscalls: always fails with ENOSYS. */
longlong_t
lx_nosys()
{
	return (set_errno(ENOSYS));
}
667 679
/*
 * Brand-specific routine to check if given non-Solaris standard segment
 * register values should be modified to other values.
 */
/*ARGSUSED*/
greg_t
lx_fixsegreg(greg_t sr, model_t datamodel)
{
	uint16_t idx = SELTOIDX(sr);

	ASSERT(sr == (sr & 0xffff));

	/*
	 * If the segment selector is a valid TLS selector, just return it
	 * (forcing the user privilege level bits on).
	 */
	if (!SELISLDT(sr) && idx >= GDT_TLSMIN && idx <= GDT_TLSMAX)
		return (sr | SEL_UPL);

	/*
	 * Force the SR into the LDT in ring 3 for 32-bit processes.
	 *
	 * 64-bit processes get the null GDT selector since they are not
	 * allowed to have a private LDT.
	 */
#if defined(__amd64)
	return (datamodel == DATAMODEL_ILP32 ? (sr | SEL_TI_LDT | SEL_UPL) : 0);
#elif defined(__i386)
	datamodel = datamodel;	/* datamodel currently unused for 32-bit */
	return (sr | SEL_TI_LDT | SEL_UPL);
#endif /* __amd64 */
}
699 711
700 712 /*
701 713 * Brand-specific function to convert the fsbase as pulled from the register
702 714 * into a native fsbase suitable for locating the ulwp_t from the kernel.
703 715 */
704 716 uintptr_t
705 717 lx_fsbase(klwp_t *lwp, uintptr_t fsbase)
706 718 {
707 719 lx_lwp_data_t *lwpd = lwp->lwp_brand;
708 720
709 721 if (lwpd->br_stack_mode != LX_STACK_MODE_BRAND ||
710 722 lwpd->br_ntv_fsbase == (uintptr_t)NULL) {
711 723 return (fsbase);
712 724 }
713 725
714 726 return (lwpd->br_ntv_fsbase);
715 727 }
716 728
/*
 * These two functions simulate winfo and post_sigcld for the lx brand. The
 * difference is delivering a designated signal as opposed to always SIGCLD.
 *
 * lx_winfo fills in the k_siginfo_t from the zombie child's proc_t, using
 * the brand's configured exit signal (dat->l_signal) translated to its
 * native number.
 */
static void
lx_winfo(proc_t *pp, k_siginfo_t *ip, struct lx_proc_data *dat)
{
	ASSERT(MUTEX_HELD(&pidlock));
	bzero(ip, sizeof (k_siginfo_t));
	ip->si_signo = ltos_signo[dat->l_signal];
	ip->si_code = pp->p_wcode;
	ip->si_pid = pp->p_pid;
	ip->si_ctid = PRCTID(pp);
	ip->si_zoneid = pp->p_zone->zone_id;
	ip->si_status = pp->p_wdata;
	/*
	 * These siginfo values are converted to USER_HZ in the user-land
	 * brand signal code.
	 */
	ip->si_stime = pp->p_stime;
	ip->si_utime = pp->p_utime;
}
739 751
/*
 * Queue the brand-designated exit signal on the parent of the exiting
 * child.  The sigqueue_t is handed off to sigaddqa(); the caller must not
 * free it afterwards.
 */
static void
lx_post_exit_sig(proc_t *cp, sigqueue_t *sqp, struct lx_proc_data *dat)
{
	proc_t *pp = cp->p_parent;

	ASSERT(MUTEX_HELD(&pidlock));
	mutex_enter(&pp->p_lock);
	/*
	 * Since Linux doesn't queue SIGCHLD, or any other non RT
	 * signals, we just blindly deliver whatever signal we can.
	 */
	ASSERT(sqp != NULL);
	lx_winfo(cp, &sqp->sq_info, dat);
	sigaddqa(pp, NULL, sqp);
	sqp = NULL;	/* defensive dead store: ownership went to sigaddqa() */
	mutex_exit(&pp->p_lock);
}
757 769
758 770
/*
 * Brand specific code for exiting and sending a signal to the parent, as
 * opposed to sigcld().
 */
void
lx_exit_with_sig(proc_t *cp, sigqueue_t *sqp)
{
	proc_t *pp = cp->p_parent;
	lx_proc_data_t *lx_brand_data = ptolxproc(cp);
	ASSERT(MUTEX_HELD(&pidlock));

	switch (cp->p_wcode) {
	case CLD_EXITED:
	case CLD_DUMPED:
	case CLD_KILLED:
		ASSERT(cp->p_stat == SZOMB);
		/*
		 * The broadcast on p_srwchan_cv is a kludge to
		 * wakeup a possible thread in uadmin(A_SHUTDOWN).
		 */
		cv_broadcast(&cp->p_srwchan_cv);

		/*
		 * Add to newstate list of the parent
		 */
		add_ns(pp, cp);

		cv_broadcast(&pp->p_cv);
		if ((pp->p_flag & SNOWAIT) ||
		    PTOU(pp)->u_signal[SIGCLD - 1] == SIG_IGN) {
			/* Parent isn't waiting: reap immediately. */
			if (!(cp->p_pidflag & CLDWAITPID))
				freeproc(cp);
		} else if (!(cp->p_pidflag & CLDNOSIGCHLD) &&
		    lx_brand_data->l_signal != 0) {
			lx_post_exit_sig(cp, sqp, lx_brand_data);
			/* sqp is consumed by lx_post_exit_sig */
			sqp = NULL;
		}
		break;

	case CLD_STOPPED:
	case CLD_CONTINUED:
	case CLD_TRAPPED:
		panic("Should not be called in this case");
	}

	/* Release the sigqueue if it was not handed off above. */
	if (sqp)
		siginfofree(sqp);
}
807 819
/*
 * Filters based on arguments that have been passed in by a separate syscall
 * using the B_STORE_ARGS mechanism. if the __WALL flag is set, no filter is
 * applied, otherwise we look at the difference between a clone and non-clone
 * process.
 * The definition of a clone process in Linux is a thread that does not deliver
 * SIGCHLD to its parent. The option __WCLONE indicates to wait only on clone
 * processes. Without that option, a process should only wait on normal
 * children. The following table shows the cases.
 *
 *                   default    __WCLONE
 *   no SIGCHLD      -           X
 *   SIGCHLD         X           -
 *
 * This is an XOR of __WCLONE being set, and SIGCHLD being the signal sent on
 * process exit.
 *
 * More information on wait in lx brands can be found at
 * usr/src/lib/brand/lx/lx_brand/common/wait.c.
 */
/* ARGSUSED */
boolean_t
lx_wait_filter(proc_t *pp, proc_t *cp)
{
	lx_lwp_data_t *lwpd = ttolxlwp(curthread);
	int flags = lwpd->br_waitid_flags;
	boolean_t ret;

	/* Not emulating Linux waitid() semantics: accept every child. */
	if (!lwpd->br_waitid_emulate) {
		return (B_TRUE);
	}

	mutex_enter(&cp->p_lock);
	if (flags & LX_WALL) {
		ret = B_TRUE;
	} else {
		lx_proc_data_t *pd = ptolxproc(cp);
		boolean_t is_sigchld = B_TRUE;
		boolean_t match_wclone = B_FALSE;

		/*
		 * When calling clone, an alternate signal can be chosen to
		 * deliver to the parent when the child exits.
		 */
		if (pd != NULL && pd->l_signal != stol_signo[SIGCHLD]) {
			is_sigchld = B_FALSE;
		}
		if ((flags & LX_WCLONE) != 0) {
			match_wclone = B_TRUE;
		}

		/* See the XOR truth table in the block comment above. */
		ret = (match_wclone ^ is_sigchld) ? B_TRUE : B_FALSE;
	}
	mutex_exit(&cp->p_lock);

	return (ret);
}
865 877
866 878 void
867 879 lx_ifname_convert(char *ifname, lx_if_action_t act)
868 880 {
869 881 if (act == LX_IF_TONATIVE) {
870 882 if (strncmp(ifname, "lo", IFNAMSIZ) == 0)
871 883 (void) strlcpy(ifname, "lo0", IFNAMSIZ);
872 884 } else {
873 885 if (strncmp(ifname, "lo0", IFNAMSIZ) == 0)
874 886 (void) strlcpy(ifname, "lo", IFNAMSIZ);
875 887 }
876 888 }
877 889
878 890 void
879 891 lx_ifflags_convert(uint64_t *flags, lx_if_action_t act)
880 892 {
881 893 uint64_t buf;
882 894
883 895 buf = *flags & (IFF_UP | IFF_BROADCAST | IFF_DEBUG |
884 896 IFF_LOOPBACK | IFF_POINTOPOINT | IFF_NOTRAILERS |
885 897 IFF_RUNNING | IFF_NOARP | IFF_PROMISC | IFF_ALLMULTI);
886 898
887 899 /* Linux has different shift for multicast flag */
888 900 if (act == LX_IF_TONATIVE) {
889 901 if (*flags & 0x1000)
890 902 buf |= IFF_MULTICAST;
891 903 } else {
892 904 if (*flags & IFF_MULTICAST)
893 905 buf |= 0x1000;
894 906 }
895 907 *flags = buf;
896 908 }
897 909
898 910 /*
899 911 * Convert an IPv6 address into the numbers used by /proc/net/if_inet6
900 912 */
901 913 unsigned int
902 914 lx_ipv6_scope_convert(const in6_addr_t *addr)
903 915 {
904 916 if (IN6_IS_ADDR_V4COMPAT(addr)) {
905 917 return (LX_IPV6_ADDR_COMPATv4);
906 918 } else if (IN6_ARE_ADDR_EQUAL(addr, &ipv6_loopback)) {
907 919 return (LX_IPV6_ADDR_LOOPBACK);
908 920 } else if (IN6_IS_ADDR_LINKLOCAL(addr)) {
909 921 return (LX_IPV6_ADDR_LINKLOCAL);
910 922 } else if (IN6_IS_ADDR_SITELOCAL(addr)) {
911 923 return (LX_IPV6_ADDR_SITELOCAL);
912 924 } else {
913 925 return (0x0000U);
914 926 }
915 927 }
916 928
917 929
918 930 void
919 931 lx_stol_hwaddr(const struct sockaddr_dl *src, struct sockaddr *dst, int *size)
920 932 {
921 933 int copy_size = MIN(src->sdl_alen, sizeof (dst->sa_data));
922 934
923 935 switch (src->sdl_type) {
924 936 case DL_ETHER:
925 937 dst->sa_family = LX_ARPHRD_ETHER;
926 938 break;
927 939 case DL_LOOP:
928 940 dst->sa_family = LX_ARPHRD_LOOPBACK;
929 941 break;
930 942 default:
931 943 dst->sa_family = LX_ARPHRD_VOID;
932 944 }
933 945
934 946 bcopy(LLADDR(src), dst->sa_data, copy_size);
935 947 *size = copy_size;
936 948 }
937 949
/*
 * Brand hook to convert native kernel siginfo signal number, errno, code, pid
 * and si_status to Linux values. Similar to the stol_ksiginfo function but
 * this one converts in-place, converts the pid, and does not copyout.
 * Untranslatable signal/status values fall back to LX_SIGKILL, and
 * untranslatable errnos to EINVAL.
 */
void
lx_sigfd_translate(k_siginfo_t *infop)
{
	zone_t *zone = curproc->p_zone;

	infop->si_signo = lx_stol_signo(infop->si_signo, LX_SIGKILL);
	infop->si_status = lx_stol_status(infop->si_status, LX_SIGKILL);
	infop->si_code = lx_stol_sigcode(infop->si_code);
	infop->si_errno = lx_errno(infop->si_errno, EINVAL);

	/* Map zsched and zone init to pid 1 */
	if (infop->si_pid == zone->zone_proc_initpid ||
	    infop->si_pid == zone->zone_zsched->p_pid) {
		infop->si_pid = 1;
	}
}
959 971
960 972 int
961 973 stol_ksiginfo_copyout(k_siginfo_t *sip, void *ulxsip)
962 974 {
963 975 lx_siginfo_t lsi;
964 976
965 977 bzero(&lsi, sizeof (lsi));
966 978 lsi.lsi_signo = lx_stol_signo(sip->si_signo, SIGCLD);
967 979 lsi.lsi_code = lx_stol_sigcode(sip->si_code);
968 980 lsi.lsi_errno = lx_errno(sip->si_errno, EINVAL);
969 981
970 982 switch (lsi.lsi_signo) {
971 983 case LX_SIGPOLL:
972 984 lsi.lsi_band = sip->si_band;
973 985 lsi.lsi_fd = sip->si_fd;
974 986 break;
975 987
976 988 case LX_SIGCHLD:
977 989 lsi.lsi_pid = sip->si_pid;
978 990 if (sip->si_code <= 0 || sip->si_code == CLD_EXITED) {
979 991 lsi.lsi_status = sip->si_status;
980 992 } else {
981 993 lsi.lsi_status = lx_stol_status(sip->si_status,
982 994 SIGKILL);
983 995 }
984 996 lsi.lsi_utime = HZ_TO_LX_USERHZ(sip->si_utime);
985 997 lsi.lsi_stime = HZ_TO_LX_USERHZ(sip->si_stime);
986 998 break;
987 999
988 1000 case LX_SIGILL:
989 1001 case LX_SIGBUS:
990 1002 case LX_SIGFPE:
991 1003 case LX_SIGSEGV:
992 1004 lsi.lsi_addr = sip->si_addr;
993 1005 break;
994 1006
995 1007 default:
996 1008 lsi.lsi_pid = sip->si_pid;
997 1009 lsi.lsi_uid = LX_UID32_TO_UID16(sip->si_uid);
998 1010 }
999 1011
1000 1012 if (copyout(&lsi, ulxsip, sizeof (lsi)) != 0) {
1001 1013 return (set_errno(EFAULT));
1002 1014 }
1003 1015
1004 1016 return (0);
1005 1017 }
1006 1018
1007 1019 #if defined(_SYSCALL32_IMPL)
1008 1020 int
1009 1021 stol_ksiginfo32_copyout(k_siginfo_t *sip, void *ulxsip)
1010 1022 {
1011 1023 lx_siginfo32_t lsi;
1012 1024
1013 1025 bzero(&lsi, sizeof (lsi));
1014 1026 lsi.lsi_signo = lx_stol_signo(sip->si_signo, SIGCLD);
1015 1027 lsi.lsi_code = lx_stol_sigcode(sip->si_code);
1016 1028 lsi.lsi_errno = lx_errno(sip->si_errno, EINVAL);
1017 1029
1018 1030 switch (lsi.lsi_signo) {
1019 1031 case LX_SIGPOLL:
1020 1032 lsi.lsi_band = sip->si_band;
1021 1033 lsi.lsi_fd = sip->si_fd;
1022 1034 break;
1023 1035
1024 1036 case LX_SIGCHLD:
1025 1037 lsi.lsi_pid = sip->si_pid;
1026 1038 if (sip->si_code <= 0 || sip->si_code == CLD_EXITED) {
1027 1039 lsi.lsi_status = sip->si_status;
1028 1040 } else {
1029 1041 lsi.lsi_status = lx_stol_status(sip->si_status,
1030 1042 SIGKILL);
1031 1043 }
1032 1044 lsi.lsi_utime = HZ_TO_LX_USERHZ(sip->si_utime);
1033 1045 lsi.lsi_stime = HZ_TO_LX_USERHZ(sip->si_stime);
1034 1046 break;
1035 1047
1036 1048 case LX_SIGILL:
1037 1049 case LX_SIGBUS:
1038 1050 case LX_SIGFPE:
1039 1051 case LX_SIGSEGV:
1040 1052 lsi.lsi_addr = (caddr32_t)(uintptr_t)sip->si_addr;
1041 1053 break;
1042 1054
1043 1055 default:
1044 1056 lsi.lsi_pid = sip->si_pid;
1045 1057 lsi.lsi_uid = LX_UID32_TO_UID16(sip->si_uid);
1046 1058 }
1047 1059
1048 1060 if (copyout(&lsi, ulxsip, sizeof (lsi)) != 0) {
1049 1061 return (set_errno(EFAULT));
1050 1062 }
1051 1063
1052 1064 return (0);
1053 1065 }
1054 1066 #endif
1055 1067
/*
 * Given an LX LWP, determine where user register state is stored.
 *
 * Returns LX_REG_LOC_LWP when the registers can be taken from the LWP
 * itself, LX_REG_LOC_UCP when they must be read from a ucontext_t preserved
 * by the usermode emulation library (in which case *ucp is set to that
 * ucontext_t's user address), or LX_REG_LOC_UNAVAIL when no register state
 * is accessible.  When 'for_write' is B_TRUE, stack modes in which setting
 * registers is not allowed fall through rather than returning the LWP
 * location.
 */
lx_regs_location_t
lx_regs_location(lx_lwp_data_t *lwpd, void **ucp, boolean_t for_write)
{
	switch (lwpd->br_stack_mode) {
	case LX_STACK_MODE_BRAND:
		/*
		 * The LWP was stopped with the brand stack and register state
		 * loaded, e.g. during a syscall emulated within the kernel.
		 */
		return (LX_REG_LOC_LWP);

	case LX_STACK_MODE_PREINIT:
		if (for_write) {
			/* setting registers not allowed in this state */
			break;
		}
		if (lwpd->br_ptrace_whatstop == LX_PR_SIGNALLED ||
		    lwpd->br_ptrace_whatstop == LX_PR_SYSEXIT) {
			/* The LWP was stopped by tracing on exec. */
			return (LX_REG_LOC_LWP);
		}
		break;

	case LX_STACK_MODE_NATIVE:
		if (for_write) {
			/* setting registers not allowed in this state */
			break;
		}
		if (lwpd->br_ptrace_whystop == PR_BRAND) {
			/* Called while ptrace-event-stopped by lx_exec. */
			if (lwpd->br_ptrace_whatstop == LX_PR_EVENT) {
				return (LX_REG_LOC_LWP);
			}

			/* Called while ptrace-event-stopped after clone. */
			if (lwpd->br_ptrace_whatstop == LX_PR_SIGNALLED &&
			    lwpd->br_ptrace_stopsig == LX_SIGSTOP &&
			    (lwpd->br_ptrace_flags & LX_PTF_STOPPED)) {
				return (LX_REG_LOC_LWP);
			}

			/*
			 * Called to obtain syscall exit for other cases
			 * (e.g. pseudo return from rt_sigreturn).
			 */
			if (lwpd->br_ptrace_whatstop == LX_PR_SYSEXIT &&
			    (lwpd->br_ptrace_flags & LX_PTF_STOPPED)) {
				return (LX_REG_LOC_LWP);
			}
		}
		break;
	default:
		break;
	}

	/*
	 * None of the stack-mode cases above applied; fall back to any
	 * preserved ucontext_t recorded by ptrace.
	 */
	if (lwpd->br_ptrace_stopucp != (uintptr_t)NULL) {
		/*
		 * The LWP was stopped in the usermode emulation library
		 * but a ucontext_t for the preserved brand stack and
		 * register state was provided. Return the register state
		 * from that ucontext_t.
		 */
		VERIFY(ucp != NULL);
		*ucp = (void *)lwpd->br_ptrace_stopucp;
		return (LX_REG_LOC_UCP);
	}

	return (LX_REG_LOC_UNAVAIL);
}
|
↓ open down ↓ |
478 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX