Print this page
OS-3561 lxbrand emulation library should execute on alternate stack
OS-3558 lxbrand add support for full in-kernel syscall handling
OS-3545 lx_syscall_regs should not walk stack
OS-3868 many LTP testcases now hang
OS-3901 lxbrand lx_recvmsg fails to translate control messages when 64-bit
Reviewed by: Jerry Jelinek <jerry.jelinek@joyent.com>
Reviewed by: Patrick Mooney <patrick.mooney@joyent.com>
Reviewed by: Bryan Cantrill <bryan@joyent.com>
| Split |
Close |
| Expand all |
| Collapse all |
--- old/usr/src/uts/intel/ia32/syscall/getcontext.c
+++ new/usr/src/uts/intel/ia32/syscall/getcontext.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
|
↓ open down ↓ |
12 lines elided |
↑ open up ↑ |
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 + * Copyright 2015 Joyent, Inc.
24 + */
25 +/*
23 26 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
24 27 * Use is subject to license terms.
25 28 */
26 29
27 30 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
28 31 /* All Rights Reserved */
29 32
30 33 #include <sys/param.h>
31 34 #include <sys/types.h>
32 35 #include <sys/vmparam.h>
33 36 #include <sys/systm.h>
34 37 #include <sys/signal.h>
35 38 #include <sys/stack.h>
36 39 #include <sys/regset.h>
37 40 #include <sys/privregs.h>
38 41 #include <sys/frame.h>
|
↓ open down ↓ |
6 lines elided |
↑ open up ↑ |
39 42 #include <sys/proc.h>
40 43 #include <sys/brand.h>
41 44 #include <sys/psw.h>
42 45 #include <sys/ucontext.h>
43 46 #include <sys/asm_linkage.h>
44 47 #include <sys/errno.h>
45 48 #include <sys/archsystm.h>
46 49 #include <sys/schedctl.h>
47 50 #include <sys/debug.h>
48 51 #include <sys/sysmacros.h>
52 +#include <sys/sdt.h>
49 53
/*
 * Save user context.
 *
 * Populates *ucp with the calling LWP's current user-level context:
 * stack bounds, general registers, FPU state (if enabled), and the
 * signal mask.  If "mask" is NULL, no signal mask is saved and
 * UC_SIGMASK is cleared from uc_flags.  Branded processes get a
 * chance to post-process the saved context via b_savecontext.
 */
void
savecontext(ucontext_t *ucp, const k_sigset_t *mask)
{
	proc_t *p = ttoproc(curthread);
	klwp_t *lwp = ttolwp(curthread);
	struct regs *rp = lwptoregs(lwp);

	/*
	 * We unconditionally assign to every field through the end
	 * of the gregs, but we need to bzero() everything -after- that
	 * to avoid having any kernel stack garbage escape to userland.
	 */
	bzero(&ucp->uc_mcontext.fpregs, sizeof (ucontext_t) -
	    offsetof(ucontext_t, uc_mcontext.fpregs));

	ucp->uc_flags = UC_ALL;
	ucp->uc_link = (struct ucontext *)lwp->lwp_oldcontext;

	/*
	 * Try to copyin() the ustack if one is registered. If the stack
	 * has zero size, this indicates that stack bounds checking has
	 * been disabled for this LWP. If stack bounds checking is disabled
	 * or the copyin() fails, we fall back to the legacy behavior.
	 */
	if (lwp->lwp_ustack == NULL ||
	    copyin((void *)lwp->lwp_ustack, &ucp->uc_stack,
	    sizeof (ucp->uc_stack)) != 0 ||
	    ucp->uc_stack.ss_size == 0) {

		if (lwp->lwp_sigaltstack.ss_flags == SS_ONSTACK) {
			/* Currently on the alternate signal stack. */
			ucp->uc_stack = lwp->lwp_sigaltstack;
		} else {
			/* Legacy fallback: report the main process stack. */
			ucp->uc_stack.ss_sp = p->p_usrstack - p->p_stksize;
			ucp->uc_stack.ss_size = p->p_stksize;
			ucp->uc_stack.ss_flags = 0;
		}
	}

	/*
	 * If either the trace flag or REQUEST_STEP is set,
	 * arrange for single-stepping and turn off the trace flag.
	 */
	if ((rp->r_ps & PS_T) || (lwp->lwp_pcb.pcb_flags & REQUEST_STEP)) {
		/*
		 * Clear PS_T so that saved user context won't have trace
		 * flag set.
		 */
		rp->r_ps &= ~PS_T;

		if (!(lwp->lwp_pcb.pcb_flags & REQUEST_NOSTEP)) {
			lwp->lwp_pcb.pcb_flags |= DEBUG_PENDING;
			/*
			 * trap() always checks DEBUG_PENDING before
			 * checking for any pending signal. This at times
			 * can potentially lead to DEBUG_PENDING not being
			 * honoured. (for eg: the lwp is stopped by
			 * stop_on_fault() called from trap(), after being
			 * awakened it might see a pending signal and call
			 * savecontext(), however on the way back to userland
			 * there is no place it can be detected). Hence in
			 * anticipation of such occasions, set AST flag for
			 * the thread which will make the thread take an
			 * excursion through trap() where it will be handled
			 * appropriately.
			 */
			aston(curthread);
		}
	}

	getgregs(lwp, ucp->uc_mcontext.gregs);
	if (lwp->lwp_pcb.pcb_fpu.fpu_flags & FPU_EN)
		getfpregs(lwp, &ucp->uc_mcontext.fpregs);
	else
		ucp->uc_flags &= ~UC_FPU;

	if (mask != NULL) {
		/*
		 * Save signal mask.
		 */
		sigktou(mask, &ucp->uc_sigmask);
	} else {
		/* No mask supplied: don't claim one was saved. */
		ucp->uc_flags &= ~UC_SIGMASK;
		bzero(&ucp->uc_sigmask, sizeof (ucp->uc_sigmask));
	}

	if (PROC_IS_BRANDED(p) && BROP(p)->b_savecontext != NULL) {
		/*
		 * Allow the brand the chance to modify the context we
		 * saved:
		 */
		BROP(p)->b_savecontext(ucp);
	}
}
130 150
/*
 * Restore user context.
 *
 * Applies the pieces of *ucp selected by uc_flags back to the calling
 * LWP: stack state (UC_STACK), general registers (UC_CPU), FPU state
 * (UC_FPU), and signal mask (UC_SIGMASK).  Branded processes get a
 * chance to adjust the context via b_restorecontext before it is
 * applied.
 */
void
restorecontext(ucontext_t *ucp)
{
	kthread_t *t = curthread;
	klwp_t *lwp = ttolwp(t);
	proc_t *p = lwptoproc(lwp);

	if (PROC_IS_BRANDED(p) && BROP(p)->b_restorecontext != NULL) {
		/*
		 * Allow the brand the chance to modify the context before
		 * we restore it:
		 */
		BROP(p)->b_restorecontext(ucp);
	}

	/* Trace point for observing the lwp_oldcontext transition. */
	DTRACE_PROBE3(oldcontext__set, klwp_t *, lwp,
	    uintptr_t, lwp->lwp_oldcontext,
	    uintptr_t, (uintptr_t)ucp->uc_link);
	lwp->lwp_oldcontext = (uintptr_t)ucp->uc_link;

	if (ucp->uc_flags & UC_STACK) {
		if (ucp->uc_stack.ss_flags == SS_ONSTACK)
			lwp->lwp_sigaltstack = ucp->uc_stack;
		else
			lwp->lwp_sigaltstack.ss_flags &= ~SS_ONSTACK;
	}

	if (ucp->uc_flags & UC_CPU) {
		/*
		 * If the trace flag is set, mark the lwp to take a
		 * single-step trap on return to user level (below).
		 * The x86 lcall interface and sysenter has already done this,
		 * and turned off the flag, but amd64 syscall interface has not.
		 */
		if (lwptoregs(lwp)->r_ps & PS_T)
			lwp->lwp_pcb.pcb_flags |= DEBUG_PENDING;
		setgregs(lwp, ucp->uc_mcontext.gregs);
		lwp->lwp_eosys = JUSTRETURN;
		t->t_post_sys = 1;
		aston(curthread);
	}

	if (ucp->uc_flags & UC_FPU)
		setfpregs(lwp, &ucp->uc_mcontext.fpregs);

	if (ucp->uc_flags & UC_SIGMASK) {
		/*
		 * We don't need to acquire p->p_lock here;
		 * we are manipulating thread-private data.
		 */
		schedctl_finish_sigblock(t);
		sigutok(&ucp->uc_sigmask, &t->t_hold);
		if (sigcheck(ttoproc(t), t))
			t->t_sig_check = 1;
	}
}
178 210
179 211
/*
 * System call backend for get/setcontext(2) and get/setustack, native
 * (64-bit) flavor.  "flag" selects the operation; "arg" is a
 * user-space pointer whose meaning depends on the operation.
 * Returns 0 on success or sets errno (EINVAL/EFAULT) on failure.
 */
int
getsetcontext(int flag, void *arg)
{
	ucontext_t uc;
	ucontext_t *ucp;
	klwp_t *lwp = ttolwp(curthread);
	stack_t dummy_stk;
	proc_t *p = lwptoproc(lwp);

	/*
	 * In future releases, when the ucontext structure grows,
	 * getcontext should be modified to only return the fields
	 * specified in the uc_flags. That way, the structure can grow
	 * and still be binary compatible will all .o's which will only
	 * have old fields defined in uc_flags
	 */

	switch (flag) {
	default:
		return (set_errno(EINVAL));

	case GETCONTEXT:
		schedctl_finish_sigblock(curthread);
		savecontext(&uc, &curthread->t_hold);
		if (uc.uc_flags & UC_SIGMASK)
			SIGSET_NATIVE_TO_BRAND(&uc.uc_sigmask);
		if (copyout(&uc, arg, sizeof (uc)))
			return (set_errno(EFAULT));
		return (0);

	case SETCONTEXT:
		ucp = arg;
		if (ucp == NULL)
			/* setcontext(NULL) means exit(2) per the ABI. */
			exit(CLD_EXITED, 0);
		/*
		 * Don't copyin filler or floating state unless we need it.
		 * The ucontext_t struct and fields are specified in the ABI.
		 */
		if (copyin(ucp, &uc, sizeof (ucontext_t) -
		    sizeof (uc.uc_filler) -
		    sizeof (uc.uc_mcontext.fpregs))) {
			return (set_errno(EFAULT));
		}
		if (uc.uc_flags & UC_SIGMASK)
			SIGSET_BRAND_TO_NATIVE(&uc.uc_sigmask);

		if ((uc.uc_flags & UC_FPU) &&
		    copyin(&ucp->uc_mcontext.fpregs, &uc.uc_mcontext.fpregs,
		    sizeof (uc.uc_mcontext.fpregs))) {
			return (set_errno(EFAULT));
		}

		/*
		 * If this is a branded process, copy in the brand-private
		 * data:
		 */
		if (PROC_IS_BRANDED(p) && copyin(&ucp->uc_brand_data,
		    &uc.uc_brand_data, sizeof (uc.uc_brand_data)) != 0) {
			return (set_errno(EFAULT));
		}

		restorecontext(&uc);

		if ((uc.uc_flags & UC_STACK) && (lwp->lwp_ustack != 0))
			(void) copyout(&uc.uc_stack, (stack_t *)lwp->lwp_ustack,
			    sizeof (uc.uc_stack));
		return (0);

	case GETUSTACK:
		if (copyout(&lwp->lwp_ustack, arg, sizeof (caddr_t)))
			return (set_errno(EFAULT));
		return (0);

	case SETUSTACK:
		/* Validate that the user address is readable before use. */
		if (copyin(arg, &dummy_stk, sizeof (dummy_stk)))
			return (set_errno(EFAULT));
		lwp->lwp_ustack = (uintptr_t)arg;
		return (0);
	}
}
250 292
251 293 #ifdef _SYSCALL32_IMPL
252 294
/*
 * Save user context for 32-bit processes.
 *
 * Same contract as savecontext(), but fills in a 32-bit ucontext32_t
 * (pointers and sizes narrowed to 32 bits).  A NULL "mask" means no
 * signal mask is saved and UC_SIGMASK is cleared from uc_flags.
 * Branded processes get a chance to post-process the saved context
 * via b_savecontext32.
 */
void
savecontext32(ucontext32_t *ucp, const k_sigset_t *mask)
{
	proc_t *p = ttoproc(curthread);
	klwp_t *lwp = ttolwp(curthread);
	struct regs *rp = lwptoregs(lwp);

	/*
	 * Zero everything after the gregs so no kernel stack garbage
	 * escapes to userland (see savecontext()).
	 */
	bzero(&ucp->uc_mcontext.fpregs, sizeof (ucontext32_t) -
	    offsetof(ucontext32_t, uc_mcontext.fpregs));

	ucp->uc_flags = UC_ALL;
	ucp->uc_link = (caddr32_t)lwp->lwp_oldcontext;

	/*
	 * Use the registered ustack if possible; fall back to the
	 * legacy behavior otherwise (see savecontext()).
	 */
	if (lwp->lwp_ustack == NULL ||
	    copyin((void *)lwp->lwp_ustack, &ucp->uc_stack,
	    sizeof (ucp->uc_stack)) != 0 ||
	    ucp->uc_stack.ss_size == 0) {

		if (lwp->lwp_sigaltstack.ss_flags == SS_ONSTACK) {
			/* Narrow the 64-bit kernel view to 32-bit fields. */
			ucp->uc_stack.ss_sp =
			    (caddr32_t)(uintptr_t)lwp->lwp_sigaltstack.ss_sp;
			ucp->uc_stack.ss_size =
			    (size32_t)lwp->lwp_sigaltstack.ss_size;
			ucp->uc_stack.ss_flags = SS_ONSTACK;
		} else {
			ucp->uc_stack.ss_sp = (caddr32_t)(uintptr_t)
			    (p->p_usrstack - p->p_stksize);
			ucp->uc_stack.ss_size = (size32_t)p->p_stksize;
			ucp->uc_stack.ss_flags = 0;
		}
	}

	/*
	 * If either the trace flag or REQUEST_STEP is set, arrange
	 * for single-stepping and turn off the trace flag.
	 */
	if ((rp->r_ps & PS_T) || (lwp->lwp_pcb.pcb_flags & REQUEST_STEP)) {
		/*
		 * Clear PS_T so that saved user context won't have trace
		 * flag set.
		 */
		rp->r_ps &= ~PS_T;

		if (!(lwp->lwp_pcb.pcb_flags & REQUEST_NOSTEP)) {
			lwp->lwp_pcb.pcb_flags |= DEBUG_PENDING;
			/*
			 * See comments in savecontext().
			 */
			aston(curthread);
		}
	}

	getgregs32(lwp, ucp->uc_mcontext.gregs);
	if (lwp->lwp_pcb.pcb_fpu.fpu_flags & FPU_EN)
		getfpregs32(lwp, &ucp->uc_mcontext.fpregs);
	else
		ucp->uc_flags &= ~UC_FPU;

	if (mask != NULL) {
		/*
		 * Save signal mask.
		 */
		sigktou(mask, &ucp->uc_sigmask);
	} else {
		/* No mask supplied: don't claim one was saved. */
		ucp->uc_flags &= ~UC_SIGMASK;
		bzero(&ucp->uc_sigmask, sizeof (ucp->uc_sigmask));
	}

	if (PROC_IS_BRANDED(p) && BROP(p)->b_savecontext32 != NULL) {
		/*
		 * Allow the brand the chance to modify the context we
		 * saved:
		 */
		BROP(p)->b_savecontext32(ucp);
	}
}
316 374
/*
 * 32-bit (_SYSCALL32_IMPL) counterpart of getsetcontext().  Copies a
 * ucontext32_t to/from userland and, for SETCONTEXT, widens it to a
 * native ucontext_t before calling restorecontext().  Returns 0 on
 * success or sets errno (EINVAL/EFAULT) on failure.
 */
int
getsetcontext32(int flag, void *arg)
{
	ucontext32_t uc;
	ucontext_t ucnat;
	ucontext32_t *ucp;
	klwp_t *lwp = ttolwp(curthread);
	caddr32_t ustack32;
	stack32_t dummy_stk32;
	proc_t *p = lwptoproc(lwp);

	switch (flag) {
	default:
		return (set_errno(EINVAL));

	case GETCONTEXT:
		schedctl_finish_sigblock(curthread);
		savecontext32(&uc, &curthread->t_hold);
		if (uc.uc_flags & UC_SIGMASK)
			SIGSET_NATIVE_TO_BRAND(&uc.uc_sigmask);
		if (copyout(&uc, arg, sizeof (uc)))
			return (set_errno(EFAULT));
		return (0);

	case SETCONTEXT:
		ucp = arg;
		if (ucp == NULL)
			/* setcontext(NULL) means exit(2) per the ABI. */
			exit(CLD_EXITED, 0);
		/* Don't copyin filler or FPU state unless needed. */
		if (copyin(ucp, &uc, sizeof (uc) -
		    sizeof (uc.uc_filler) -
		    sizeof (uc.uc_mcontext.fpregs))) {
			return (set_errno(EFAULT));
		}
		if (uc.uc_flags & UC_SIGMASK)
			SIGSET_BRAND_TO_NATIVE(&uc.uc_sigmask);
		if ((uc.uc_flags & UC_FPU) &&
		    copyin(&ucp->uc_mcontext.fpregs, &uc.uc_mcontext.fpregs,
		    sizeof (uc.uc_mcontext.fpregs))) {
			return (set_errno(EFAULT));
		}

		/*
		 * If this is a branded process, copy in the brand-private
		 * data:
		 */
		if (PROC_IS_BRANDED(p) && copyin(&ucp->uc_brand_data,
		    &uc.uc_brand_data, sizeof (uc.uc_brand_data)) != 0) {
			return (set_errno(EFAULT));
		}

		/* Widen to the native ucontext_t before restoring. */
		ucontext_32ton(&uc, &ucnat);
		restorecontext(&ucnat);

		if ((uc.uc_flags & UC_STACK) && (lwp->lwp_ustack != 0))
			(void) copyout(&uc.uc_stack,
			    (stack32_t *)lwp->lwp_ustack, sizeof (uc.uc_stack));
		return (0);

	case GETUSTACK:
		ustack32 = (caddr32_t)lwp->lwp_ustack;
		if (copyout(&ustack32, arg, sizeof (ustack32)))
			return (set_errno(EFAULT));
		return (0);

	case SETUSTACK:
		/* Validate that the user address is readable before use. */
		if (copyin(arg, &dummy_stk32, sizeof (dummy_stk32)))
			return (set_errno(EFAULT));
		lwp->lwp_ustack = (uintptr_t)arg;
		return (0);
	}
}
378 446
379 447 #endif /* _SYSCALL32_IMPL */
|
↓ open down ↓ |
14 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX