Print this page
| Split |
Close |
| Expand all |
| Collapse all |
--- old/usr/src/uts/intel/ia32/syscall/getcontext.c
+++ new/usr/src/uts/intel/ia32/syscall/getcontext.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright 2015 Joyent, Inc.
24 24 */
25 25 /*
26 26 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
27 27 * Use is subject to license terms.
28 28 */
29 29
30 30 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
31 31 /* All Rights Reserved */
32 32
33 33 #include <sys/param.h>
34 34 #include <sys/types.h>
35 35 #include <sys/vmparam.h>
36 36 #include <sys/systm.h>
37 37 #include <sys/signal.h>
38 38 #include <sys/stack.h>
39 39 #include <sys/regset.h>
40 40 #include <sys/privregs.h>
41 41 #include <sys/frame.h>
42 42 #include <sys/proc.h>
43 43 #include <sys/brand.h>
44 44 #include <sys/psw.h>
45 45 #include <sys/ucontext.h>
46 46 #include <sys/asm_linkage.h>
47 47 #include <sys/errno.h>
48 48 #include <sys/archsystm.h>
49 49 #include <sys/schedctl.h>
50 50 #include <sys/debug.h>
51 51 #include <sys/sysmacros.h>
52 52 #include <sys/sdt.h>
53 53
/*
 * Save user context.
 *
 * Capture the calling LWP's current user-level state into *ucp:
 * general registers, FP registers (only if the FPU is enabled for this
 * LWP), the stack description, and optionally the signal mask.  If
 * 'mask' is NULL, no signal mask is saved and UC_SIGMASK is cleared
 * from uc_flags so the consumer knows the field is not valid.
 */
void
savecontext(ucontext_t *ucp, const k_sigset_t *mask)
{
	proc_t *p = ttoproc(curthread);
	klwp_t *lwp = ttolwp(curthread);
	struct regs *rp = lwptoregs(lwp);

	/*
	 * We unconditionally assign to every field through the end
	 * of the gregs, but we need to bzero() everything -after- that
	 * to avoid having any kernel stack garbage escape to userland.
	 */
	bzero(&ucp->uc_mcontext.fpregs, sizeof (ucontext_t) -
	    offsetof(ucontext_t, uc_mcontext.fpregs));

	ucp->uc_flags = UC_ALL;
	ucp->uc_link = (struct ucontext *)lwp->lwp_oldcontext;

	/*
	 * Try to copyin() the ustack if one is registered. If the stack
	 * has zero size, this indicates that stack bounds checking has
	 * been disabled for this LWP. If stack bounds checking is disabled
	 * or the copyin() fails, we fall back to the legacy behavior.
	 */
	if (lwp->lwp_ustack == NULL ||
	    copyin((void *)lwp->lwp_ustack, &ucp->uc_stack,
	    sizeof (ucp->uc_stack)) != 0 ||
	    ucp->uc_stack.ss_size == 0) {

		/*
		 * Legacy behavior: report the alternate signal stack if
		 * we are currently executing on it, otherwise report the
		 * process's main stack.
		 */
		if (lwp->lwp_sigaltstack.ss_flags == SS_ONSTACK) {
			ucp->uc_stack = lwp->lwp_sigaltstack;
		} else {
			ucp->uc_stack.ss_sp = p->p_usrstack - p->p_stksize;
			ucp->uc_stack.ss_size = p->p_stksize;
			ucp->uc_stack.ss_flags = 0;
		}
	}

	/*
	 * If either the trace flag or REQUEST_STEP is set,
	 * arrange for single-stepping and turn off the trace flag.
	 */
	if ((rp->r_ps & PS_T) || (lwp->lwp_pcb.pcb_flags & REQUEST_STEP)) {
		/*
		 * Clear PS_T so that saved user context won't have trace
		 * flag set.
		 */
		rp->r_ps &= ~PS_T;

		if (!(lwp->lwp_pcb.pcb_flags & REQUEST_NOSTEP)) {
			lwp->lwp_pcb.pcb_flags |= DEBUG_PENDING;
			/*
			 * trap() always checks DEBUG_PENDING before
			 * checking for any pending signal. This at times
			 * can potentially lead to DEBUG_PENDING not being
			 * honoured. (for eg: the lwp is stopped by
			 * stop_on_fault() called from trap(), after being
			 * awakened it might see a pending signal and call
			 * savecontext(), however on the way back to userland
			 * there is no place it can be detected). Hence in
			 * anticipation of such occasions, set AST flag for
			 * the thread which will make the thread take an
			 * excursion through trap() where it will be handled
			 * appropriately.
			 */
			aston(curthread);
		}
	}

	getgregs(lwp, ucp->uc_mcontext.gregs);
	/* Only save FP state if the FPU is actually in use by this LWP. */
	if (lwp->lwp_pcb.pcb_fpu.fpu_flags & FPU_EN)
		getfpregs(lwp, &ucp->uc_mcontext.fpregs);
	else
		ucp->uc_flags &= ~UC_FPU;

	if (mask != NULL) {
		/*
		 * Save signal mask.
		 */
		sigktou(mask, &ucp->uc_sigmask);
	} else {
		ucp->uc_flags &= ~UC_SIGMASK;
		bzero(&ucp->uc_sigmask, sizeof (ucp->uc_sigmask));
	}

	if (PROC_IS_BRANDED(p) && BROP(p)->b_savecontext != NULL) {
		/*
		 * Allow the brand the chance to modify the context we
		 * saved:
		 */
		BROP(p)->b_savecontext(ucp);
	}
}
150 150
/*
 * Restore user context.
 *
 * Apply the state saved in *ucp back to the calling LWP, honoring
 * only the pieces selected by uc_flags (UC_STACK, UC_CPU, UC_FPU,
 * UC_SIGMASK).  The brand hook, if any, is given a chance to rewrite
 * the context before anything is applied.
 */
void
restorecontext(ucontext_t *ucp)
{
	kthread_t *t = curthread;
	klwp_t *lwp = ttolwp(t);
	proc_t *p = lwptoproc(lwp);

	if (PROC_IS_BRANDED(p) && BROP(p)->b_restorecontext != NULL) {
		/*
		 * Allow the brand the chance to modify the context before
		 * we restore it:
		 */
		BROP(p)->b_restorecontext(ucp);
	}

	DTRACE_PROBE3(oldcontext__set, klwp_t *, lwp,
	    uintptr_t, lwp->lwp_oldcontext,
	    uintptr_t, (uintptr_t)ucp->uc_link);
	lwp->lwp_oldcontext = (uintptr_t)ucp->uc_link;

	if (ucp->uc_flags & UC_STACK) {
		/*
		 * Either adopt the saved alternate signal stack, or note
		 * that we are no longer running on one.
		 */
		if (ucp->uc_stack.ss_flags == SS_ONSTACK)
			lwp->lwp_sigaltstack = ucp->uc_stack;
		else
			lwp->lwp_sigaltstack.ss_flags &= ~SS_ONSTACK;
	}

	if (ucp->uc_flags & UC_CPU) {
		/*
		 * If the trace flag is set, mark the lwp to take a
		 * single-step trap on return to user level (below).
		 * The x86 lcall interface and sysenter has already done this,
		 * and turned off the flag, but amd64 syscall interface has not.
		 */
		if (lwptoregs(lwp)->r_ps & PS_T)
			lwp->lwp_pcb.pcb_flags |= DEBUG_PENDING;
		setgregs(lwp, ucp->uc_mcontext.gregs);
		/* Return straight to the restored context, skipping the */
		/* normal post-syscall register fixups. */
		lwp->lwp_eosys = JUSTRETURN;
		t->t_post_sys = 1;
		aston(curthread);
	}

	if (ucp->uc_flags & UC_FPU)
		setfpregs(lwp, &ucp->uc_mcontext.fpregs);

	if (ucp->uc_flags & UC_SIGMASK) {
		/*
		 * We don't need to acquire p->p_lock here;
		 * we are manipulating thread-private data.
		 */
		schedctl_finish_sigblock(t);
		sigutok(&ucp->uc_sigmask, &t->t_hold);
		/* The new mask may have unblocked a pending signal. */
		if (sigcheck(ttoproc(t), t))
			t->t_sig_check = 1;
	}
}
210 210
211 211
/*
 * System-call handler for getcontext(2)/setcontext(2) and the
 * {get,set}ustack operations for native (64-bit) processes.  'flag'
 * selects the operation; 'arg' is the user-space ucontext_t, caddr_t,
 * or stack_t pointer as appropriate.  Returns 0 on success or sets
 * errno (EINVAL/EFAULT) on failure.  SETCONTEXT with a NULL argument
 * exits the process.
 */
int
getsetcontext(int flag, void *arg)
{
	ucontext_t uc;
	ucontext_t *ucp;
	klwp_t *lwp = ttolwp(curthread);
	stack_t dummy_stk;
	proc_t *p = lwptoproc(lwp);

	/*
	 * In future releases, when the ucontext structure grows,
	 * getcontext should be modified to only return the fields
	 * specified in the uc_flags. That way, the structure can grow
	 * and still be binary compatible with all .o's which will only
	 * have old fields defined in uc_flags
	 */

	switch (flag) {
	default:
		return (set_errno(EINVAL));

	case GETCONTEXT:
		schedctl_finish_sigblock(curthread);
		savecontext(&uc, &curthread->t_hold);
		/* Translate the signal mask for branded processes. */
		if (uc.uc_flags & UC_SIGMASK)
			SIGSET_NATIVE_TO_BRAND(&uc.uc_sigmask);
		if (copyout(&uc, arg, sizeof (uc)))
			return (set_errno(EFAULT));
		return (0);

	case SETCONTEXT:
		ucp = arg;
		if (ucp == NULL)
			exit(CLD_EXITED, 0);
		/*
		 * Don't copyin filler or floating state unless we need it.
		 * The ucontext_t struct and fields are specified in the ABI.
		 */
		if (copyin(ucp, &uc, sizeof (ucontext_t) -
		    sizeof (uc.uc_filler) -
		    sizeof (uc.uc_mcontext.fpregs))) {
			return (set_errno(EFAULT));
		}
		if (uc.uc_flags & UC_SIGMASK)
			SIGSET_BRAND_TO_NATIVE(&uc.uc_sigmask);

		/* FP state is only fetched when the caller supplied it. */
		if ((uc.uc_flags & UC_FPU) &&
		    copyin(&ucp->uc_mcontext.fpregs, &uc.uc_mcontext.fpregs,
		    sizeof (uc.uc_mcontext.fpregs))) {
			return (set_errno(EFAULT));
		}

		/*
		 * If this is a branded process, copy in the brand-private
		 * data:
		 */
		if (PROC_IS_BRANDED(p) && copyin(&ucp->uc_brand_data,
		    &uc.uc_brand_data, sizeof (uc.uc_brand_data)) != 0) {
			return (set_errno(EFAULT));
		}

		restorecontext(&uc);

		/* Best-effort update of the registered ustack; errors */
		/* are deliberately ignored. */
		if ((uc.uc_flags & UC_STACK) && (lwp->lwp_ustack != 0))
			(void) copyout(&uc.uc_stack, (stack_t *)lwp->lwp_ustack,
			    sizeof (uc.uc_stack));
		return (0);

	case GETUSTACK:
		if (copyout(&lwp->lwp_ustack, arg, sizeof (caddr_t)))
			return (set_errno(EFAULT));
		return (0);

	case SETUSTACK:
		/* Probe the address for validity before registering it. */
		if (copyin(arg, &dummy_stk, sizeof (dummy_stk)))
			return (set_errno(EFAULT));
		lwp->lwp_ustack = (uintptr_t)arg;
		return (0);
	}
}
292 292
293 293 #ifdef _SYSCALL32_IMPL
294 294
/*
 * Save user context for 32-bit processes.
 *
 * ILP32 counterpart of savecontext(): identical logic, but fills in a
 * ucontext32_t, narrowing pointers and sizes to caddr32_t/size32_t.
 * See savecontext() for detailed commentary.
 */
void
savecontext32(ucontext32_t *ucp, const k_sigset_t *mask)
{
	proc_t *p = ttoproc(curthread);
	klwp_t *lwp = ttolwp(curthread);
	struct regs *rp = lwptoregs(lwp);

	/*
	 * Zero everything after the gregs so no kernel stack garbage
	 * escapes to userland (see savecontext()).
	 */
	bzero(&ucp->uc_mcontext.fpregs, sizeof (ucontext32_t) -
	    offsetof(ucontext32_t, uc_mcontext.fpregs));

	ucp->uc_flags = UC_ALL;
	ucp->uc_link = (caddr32_t)lwp->lwp_oldcontext;

	/*
	 * Use the registered ustack if present and usable; otherwise
	 * fall back to legacy behavior (see savecontext()).
	 */
	if (lwp->lwp_ustack == NULL ||
	    copyin((void *)lwp->lwp_ustack, &ucp->uc_stack,
	    sizeof (ucp->uc_stack)) != 0 ||
	    ucp->uc_stack.ss_size == 0) {

		if (lwp->lwp_sigaltstack.ss_flags == SS_ONSTACK) {
			ucp->uc_stack.ss_sp =
			    (caddr32_t)(uintptr_t)lwp->lwp_sigaltstack.ss_sp;
			ucp->uc_stack.ss_size =
			    (size32_t)lwp->lwp_sigaltstack.ss_size;
			ucp->uc_stack.ss_flags = SS_ONSTACK;
		} else {
			ucp->uc_stack.ss_sp = (caddr32_t)(uintptr_t)
			    (p->p_usrstack - p->p_stksize);
			ucp->uc_stack.ss_size = (size32_t)p->p_stksize;
			ucp->uc_stack.ss_flags = 0;
		}
	}

	/*
	 * If either the trace flag or REQUEST_STEP is set, arrange
	 * for single-stepping and turn off the trace flag.
	 */
	if ((rp->r_ps & PS_T) || (lwp->lwp_pcb.pcb_flags & REQUEST_STEP)) {
		/*
		 * Clear PS_T so that saved user context won't have trace
		 * flag set.
		 */
		rp->r_ps &= ~PS_T;

		if (!(lwp->lwp_pcb.pcb_flags & REQUEST_NOSTEP)) {
			lwp->lwp_pcb.pcb_flags |= DEBUG_PENDING;
			/*
			 * See comments in savecontext().
			 */
			aston(curthread);
		}
	}

	getgregs32(lwp, ucp->uc_mcontext.gregs);
	/* Only save FP state if the FPU is actually in use by this LWP. */
	if (lwp->lwp_pcb.pcb_fpu.fpu_flags & FPU_EN)
		getfpregs32(lwp, &ucp->uc_mcontext.fpregs);
	else
		ucp->uc_flags &= ~UC_FPU;

	if (mask != NULL) {
		/*
		 * Save signal mask.
		 */
		sigktou(mask, &ucp->uc_sigmask);
	} else {
		ucp->uc_flags &= ~UC_SIGMASK;
		bzero(&ucp->uc_sigmask, sizeof (ucp->uc_sigmask));
	}

	if (PROC_IS_BRANDED(p) && BROP(p)->b_savecontext32 != NULL) {
		/*
		 * Allow the brand the chance to modify the context we
		 * saved:
		 */
		BROP(p)->b_savecontext32(ucp);
	}
}
374 374
/*
 * ILP32 counterpart of getsetcontext(): the system-call handler for
 * getcontext(2)/setcontext(2) and {get,set}ustack for 32-bit processes.
 * Contexts are copied in as ucontext32_t and converted to the native
 * ucontext_t before being restored.  See getsetcontext() for detailed
 * commentary.
 */
int
getsetcontext32(int flag, void *arg)
{
	ucontext32_t uc;
	ucontext_t ucnat;
	ucontext32_t *ucp;
	klwp_t *lwp = ttolwp(curthread);
	caddr32_t ustack32;
	stack32_t dummy_stk32;
	proc_t *p = lwptoproc(lwp);

	switch (flag) {
	default:
		return (set_errno(EINVAL));

	case GETCONTEXT:
		schedctl_finish_sigblock(curthread);
		savecontext32(&uc, &curthread->t_hold);
		/* Translate the signal mask for branded processes. */
		if (uc.uc_flags & UC_SIGMASK)
			SIGSET_NATIVE_TO_BRAND(&uc.uc_sigmask);
		if (copyout(&uc, arg, sizeof (uc)))
			return (set_errno(EFAULT));
		return (0);

	case SETCONTEXT:
		ucp = arg;
		if (ucp == NULL)
			exit(CLD_EXITED, 0);
		/*
		 * Don't copyin filler or floating state unless we need
		 * it (see getsetcontext()).
		 */
		if (copyin(ucp, &uc, sizeof (uc) -
		    sizeof (uc.uc_filler) -
		    sizeof (uc.uc_mcontext.fpregs))) {
			return (set_errno(EFAULT));
		}
		if (uc.uc_flags & UC_SIGMASK)
			SIGSET_BRAND_TO_NATIVE(&uc.uc_sigmask);
		/* FP state is only fetched when the caller supplied it. */
		if ((uc.uc_flags & UC_FPU) &&
		    copyin(&ucp->uc_mcontext.fpregs, &uc.uc_mcontext.fpregs,
		    sizeof (uc.uc_mcontext.fpregs))) {
			return (set_errno(EFAULT));
		}

		/*
		 * If this is a branded process, copy in the brand-private
		 * data:
		 */
		if (PROC_IS_BRANDED(p) && copyin(&ucp->uc_brand_data,
		    &uc.uc_brand_data, sizeof (uc.uc_brand_data)) != 0) {
			return (set_errno(EFAULT));
		}

		/* Widen the 32-bit context to native form, then restore. */
		ucontext_32ton(&uc, &ucnat);
		restorecontext(&ucnat);

		/* Best-effort update of the registered ustack; errors */
		/* are deliberately ignored. */
		if ((uc.uc_flags & UC_STACK) && (lwp->lwp_ustack != 0))
			(void) copyout(&uc.uc_stack,
			    (stack32_t *)lwp->lwp_ustack, sizeof (uc.uc_stack));
		return (0);

	case GETUSTACK:
		ustack32 = (caddr32_t)lwp->lwp_ustack;
		if (copyout(&ustack32, arg, sizeof (ustack32)))
			return (set_errno(EFAULT));
		return (0);

	case SETUSTACK:
		/* Probe the address for validity before registering it. */
		if (copyin(arg, &dummy_stk32, sizeof (dummy_stk32)))
			return (set_errno(EFAULT));
		lwp->lwp_ustack = (uintptr_t)arg;
		return (0);
	}
}
446 446
447 447 #endif /* _SYSCALL32_IMPL */
|
↓ open down ↓ |
447 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX