--- old/usr/src/uts/intel/syscall/getcontext.c
+++ new/usr/src/uts/intel/syscall/getcontext.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright 2015 Joyent, Inc.
24 24 */
25 25 /*
26 26 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
27 27 * Use is subject to license terms.
28 28 */
29 29
30 30 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
31 31 /* All Rights Reserved */
32 32
33 +/*
34 + * Copyright 2023 Oxide Computer Company
35 + */
36 +
33 37 #include <sys/param.h>
34 38 #include <sys/types.h>
35 39 #include <sys/vmparam.h>
36 40 #include <sys/systm.h>
37 41 #include <sys/signal.h>
38 42 #include <sys/stack.h>
39 43 #include <sys/regset.h>
40 44 #include <sys/privregs.h>
41 45 #include <sys/frame.h>
42 46 #include <sys/proc.h>
43 47 #include <sys/brand.h>
44 48 #include <sys/psw.h>
45 49 #include <sys/ucontext.h>
46 50 #include <sys/asm_linkage.h>
47 51 #include <sys/errno.h>
48 52 #include <sys/archsystm.h>
49 53 #include <sys/schedctl.h>
50 54 #include <sys/debug.h>
51 55 #include <sys/sysmacros.h>
52 56 #include <sys/sdt.h>
53 57
54 58 /*
59 + * This is a wrapper around copyout_noerr() that guarantees an error code is
60 + * returned. Because we're using copyout_noerr(), we need to bound the time
61 + * we're under an on_fault/no_fault and attempt to do so only while we're
62 + * actually copying data out. The main reason is that we're being called back
63 + * from the FPU, which is held with kpreempt_disable() and related; we can't
64 + * use a larger on_fault()/no_fault(), as that would both hide legitimate
65 + * errors we make, masquerading them as user issues, and make it trickier to
66 + * reason about the correct restoration of our state.
67 + */
68 +static int
69 +savecontext_copyout(const void *kaddr, void *uaddr, size_t size)
70 +{
71 + label_t ljb;
72 + if (!on_fault(&ljb)) {
73 + copyout_noerr(kaddr, uaddr, size);
74 + no_fault();
75 + return (0);
76 + } else {
77 + no_fault();
78 + return (EFAULT);
79 + }
80 +}
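
For orientation: a hedged sketch of the savecontext_flags_t values and of the copy-function shape that fpu_signal_copyout() is assumed to accept, reconstructed from the call sites in this diff rather than quoted from the headers:

/*
 * Assumed definitions (a reconstruction, not part of this diff); the
 * real ones live in the ucontext/FPU headers.  SAVECTXT_F_EXTD
 * requests extended (uc_xsave) state; SAVECTXT_F_ONFAULT selects the
 * watchpoint-safe copy path used from signal context.
 */
typedef enum {
	SAVECTXT_F_NONE		= 0,
	SAVECTXT_F_EXTD		= 1 << 0,
	SAVECTXT_F_ONFAULT	= 1 << 1
} savecontext_flags_t;

/*
 * The copy function handed to fpu_signal_copyout() is assumed to
 * return zero on success and nonzero on failure, a contract that both
 * copyout() and savecontext_copyout() above satisfy.
 */
typedef int (*fpu_copyout_f)(const void *kaddr, void *uaddr, size_t size);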
81 +
82 +/*
55 83 * Save user context.
84 + *
85 + * Generally speaking, ucp is a pointer to kernel memory. In the traditional
86 + * version of this (when flags is 0), we just write and fill out all of the
87 + * ucontext_t without any care for what was there ahead of this. However, now
88 + * that the state has been extended to push additional data when user pointers
89 + * in the ucontext_t are valid (currently only uc_xsave), we will copy that
90 + * extended state out to the user pointer.
91 + *
92 + * We allow the copying to happen in two different ways mostly because this is
93 + * also used in the signal handling context where we must be much more careful
94 + * about how to copy out data.
56 95 */
57 -void
58 -savecontext(ucontext_t *ucp, const k_sigset_t *mask)
97 +int
98 +savecontext(ucontext_t *ucp, const k_sigset_t *mask, savecontext_flags_t flags)
59 99 {
60 100 proc_t *p = ttoproc(curthread);
61 101 klwp_t *lwp = ttolwp(curthread);
62 102 struct regs *rp = lwptoregs(lwp);
103 + boolean_t need_xsave = B_FALSE;
104 + boolean_t fpu_en;
105 + long user_xsave = 0;
106 + int ret;
63 107
108 + VERIFY0(flags & ~(SAVECTXT_F_EXTD | SAVECTXT_F_ONFAULT));
109 +
64 110 /*
65 111 * We unconditionally assign to every field through the end
66 112 * of the gregs, but we need to bzero() everything -after- that
67 113 * to avoid having any kernel stack garbage escape to userland.
114 + *
115 + * If we have been asked to save extended state, then we must make sure
116 + * that we don't clobber that value. We must also determine if the
117 + * processor has xsave state. If it does not, then we simply honor
118 + * the pointer, but do not write anything out and do not set the flag.
68 119 */
120 + if ((flags & SAVECTXT_F_EXTD) != 0) {
121 + user_xsave = ucp->uc_xsave;
122 + if (fpu_xsave_enabled() && user_xsave != 0) {
123 + need_xsave = B_TRUE;
124 + }
125 + } else {
126 + /*
127 + * The only other flag we have right now modifies the copyout
128 + * behavior when we're copying out extended information;
129 + * without SAVECTXT_F_EXTD, no flags should be set at all.
130 + */
131 + VERIFY0(flags);
132 + }
69 133 bzero(&ucp->uc_mcontext.fpregs, sizeof (ucontext_t) -
70 134 offsetof(ucontext_t, uc_mcontext.fpregs));
135 + ucp->uc_xsave = user_xsave;
71 136
72 137 ucp->uc_flags = UC_ALL;
73 138 ucp->uc_link = (struct ucontext *)lwp->lwp_oldcontext;
74 139
75 140 /*
76 141 * Try to copyin() the ustack if one is registered. If the stack
77 142 * has zero size, this indicates that stack bounds checking has
78 143 * been disabled for this LWP. If stack bounds checking is disabled
79 144 * or the copyin() fails, we fall back to the legacy behavior.
80 145 */
81 146 if (lwp->lwp_ustack == (uintptr_t)NULL ||
82 147 copyin((void *)lwp->lwp_ustack, &ucp->uc_stack,
83 148 sizeof (ucp->uc_stack)) != 0 ||
84 149 ucp->uc_stack.ss_size == 0) {
85 150
86 151 if (lwp->lwp_sigaltstack.ss_flags == SS_ONSTACK) {
87 152 ucp->uc_stack = lwp->lwp_sigaltstack;
88 153 } else {
89 154 ucp->uc_stack.ss_sp = p->p_usrstack - p->p_stksize;
90 155 ucp->uc_stack.ss_size = p->p_stksize;
91 156 ucp->uc_stack.ss_flags = 0;
92 157 }
93 158 }
94 159
95 160 /*
96 161 * If either the trace flag or REQUEST_STEP is set,
97 162 * arrange for single-stepping and turn off the trace flag.
98 163 */
99 164 if ((rp->r_ps & PS_T) || (lwp->lwp_pcb.pcb_flags & REQUEST_STEP)) {
100 165 /*
101 166 * Clear PS_T so that saved user context won't have trace
102 167 * flag set.
103 168 */
104 169 rp->r_ps &= ~PS_T;
105 170
106 171 if (!(lwp->lwp_pcb.pcb_flags & REQUEST_NOSTEP)) {
107 172 lwp->lwp_pcb.pcb_flags |= DEBUG_PENDING;
108 173 /*
109 174 * trap() always checks DEBUG_PENDING before
110 175 * checking for any pending signal. This at times
111 176 * can potentially lead to DEBUG_PENDING not being
112 177 * honoured. (for eg: the lwp is stopped by
113 178 * stop_on_fault() called from trap(), after being
114 179 * awakened it might see a pending signal and call
115 180 * savecontext(), however on the way back to userland
116 181 * there is no place it can be detected). Hence in
117 - * anticipation of such occassions, set AST flag for
182 + * anticipation of such occasions, set AST flag for
118 183 * the thread which will make the thread take an
119 184 * excursion through trap() where it will be handled
120 185 * appropriately.
121 186 */
122 187 aston(curthread);
123 188 }
124 189 }
125 190
126 191 getgregs(lwp, ucp->uc_mcontext.gregs);
127 - if (lwp->lwp_pcb.pcb_fpu.fpu_flags & FPU_EN)
192 + fpu_en = (lwp->lwp_pcb.pcb_fpu.fpu_flags & FPU_EN) != 0;
193 + if (fpu_en)
128 194 getfpregs(lwp, &ucp->uc_mcontext.fpregs);
129 195 else
130 196 ucp->uc_flags &= ~UC_FPU;
131 197
132 198 if (mask != NULL) {
133 199 /*
134 200 * Save signal mask.
135 201 */
136 202 sigktou(mask, &ucp->uc_sigmask);
137 203 } else {
138 204 ucp->uc_flags &= ~UC_SIGMASK;
139 205 bzero(&ucp->uc_sigmask, sizeof (ucp->uc_sigmask));
140 206 }
141 207
208 + /*
209 + * Determine if we need to get the rest of the xsave context out here.
210 + * If the thread doesn't actually have the FPU enabled, then we don't
211 + * need to do this. We also don't have to if the extended state wasn't
212 + * requested.
213 + */
214 + if (!need_xsave || !fpu_en) {
215 + return (0);
216 + }
217 +
218 + ucp->uc_flags |= UC_XSAVE;
219 +
220 + /*
221 + * While you might be asking why and contemplating despair, just know
222 + * that some things just need to be done in the face of signals (half
223 + * the reason this function exists). Basically, when in signal context
224 + * we can't trigger watchpoints. This means we need to tell the FPU
225 + * copy logic to actually use the on_fault/no_fault and the non-error
226 + * form of copyout (which at least still checks it's a user address).
227 + */
228 + if ((flags & SAVECTXT_F_ONFAULT) != 0) {
229 + ret = fpu_signal_copyout(lwp, ucp->uc_xsave,
230 + savecontext_copyout);
231 + } else {
232 + ret = fpu_signal_copyout(lwp, ucp->uc_xsave, copyout);
233 + }
234 +
142 235 if (PROC_IS_BRANDED(p) && BROP(p)->b_savecontext != NULL) {
143 236 /*
144 237 * Allow the brand the chance to modify the context we
145 238 * saved:
146 239 */
240 + /* XXX KEBE SAYS FIX ME! */
147 241 BROP(p)->b_savecontext(ucp);
148 242 }
243 +
244 + return (ret);
149 245 }
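
A usage sketch (editor's illustration under the assumptions above; the locals, the mask choice, and the user buffer address are all hypothetical): plain getcontext(2) passes SAVECTXT_F_NONE, while signal delivery pre-seeds uc_xsave and combines both new flags so the copy out cannot trip watchpoints:

ucontext_t kuc;			/* kernel-side staging copy */
int err;

/* getcontext(2)-style save: no extended state. */
err = savecontext(&kuc, &curthread->t_hold, SAVECTXT_F_NONE);

/*
 * Signal-delivery-style save: seed uc_xsave with the user buffer
 * address (hypothetical) before requesting extended state via the
 * watchpoint-safe path.
 */
kuc.uc_xsave = (long)user_xsave_buf;
err = savecontext(&kuc, &lwp->lwp_sigoldmask,
    SAVECTXT_F_EXTD | SAVECTXT_F_ONFAULT);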
150 246
151 247 /*
152 248 * Restore user context.
153 249 */
154 250 void
155 251 restorecontext(ucontext_t *ucp)
156 252 {
157 253 kthread_t *t = curthread;
158 254 klwp_t *lwp = ttolwp(t);
159 255 proc_t *p = lwptoproc(lwp);
160 256
161 257 if (PROC_IS_BRANDED(p) && BROP(p)->b_restorecontext != NULL) {
162 258 /*
163 259 * Allow the brand the chance to modify the context before
164 260 * we restore it:
165 261 */
166 262 BROP(p)->b_restorecontext(ucp);
167 263 }
168 264
169 265 DTRACE_PROBE3(oldcontext__set, klwp_t *, lwp,
170 266 uintptr_t, lwp->lwp_oldcontext,
171 267 uintptr_t, (uintptr_t)ucp->uc_link);
172 268 lwp->lwp_oldcontext = (uintptr_t)ucp->uc_link;
173 269
174 270 if (ucp->uc_flags & UC_STACK) {
175 271 if (ucp->uc_stack.ss_flags == SS_ONSTACK)
176 272 lwp->lwp_sigaltstack = ucp->uc_stack;
177 273 else
178 274 lwp->lwp_sigaltstack.ss_flags &= ~SS_ONSTACK;
179 275 }
180 276
181 277 if (ucp->uc_flags & UC_CPU) {
182 278 /*
183 279 * If the trace flag is set, mark the lwp to take a
184 280 * single-step trap on return to user level (below).
185 281 * The x86 lcall interface and sysenter has already done this,
186 282 * and turned off the flag, but amd64 syscall interface has not.
187 283 */
188 284 if (lwptoregs(lwp)->r_ps & PS_T)
189 285 lwp->lwp_pcb.pcb_flags |= DEBUG_PENDING;
190 286 setgregs(lwp, ucp->uc_mcontext.gregs);
191 287 lwp->lwp_eosys = JUSTRETURN;
192 288 t->t_post_sys = 1;
193 289 aston(curthread);
194 290 }
195 291
196 - if (ucp->uc_flags & UC_FPU)
292 + /*
293 + * The logic to copy in the ucontext_t takes care of combining the UC_FPU
294 + * and UC_XSAVE, so at this point only one of them should be set, if
295 + * any.
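 * When UC_XSAVE is set here, fpu_signal_copyin() has already staged
 * the user's extended state into a kernel buffer and repointed
 * uc_xsave at it, which is what the kernelbase assertion below is
 * checking.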
296 + */
297 + if (ucp->uc_flags & UC_XSAVE) {
298 + ASSERT0(ucp->uc_flags & UC_FPU);
299 + ASSERT3U((uintptr_t)ucp->uc_xsave, >=, _kernelbase);
300 + fpu_set_xsave(lwp, (const void *)ucp->uc_xsave);
301 + } else if (ucp->uc_flags & UC_FPU) {
197 302 setfpregs(lwp, &ucp->uc_mcontext.fpregs);
303 + }
198 304
199 305 if (ucp->uc_flags & UC_SIGMASK) {
200 306 /*
201 307 * We don't need to acquire p->p_lock here;
202 308 * we are manipulating thread-private data.
203 309 */
204 310 schedctl_finish_sigblock(t);
205 311 sigutok(&ucp->uc_sigmask, &t->t_hold);
206 312 if (sigcheck(ttoproc(t), t))
207 313 t->t_sig_check = 1;
208 314 }
209 315 }
210 316
211 317
212 318 int
213 319 getsetcontext(int flag, void *arg)
214 320 {
215 321 ucontext_t uc;
216 322 ucontext_t *ucp;
217 323 klwp_t *lwp = ttolwp(curthread);
218 325 stack_t dummy_stk;
219 326 proc_t *p = lwptoproc(lwp);
327 + int ret;
220 328
221 329 /*
222 330 * In future releases, when the ucontext structure grows,
223 331 * getcontext should be modified to only return the fields
224 332 * specified in the uc_flags. That way, the structure can grow
225 333 * and still be binary compatible will all .o's which will only
226 334 * have old fields defined in uc_flags
227 335 */
228 336
229 337 switch (flag) {
230 338 default:
231 339 return (set_errno(EINVAL));
232 340
233 341 case GETCONTEXT:
234 342 schedctl_finish_sigblock(curthread);
235 - savecontext(&uc, &curthread->t_hold);
343 + ret = savecontext(&uc, &curthread->t_hold, SAVECTXT_F_NONE);
344 + if (ret != 0)
345 + return (set_errno(ret));
236 346 if (uc.uc_flags & UC_SIGMASK)
237 347 SIGSET_NATIVE_TO_BRAND(&uc.uc_sigmask);
238 348 if (copyout(&uc, arg, sizeof (uc)))
239 349 return (set_errno(EFAULT));
240 350 return (0);
241 351
352 + /*
353 + * In the case of GETCONTEXT_EXTD, we've theoretically been given all
354 + * the required pointers of the appropriate length by libc in the
355 + * ucontext_t. We must first copy in the members that we care about to
356 + * seed the known extensions. Right now that is just the uc_xsave
357 + * member. As we set uc_flags ourselves, we only look at the members
358 + * that we need to care about.
359 + *
360 + * The main reason that we have a different entry point is that we don't
361 + * want to assume that callers have always properly zeroed their
362 + * ucontext_t ahead of calling into libc. In fact, it is often just
363 + * declared on the stack, so we can't assume that at all. By contrast,
364 + * getcontext_extd does require it.
365 + */
366 + case GETCONTEXT_EXTD:
367 + schedctl_finish_sigblock(curthread);
368 + ucp = arg;
369 + if (copyin(&ucp->uc_xsave, &uc.uc_xsave,
370 + sizeof (uc.uc_xsave)) != 0) {
371 + return (set_errno(EFAULT));
372 + }
373 + ret = savecontext(&uc, &curthread->t_hold, SAVECTXT_F_EXTD);
374 + if (ret != 0)
375 + return (set_errno(ret));
376 + if (uc.uc_flags & UC_SIGMASK)
377 + SIGSET_NATIVE_TO_BRAND(&uc.uc_sigmask);
378 + if (copyout(&uc, arg, sizeof (uc)))
379 + return (set_errno(EFAULT));
380 + return (0);
381 +
242 383 case SETCONTEXT:
243 384 ucp = arg;
244 385 if (ucp == NULL)
245 386 exit(CLD_EXITED, 0);
246 387 /*
247 388 * Don't copyin filler or floating state unless we need it.
248 389 * The ucontext_t struct and fields are specified in the ABI.
249 390 */
250 - if (copyin(ucp, &uc, sizeof (ucontext_t) -
251 - sizeof (uc.uc_filler) -
391 + if (copyin(ucp, &uc, offsetof(ucontext_t, uc_brand_data) -
252 392 sizeof (uc.uc_mcontext.fpregs))) {
253 393 return (set_errno(EFAULT));
254 394 }
255 395 if (uc.uc_flags & UC_SIGMASK)
256 396 SIGSET_BRAND_TO_NATIVE(&uc.uc_sigmask);
257 397
258 398 if ((uc.uc_flags & UC_FPU) &&
259 399 copyin(&ucp->uc_mcontext.fpregs, &uc.uc_mcontext.fpregs,
260 400 sizeof (uc.uc_mcontext.fpregs))) {
261 401 return (set_errno(EFAULT));
262 402 }
263 403
264 404 /*
265 405 * If this is a branded process, copy in the brand-private
266 406 * data:
267 407 */
268 408 if (PROC_IS_BRANDED(p) && copyin(&ucp->uc_brand_data,
269 409 &uc.uc_brand_data, sizeof (uc.uc_brand_data)) != 0) {
270 410 return (set_errno(EFAULT));
271 411 }
272 412
413 + uc.uc_xsave = 0;
414 + if ((uc.uc_flags & UC_XSAVE) != 0) {
415 + int ret;
416 +
417 + if (copyin(&ucp->uc_xsave, &uc.uc_xsave,
418 + sizeof (uc.uc_xsave)) != 0) {
419 + return (set_errno(EFAULT));
420 + }
421 +
422 + ret = fpu_signal_copyin(lwp, &uc);
423 + if (ret != 0) {
424 + return (set_errno(ret));
425 + }
426 + }
427 +
273 428 restorecontext(&uc);
274 429
275 430 if ((uc.uc_flags & UC_STACK) && (lwp->lwp_ustack != 0))
276 431 (void) copyout(&uc.uc_stack, (stack_t *)lwp->lwp_ustack,
277 432 sizeof (uc.uc_stack));
278 433 return (0);
279 434
280 435 case GETUSTACK:
281 436 if (copyout(&lwp->lwp_ustack, arg, sizeof (caddr_t)))
282 437 return (set_errno(EFAULT));
283 438 return (0);
284 439
285 440 case SETUSTACK:
286 441 if (copyin(arg, &dummy_stk, sizeof (dummy_stk)))
287 442 return (set_errno(EFAULT));
288 443 lwp->lwp_ustack = (uintptr_t)arg;
289 444 return (0);
290 445 }
291 446 }
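
From userland, the GETCONTEXT_EXTD path above is reached through libc. A hedged sketch of a consumer, assuming the getcontext_extd(3C), ucontext_alloc(3C), and ucontext_free(3C) interfaces that accompany this kernel work (names assumed, not shown in this diff):

#include <ucontext.h>
#include <err.h>
#include <stdlib.h>

int
main(void)
{
	/*
	 * ucontext_alloc() is assumed to size the ucontext_t and its
	 * uc_xsave buffer for the running CPU's xsave layout, which is
	 * the zeroing/setup that GETCONTEXT_EXTD relies on.
	 */
	ucontext_t *ucp = ucontext_alloc(0);

	if (ucp == NULL)
		err(EXIT_FAILURE, "failed to allocate ucontext_t");
	if (getcontext_extd(ucp, 0) != 0)
		err(EXIT_FAILURE, "failed to save extended context");
	/* setcontext(ucp) would restore the xsave state as well. */
	ucontext_free(ucp);
	return (0);
}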
292 447
293 448 #ifdef _SYSCALL32_IMPL
294 449
295 450 /*
296 451 * Save user context for 32-bit processes.
297 452 */
298 -void
299 -savecontext32(ucontext32_t *ucp, const k_sigset_t *mask)
453 +int
454 +savecontext32(ucontext32_t *ucp, const k_sigset_t *mask,
455 + savecontext_flags_t flags)
300 456 {
301 457 proc_t *p = ttoproc(curthread);
302 458 klwp_t *lwp = ttolwp(curthread);
303 459 struct regs *rp = lwptoregs(lwp);
460 + boolean_t need_xsave = B_FALSE;
461 + boolean_t fpu_en;
462 + int32_t user_xsave = 0;
463 + uintptr_t uaddr;
464 + int ret;
304 465
466 + /*
467 + * See savecontext for an explanation of this.
468 + */
469 + if ((flags & SAVECTXT_F_EXTD) != 0) {
470 + user_xsave = ucp->uc_xsave;
471 + if (fpu_xsave_enabled() && user_xsave != 0) {
472 + need_xsave = B_TRUE;
473 + }
474 + } else {
475 + VERIFY0(flags);
476 + }
305 477 bzero(&ucp->uc_mcontext.fpregs, sizeof (ucontext32_t) -
306 478 offsetof(ucontext32_t, uc_mcontext.fpregs));
479 + ucp->uc_xsave = user_xsave;
307 480
308 481 ucp->uc_flags = UC_ALL;
309 482 ucp->uc_link = (caddr32_t)lwp->lwp_oldcontext;
310 483
311 484 if (lwp->lwp_ustack == (uintptr_t)NULL ||
312 485 copyin((void *)lwp->lwp_ustack, &ucp->uc_stack,
313 486 sizeof (ucp->uc_stack)) != 0 ||
314 487 ucp->uc_stack.ss_size == 0) {
315 488
316 489 if (lwp->lwp_sigaltstack.ss_flags == SS_ONSTACK) {
317 490 ucp->uc_stack.ss_sp =
318 491 (caddr32_t)(uintptr_t)lwp->lwp_sigaltstack.ss_sp;
319 492 ucp->uc_stack.ss_size =
320 493 (size32_t)lwp->lwp_sigaltstack.ss_size;
321 494 ucp->uc_stack.ss_flags = SS_ONSTACK;
322 495 } else {
323 496 ucp->uc_stack.ss_sp = (caddr32_t)(uintptr_t)
324 497 (p->p_usrstack - p->p_stksize);
325 498 ucp->uc_stack.ss_size = (size32_t)p->p_stksize;
326 499 ucp->uc_stack.ss_flags = 0;
327 500 }
328 501 }
329 502
330 503 /*
331 504 * If either the trace flag or REQUEST_STEP is set, arrange
332 505 * for single-stepping and turn off the trace flag.
333 506 */
334 507 if ((rp->r_ps & PS_T) || (lwp->lwp_pcb.pcb_flags & REQUEST_STEP)) {
335 508 /*
336 509 * Clear PS_T so that saved user context won't have trace
337 510 * flag set.
338 511 */
339 512 rp->r_ps &= ~PS_T;
340 513
341 514 if (!(lwp->lwp_pcb.pcb_flags & REQUEST_NOSTEP)) {
342 515 lwp->lwp_pcb.pcb_flags |= DEBUG_PENDING;
343 516 /*
344 517 * See comments in savecontext().
345 518 */
346 519 aston(curthread);
347 520 }
348 521 }
349 522
350 523 getgregs32(lwp, ucp->uc_mcontext.gregs);
351 - if (lwp->lwp_pcb.pcb_fpu.fpu_flags & FPU_EN)
524 + fpu_en = (lwp->lwp_pcb.pcb_fpu.fpu_flags & FPU_EN) != 0;
525 + if (fpu_en)
352 526 getfpregs32(lwp, &ucp->uc_mcontext.fpregs);
353 527 else
354 528 ucp->uc_flags &= ~UC_FPU;
355 529
356 530 if (mask != NULL) {
357 531 /*
358 532 * Save signal mask.
359 533 */
360 534 sigktou(mask, &ucp->uc_sigmask);
361 535 } else {
362 536 ucp->uc_flags &= ~UC_SIGMASK;
363 537 bzero(&ucp->uc_sigmask, sizeof (ucp->uc_sigmask));
364 538 }
365 539
540 + if (!need_xsave || !fpu_en) {
541 + return (0);
542 + }
543 +
544 + ucp->uc_flags |= UC_XSAVE;
545 +
546 + /*
547 + * Due to not wanting to change or break programs, the filler in the
548 + * ucontext_t was always declared as a long, which is signed. Because
549 + * this is the 32-bit version, that means an int32_t. We cannot cast
550 + * directly to a uintptr_t, otherwise we might get sign extension, so
551 + * we first have to go through a uint32_t and then a uintptr_t.
552 + * Otherwise, see savecontext().
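 * For example (hypothetical value, editor's illustration): were the
 * 32-bit uc_xsave 0x80000000, casting (uintptr_t)(int32_t)0x80000000
 * would sign-extend to 0xffffffff80000000 on a 64-bit kernel, while
 * going through uint32_t first yields the intended 0x80000000.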
553 + */
554 + uaddr = (uintptr_t)(uint32_t)ucp->uc_xsave;
555 + if ((flags & SAVECTXT_F_ONFAULT) != 0) {
556 + ret = fpu_signal_copyout(lwp, uaddr, savecontext_copyout);
557 + } else {
558 + ret = fpu_signal_copyout(lwp, uaddr, copyout);
559 + }
560 +
366 562 if (PROC_IS_BRANDED(p) && BROP(p)->b_savecontext32 != NULL) {
367 563 /*
368 564 * Allow the brand the chance to modify the context we
369 565 * saved:
370 566 */
567 + /* XXX KEBE SAYS FIX ME */
371 568 BROP(p)->b_savecontext32(ucp);
372 569 }
570 +
571 + return (ret);
373 572 }
374 573
375 574 int
376 575 getsetcontext32(int flag, void *arg)
377 576 {
378 577 ucontext32_t uc;
379 578 ucontext_t ucnat;
380 579 ucontext32_t *ucp;
381 580 klwp_t *lwp = ttolwp(curthread);
382 581 caddr32_t ustack32;
383 582 stack32_t dummy_stk32;
384 583 proc_t *p = lwptoproc(lwp);
584 + int ret;
385 585
386 586 switch (flag) {
387 587 default:
388 588 return (set_errno(EINVAL));
389 589
390 590 case GETCONTEXT:
391 591 schedctl_finish_sigblock(curthread);
392 - savecontext32(&uc, &curthread->t_hold);
592 + ret = savecontext32(&uc, &curthread->t_hold, SAVECTXT_F_NONE);
593 + if (ret != 0)
594 + return (set_errno(ret));
393 595 if (uc.uc_flags & UC_SIGMASK)
394 596 SIGSET_NATIVE_TO_BRAND(&uc.uc_sigmask);
395 597 if (copyout(&uc, arg, sizeof (uc)))
396 598 return (set_errno(EFAULT));
397 599 return (0);
398 600
601 + /*
602 + * See getsetcontext() for an explanation of what is going on here.
603 + */
604 + case GETCONTEXT_EXTD:
605 + schedctl_finish_sigblock(curthread);
606 + ucp = arg;
607 + if (copyin(&ucp->uc_xsave, &uc.uc_xsave,
608 + sizeof (uc.uc_xsave)) != 0) {
609 + return (set_errno(EFAULT));
610 + }
611 + ret = savecontext32(&uc, &curthread->t_hold, SAVECTXT_F_EXTD);
612 + if (ret != 0)
613 + return (set_errno(ret));
614 + if (uc.uc_flags & UC_SIGMASK)
615 + SIGSET_NATIVE_TO_BRAND(&uc.uc_sigmask);
616 + if (copyout(&uc, arg, sizeof (uc)))
617 + return (set_errno(EFAULT));
618 + return (0);
619 +
399 620 case SETCONTEXT:
400 621 ucp = arg;
401 622 if (ucp == NULL)
402 623 exit(CLD_EXITED, 0);
403 - if (copyin(ucp, &uc, sizeof (uc) -
404 - sizeof (uc.uc_filler) -
624 + if (copyin(ucp, &uc, offsetof(ucontext32_t, uc_brand_data) -
405 625 sizeof (uc.uc_mcontext.fpregs))) {
406 626 return (set_errno(EFAULT));
407 627 }
408 628 if (uc.uc_flags & UC_SIGMASK)
409 629 SIGSET_BRAND_TO_NATIVE(&uc.uc_sigmask);
410 630 if ((uc.uc_flags & UC_FPU) &&
411 631 copyin(&ucp->uc_mcontext.fpregs, &uc.uc_mcontext.fpregs,
412 632 sizeof (uc.uc_mcontext.fpregs))) {
413 633 return (set_errno(EFAULT));
414 634 }
415 635
416 636 /*
417 637 * If this is a branded process, copy in the brand-private
418 638 * data:
419 639 */
420 640 if (PROC_IS_BRANDED(p) && copyin(&ucp->uc_brand_data,
421 641 &uc.uc_brand_data, sizeof (uc.uc_brand_data)) != 0) {
422 642 return (set_errno(EFAULT));
423 643 }
424 644
645 + uc.uc_xsave = 0;
646 + if ((uc.uc_flags & UC_XSAVE) != 0 &&
647 + copyin(&ucp->uc_xsave, &uc.uc_xsave,
648 + sizeof (uc.uc_xsave)) != 0) {
649 + return (set_errno(EFAULT));
650 + }
651 +
425 652 ucontext_32ton(&uc, &ucnat);
653 +
654 + if ((ucnat.uc_flags & UC_XSAVE) != 0) {
655 + int ret = fpu_signal_copyin(lwp, &ucnat);
656 + if (ret != 0) {
657 + return (set_errno(ret));
658 + }
659 + }
660 +
426 661 restorecontext(&ucnat);
427 662
428 663 if ((uc.uc_flags & UC_STACK) && (lwp->lwp_ustack != 0))
429 664 (void) copyout(&uc.uc_stack,
430 665 (stack32_t *)lwp->lwp_ustack, sizeof (uc.uc_stack));
431 666 return (0);
432 667
433 668 case GETUSTACK:
434 669 ustack32 = (caddr32_t)lwp->lwp_ustack;
435 670 if (copyout(&ustack32, arg, sizeof (ustack32)))
436 671 return (set_errno(EFAULT));
437 672 return (0);
438 673
439 674 case SETUSTACK:
440 675 if (copyin(arg, &dummy_stk32, sizeof (dummy_stk32)))
441 676 return (set_errno(EFAULT));
442 677 lwp->lwp_ustack = (uintptr_t)arg;
443 678 return (0);
444 679 }
445 680 }
446 681
447 682 #endif /* _SYSCALL32_IMPL */