--- old/usr/src/uts/intel/syscall/getcontext.c
+++ new/usr/src/uts/intel/syscall/getcontext.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 + * Copyright 2015 Joyent, Inc.
24 + */
25 +/*
23 26 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
24 27 * Use is subject to license terms.
25 28 */
26 29
27 30 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
28 31 /* All Rights Reserved */
29 32
30 33 /*
31 34 * Copyright 2023 Oxide Computer Company
32 35 */
33 36
34 37 #include <sys/param.h>
35 38 #include <sys/types.h>
36 39 #include <sys/vmparam.h>
37 40 #include <sys/systm.h>
38 41 #include <sys/signal.h>
39 42 #include <sys/stack.h>
40 43 #include <sys/regset.h>
41 44 #include <sys/privregs.h>
42 45 #include <sys/frame.h>
43 46 #include <sys/proc.h>
44 47 #include <sys/brand.h>
45 48 #include <sys/psw.h>
46 49 #include <sys/ucontext.h>
47 50 #include <sys/asm_linkage.h>
48 51 #include <sys/errno.h>
49 52 #include <sys/archsystm.h>
50 53 #include <sys/schedctl.h>
51 54 #include <sys/debug.h>
52 55 #include <sys/sysmacros.h>
56 +#include <sys/sdt.h>
53 57
54 58 /*
55 59 * This is a wrapper around copyout_noerr that returns a guaranteed error code.
56 60 * Because we're using copyout_noerr(), we need to bound the time we're under an
57 61 * on_fault/no_fault and attempt to do so only while we're actually copying data
58 62 * out. The main reason for this is that we're being called back from the
59 63 * FPU, which is held with kpreempt_disable() and related; we can't use a
60 64 * larger on_fault()/no_fault(), as that would both hide legitimate errors
61 65 * we make, masquerading them as user issues, and make it trickier to
62 66 * reason about the correct restoration of our state.
63 67 */
64 68 static int
65 69 savecontext_copyout(const void *kaddr, void *uaddr, size_t size)
66 70 {
67 71 label_t ljb;
68 72 if (!on_fault(&ljb)) {
69 73 copyout_noerr(kaddr, uaddr, size);
70 74 no_fault();
71 75 return (0);
72 76 } else {
73 77 no_fault();
74 78 return (EFAULT);
75 79 }
76 80 }
77 81
78 82 /*
79 83 * Save user context.
80 84 *
81 85 * Generally speaking, ucp is a pointer to kernel memory. In the traditional
82 86 * version of this (when flags is 0), we just write and fill out all of the
83 87 * ucontext_t without any care for what was there ahead of this. However, we
84 88 * have since extended the state to push additional data when user pointers
85 89 * in the ucontext_t are valid (currently only uc_xsave), in which case we
86 90 * copy that extended state out to the user pointer.
87 91 *
88 92 * We allow the copying to happen in two different ways mostly because this is
89 93 * also used in the signal handling context where we must be much more careful
90 94 * about how to copy out data.
91 95 */
92 96
93 97 int
94 98 savecontext(ucontext_t *ucp, const k_sigset_t *mask, savecontext_flags_t flags)
95 99 {
96 100 proc_t *p = ttoproc(curthread);
97 101 klwp_t *lwp = ttolwp(curthread);
98 102 struct regs *rp = lwptoregs(lwp);
99 103 boolean_t need_xsave = B_FALSE;
100 104 boolean_t fpu_en;
101 105 long user_xsave = 0;
102 106 int ret;
103 107
104 108 VERIFY0(flags & ~(SAVECTXT_F_EXTD | SAVECTXT_F_ONFAULT));
105 109
106 110 /*
107 111 * We unconditionally assign to every field through the end
108 112 * of the gregs, but we need to bzero() everything -after- that
109 113 * to avoid having any kernel stack garbage escape to userland.
110 114 *
111 115 * If we have been asked to save extended state, then we must make sure
112 116 * that we don't clobber that value. We must also determine if the
113 117 * processor has xsave state. If it does not, then we simply honor
114 118 * the pointer, but do not write anything out and do not set the flag.
115 119 */
116 120 if ((flags & SAVECTXT_F_EXTD) != 0) {
117 121 user_xsave = ucp->uc_xsave;
118 122 if (fpu_xsave_enabled() && user_xsave != 0) {
119 123 need_xsave = B_TRUE;
120 124 }
121 125 } else {
122 126 /*
123 127 * The only other flag that we have right now modifies the
124 128 * copyout behavior when we're copying out extended information.
125 129 * If SAVECTXT_F_EXTD is not set, no flags should be set at all.
126 130 */
127 131 VERIFY0(flags);
128 132 }
129 133 bzero(&ucp->uc_mcontext.fpregs, sizeof (ucontext_t) -
130 134 offsetof(ucontext_t, uc_mcontext.fpregs));
131 135 ucp->uc_xsave = user_xsave;
132 136
133 137 ucp->uc_flags = UC_ALL;
134 138 ucp->uc_link = (struct ucontext *)lwp->lwp_oldcontext;
135 139
136 140 /*
137 141 * Try to copyin() the ustack if one is registered. If the stack
138 142 * has zero size, this indicates that stack bounds checking has
139 143 * been disabled for this LWP. If stack bounds checking is disabled
140 144 * or the copyin() fails, we fall back to the legacy behavior.
141 145 */
142 146 if (lwp->lwp_ustack == (uintptr_t)NULL ||
143 147 copyin((void *)lwp->lwp_ustack, &ucp->uc_stack,
144 148 sizeof (ucp->uc_stack)) != 0 ||
145 149 ucp->uc_stack.ss_size == 0) {
146 150
147 151 if (lwp->lwp_sigaltstack.ss_flags == SS_ONSTACK) {
148 152 ucp->uc_stack = lwp->lwp_sigaltstack;
149 153 } else {
150 154 ucp->uc_stack.ss_sp = p->p_usrstack - p->p_stksize;
151 155 ucp->uc_stack.ss_size = p->p_stksize;
152 156 ucp->uc_stack.ss_flags = 0;
153 157 }
154 158 }
155 159
156 160 /*
157 161 * If either the trace flag or REQUEST_STEP is set,
158 162 * arrange for single-stepping and turn off the trace flag.
159 163 */
160 164 if ((rp->r_ps & PS_T) || (lwp->lwp_pcb.pcb_flags & REQUEST_STEP)) {
161 165 /*
162 166 * Clear PS_T so that saved user context won't have trace
163 167 * flag set.
164 168 */
165 169 rp->r_ps &= ~PS_T;
166 170
167 171 if (!(lwp->lwp_pcb.pcb_flags & REQUEST_NOSTEP)) {
168 172 lwp->lwp_pcb.pcb_flags |= DEBUG_PENDING;
169 173 /*
170 174 * trap() always checks DEBUG_PENDING before
171 175 * checking for any pending signal. This can at
172 176 * times lead to DEBUG_PENDING not being honoured
173 177 * (e.g. the lwp is stopped by stop_on_fault()
174 178 * called from trap(); after being awakened it may
175 179 * see a pending signal and call savecontext(),
176 180 * but on the way back to userland there is no
177 181 * place it can be detected). Hence, in
178 182 * anticipation of such occasions, set the AST
179 183 * flag for the thread, which will make the thread
180 184 * take an excursion through trap(), where it will
181 185 * be handled appropriately.
182 186 */
183 187 aston(curthread);
184 188 }
185 189 }
186 190
187 191 getgregs(lwp, ucp->uc_mcontext.gregs);
188 192 fpu_en = (lwp->lwp_pcb.pcb_fpu.fpu_flags & FPU_EN) != 0;
189 193 if (fpu_en)
190 194 getfpregs(lwp, &ucp->uc_mcontext.fpregs);
191 195 else
192 196 ucp->uc_flags &= ~UC_FPU;
193 197
194 - sigktou(mask, &ucp->uc_sigmask);
198 + if (mask != NULL) {
199 + /*
200 + * Save signal mask.
201 + */
202 + sigktou(mask, &ucp->uc_sigmask);
203 + } else {
204 + ucp->uc_flags &= ~UC_SIGMASK;
205 + bzero(&ucp->uc_sigmask, sizeof (ucp->uc_sigmask));
206 + }
195 207
196 208 /*
197 209 * Determine if we need to get the rest of the xsave context out here.
198 210 * If the thread doesn't actually have the FPU enabled, then we don't
199 211 * actually need to do this. We also don't have to if it wasn't
200 212 * requested.
201 213 */
202 214 if (!need_xsave || !fpu_en) {
203 215 return (0);
204 216 }
205 217
206 218 ucp->uc_flags |= UC_XSAVE;
207 219
208 220 /*
209 221 * While you might be asking why and contemplating despair, just know
210 222 * that some things just need to be done in the face of signals (half the
211 223 * reason this function exists). Basically, when in signal context we
212 224 * can't trigger watchpoints. This means we need to tell the FPU copy
213 225 * logic to actually use the on_fault/no_fault and the non-error form of
214 226 * copyout (which still checks if it's a user address at least).
215 227 */
216 228 if ((flags & SAVECTXT_F_ONFAULT) != 0) {
217 229 ret = fpu_signal_copyout(lwp, ucp->uc_xsave,
218 230 savecontext_copyout);
219 231 } else {
220 232 ret = fpu_signal_copyout(lwp, ucp->uc_xsave, copyout);
221 233 }
222 234
235 + if (PROC_IS_BRANDED(p) && BROP(p)->b_savecontext != NULL) {
236 + /*
237 + * Allow the brand the chance to modify the context we
238 + * saved:
239 + */
240 + /* XXX KEBE SAYS FIX ME! */
241 + BROP(p)->b_savecontext(ucp);
242 + }
243 +
223 244 return (ret);
224 245 }
225 246
226 247 /*
227 248 * Restore user context.
228 249 */
229 250 void
230 251 restorecontext(ucontext_t *ucp)
231 252 {
232 253 kthread_t *t = curthread;
233 254 klwp_t *lwp = ttolwp(t);
255 + proc_t *p = lwptoproc(lwp);
234 256
257 + if (PROC_IS_BRANDED(p) && BROP(p)->b_restorecontext != NULL) {
258 + /*
259 + * Allow the brand the chance to modify the context before
260 + * we restore it:
261 + */
262 + BROP(p)->b_restorecontext(ucp);
263 + }
264 +
265 + DTRACE_PROBE3(oldcontext__set, klwp_t *, lwp,
266 + uintptr_t, lwp->lwp_oldcontext,
267 + uintptr_t, (uintptr_t)ucp->uc_link);
235 268 lwp->lwp_oldcontext = (uintptr_t)ucp->uc_link;
236 269
237 270 if (ucp->uc_flags & UC_STACK) {
238 271 if (ucp->uc_stack.ss_flags == SS_ONSTACK)
239 272 lwp->lwp_sigaltstack = ucp->uc_stack;
240 273 else
241 274 lwp->lwp_sigaltstack.ss_flags &= ~SS_ONSTACK;
242 275 }
243 276
244 277 if (ucp->uc_flags & UC_CPU) {
245 278 /*
246 279 * If the trace flag is set, mark the lwp to take a
247 280 * single-step trap on return to user level (below).
248 281 * The x86 lcall and sysenter interfaces have already done this,
249 282 * and turned off the flag, but the amd64 syscall interface has not.
250 283 */
251 284 if (lwptoregs(lwp)->r_ps & PS_T)
252 285 lwp->lwp_pcb.pcb_flags |= DEBUG_PENDING;
253 286 setgregs(lwp, ucp->uc_mcontext.gregs);
254 287 lwp->lwp_eosys = JUSTRETURN;
255 288 t->t_post_sys = 1;
256 289 aston(curthread);
257 290 }
258 291
259 292 /*
260 293 * The logic to copy in the ucontext_t takes care of combining the UC_FPU
261 294 * and UC_XSAVE, so at this point only one of them should be set, if
262 295 * any.
263 296 */
264 297 if (ucp->uc_flags & UC_XSAVE) {
265 298 ASSERT0(ucp->uc_flags & UC_FPU);
266 299 ASSERT3U((uintptr_t)ucp->uc_xsave, >=, _kernelbase);
267 300 fpu_set_xsave(lwp, (const void *)ucp->uc_xsave);
268 301 } else if (ucp->uc_flags & UC_FPU) {
269 302 setfpregs(lwp, &ucp->uc_mcontext.fpregs);
270 303 }
271 304
272 305 if (ucp->uc_flags & UC_SIGMASK) {
273 306 /*
274 307 * We don't need to acquire p->p_lock here;
275 308 * we are manipulating thread-private data.
276 309 */
277 310 schedctl_finish_sigblock(t);
278 311 sigutok(&ucp->uc_sigmask, &t->t_hold);
279 312 if (sigcheck(ttoproc(t), t))
280 313 t->t_sig_check = 1;
281 314 }
282 315 }
283 316
284 317
285 318 int
286 319 getsetcontext(int flag, void *arg)
287 320 {
288 321 ucontext_t uc;
289 322 ucontext_t *ucp;
290 323 klwp_t *lwp = ttolwp(curthread);
291 324 void *fpu = NULL;
292 325 stack_t dummy_stk;
326 + proc_t *p = lwptoproc(lwp);
293 327 int ret;
294 328
295 329 /*
296 330 * In future releases, when the ucontext structure grows,
297 331 * getcontext should be modified to only return the fields
298 332 * specified in the uc_flags. That way, the structure can grow
299 333 * and still be binary compatible with all .o's, which will only
300 334 * have old fields defined in uc_flags.
301 335 */
302 336
303 337 switch (flag) {
304 338 default:
305 339 return (set_errno(EINVAL));
306 340
307 341 case GETCONTEXT:
308 342 schedctl_finish_sigblock(curthread);
309 343 ret = savecontext(&uc, &curthread->t_hold, SAVECTXT_F_NONE);
310 344 if (ret != 0)
311 345 return (set_errno(ret));
312 346 if (uc.uc_flags & UC_SIGMASK)
313 347 SIGSET_NATIVE_TO_BRAND(&uc.uc_sigmask);
314 348 if (copyout(&uc, arg, sizeof (uc)))
315 349 return (set_errno(EFAULT));
316 350 return (0);
317 351
318 352 /*
319 353 * In the case of GETCONTEXT_EXTD, we've theoretically been given all
320 354 * the required pointers of the appropriate length by libc in the
321 355 * ucontext_t. We must first copyin the offsets that we care about to
322 356 * seed the known extensions. Right now that is just the uc_xsave
323 357 * member. As we are setting uc_flags, we only look at the members we
324 358 * need to care about.
325 359 *
326 360 * The main reason that we have a different entry point is that we don't
327 361 * want to assume that callers have always properly zeroed their
328 362 * ucontext_t ahead of calling into libc. In fact, it often is just
329 363 * declared on the stack so we can't assume that at all. Instead,
330 364 * getcontext_extd does require that.
331 365 */
332 366 case GETCONTEXT_EXTD:
333 367 schedctl_finish_sigblock(curthread);
334 368 ucp = arg;
335 369 if (copyin(&ucp->uc_xsave, &uc.uc_xsave,
336 370 sizeof (uc.uc_xsave)) != 0) {
337 371 return (set_errno(EFAULT));
338 372 }
339 373 ret = savecontext(&uc, &curthread->t_hold, SAVECTXT_F_EXTD);
340 374 if (ret != 0)
341 375 return (set_errno(ret));
342 376 if (uc.uc_flags & UC_SIGMASK)
343 377 SIGSET_NATIVE_TO_BRAND(&uc.uc_sigmask);
344 378 if (copyout(&uc, arg, sizeof (uc)))
345 379 return (set_errno(EFAULT));
346 380 return (0);
347 381
348 382
349 383 case SETCONTEXT:
350 384 ucp = arg;
351 385 if (ucp == NULL)
352 386 exit(CLD_EXITED, 0);
353 387 /*
354 388 * Don't copyin filler or floating state unless we need it.
355 389 * The ucontext_t struct and fields are specified in the ABI.
356 390 */
357 - if (copyin(ucp, &uc, offsetof(ucontext_t, uc_filler) -
391 + if (copyin(ucp, &uc, offsetof(ucontext_t, uc_brand_data) -
358 392 sizeof (uc.uc_mcontext.fpregs))) {
359 393 return (set_errno(EFAULT));
360 394 }
361 395 if (uc.uc_flags & UC_SIGMASK)
362 396 SIGSET_BRAND_TO_NATIVE(&uc.uc_sigmask);
363 397
364 398 if ((uc.uc_flags & UC_FPU) &&
365 399 copyin(&ucp->uc_mcontext.fpregs, &uc.uc_mcontext.fpregs,
366 400 sizeof (uc.uc_mcontext.fpregs))) {
367 401 return (set_errno(EFAULT));
368 402 }
369 403
404 + /*
405 + * If this is a branded process, copy in the brand-private
406 + * data:
407 + */
408 + if (PROC_IS_BRANDED(p) && copyin(&ucp->uc_brand_data,
409 + &uc.uc_brand_data, sizeof (uc.uc_brand_data)) != 0) {
410 + return (set_errno(EFAULT));
411 + }
412 +
370 413 uc.uc_xsave = 0;
371 414 if ((uc.uc_flags & UC_XSAVE) != 0) {
372 415 int ret;
373 416
374 417 if (copyin(&ucp->uc_xsave, &uc.uc_xsave,
375 418 sizeof (uc.uc_xsave)) != 0) {
376 419 return (set_errno(EFAULT));
377 420 }
378 421
379 422 ret = fpu_signal_copyin(lwp, &uc);
380 423 if (ret != 0) {
381 424 return (set_errno(ret));
382 425 }
383 426 }
384 427
385 428 restorecontext(&uc);
386 429
387 430 if ((uc.uc_flags & UC_STACK) && (lwp->lwp_ustack != 0))
388 431 (void) copyout(&uc.uc_stack, (stack_t *)lwp->lwp_ustack,
389 432 sizeof (uc.uc_stack));
390 433 return (0);
391 434
392 435 case GETUSTACK:
393 436 if (copyout(&lwp->lwp_ustack, arg, sizeof (caddr_t)))
394 437 return (set_errno(EFAULT));
395 438 return (0);
396 439
397 440 case SETUSTACK:
398 441 if (copyin(arg, &dummy_stk, sizeof (dummy_stk)))
399 442 return (set_errno(EFAULT));
400 443 lwp->lwp_ustack = (uintptr_t)arg;
401 444 return (0);
402 445 }
403 446 }
404 447
405 448 #ifdef _SYSCALL32_IMPL
406 449
407 450 /*
408 451 * Save user context for 32-bit processes.
409 452 */
410 453 int
411 454 savecontext32(ucontext32_t *ucp, const k_sigset_t *mask,
412 455 savecontext_flags_t flags)
413 456 {
414 457 proc_t *p = ttoproc(curthread);
415 458 klwp_t *lwp = ttolwp(curthread);
416 459 struct regs *rp = lwptoregs(lwp);
417 460 boolean_t need_xsave = B_FALSE;
418 461 boolean_t fpu_en;
419 462 int32_t user_xsave = 0;
420 463 uintptr_t uaddr;
421 464 int ret;
422 465
423 466 /*
424 467 * See savecontext for an explanation of this.
425 468 */
426 469 if ((flags & SAVECTXT_F_EXTD) != 0) {
427 470 user_xsave = ucp->uc_xsave;
428 471 if (fpu_xsave_enabled() && user_xsave != 0) {
429 472 need_xsave = B_TRUE;
430 473 }
431 474 } else {
432 475 VERIFY0(flags);
433 476 }
434 477 bzero(&ucp->uc_mcontext.fpregs, sizeof (ucontext32_t) -
435 478 offsetof(ucontext32_t, uc_mcontext.fpregs));
436 479 ucp->uc_xsave = user_xsave;
437 480
438 481 ucp->uc_flags = UC_ALL;
439 482 ucp->uc_link = (caddr32_t)lwp->lwp_oldcontext;
440 483
441 484 if (lwp->lwp_ustack == (uintptr_t)NULL ||
442 485 copyin((void *)lwp->lwp_ustack, &ucp->uc_stack,
443 486 sizeof (ucp->uc_stack)) != 0 ||
444 487 ucp->uc_stack.ss_size == 0) {
445 488
446 489 if (lwp->lwp_sigaltstack.ss_flags == SS_ONSTACK) {
447 490 ucp->uc_stack.ss_sp =
448 491 (caddr32_t)(uintptr_t)lwp->lwp_sigaltstack.ss_sp;
449 492 ucp->uc_stack.ss_size =
450 493 (size32_t)lwp->lwp_sigaltstack.ss_size;
451 494 ucp->uc_stack.ss_flags = SS_ONSTACK;
452 495 } else {
453 496 ucp->uc_stack.ss_sp = (caddr32_t)(uintptr_t)
454 497 (p->p_usrstack - p->p_stksize);
455 498 ucp->uc_stack.ss_size = (size32_t)p->p_stksize;
456 499 ucp->uc_stack.ss_flags = 0;
457 500 }
458 501 }
459 502
460 503 /*
461 504 * If either the trace flag or REQUEST_STEP is set, arrange
462 505 * for single-stepping and turn off the trace flag.
463 506 */
464 507 if ((rp->r_ps & PS_T) || (lwp->lwp_pcb.pcb_flags & REQUEST_STEP)) {
465 508 /*
466 509 * Clear PS_T so that saved user context won't have trace
467 510 * flag set.
468 511 */
469 512 rp->r_ps &= ~PS_T;
470 513
471 514 if (!(lwp->lwp_pcb.pcb_flags & REQUEST_NOSTEP)) {
472 515 lwp->lwp_pcb.pcb_flags |= DEBUG_PENDING;
473 516 /*
474 517 * See comments in savecontext().
475 518 */
476 519 aston(curthread);
477 520 }
478 521 }
479 522
480 523 getgregs32(lwp, ucp->uc_mcontext.gregs);
481 524 fpu_en = (lwp->lwp_pcb.pcb_fpu.fpu_flags & FPU_EN) != 0;
482 525 if (fpu_en)
483 526 getfpregs32(lwp, &ucp->uc_mcontext.fpregs);
484 527 else
485 528 ucp->uc_flags &= ~UC_FPU;
486 529
487 - sigktou(mask, &ucp->uc_sigmask);
530 + if (mask != NULL) {
531 + /*
532 + * Save signal mask.
533 + */
534 + sigktou(mask, &ucp->uc_sigmask);
535 + } else {
536 + ucp->uc_flags &= ~UC_SIGMASK;
537 + bzero(&ucp->uc_sigmask, sizeof (ucp->uc_sigmask));
538 + }
488 539
489 540 if (!need_xsave || !fpu_en) {
490 541 return (0);
491 542 }
492 543
493 544 ucp->uc_flags |= UC_XSAVE;
494 545
495 546 /*
496 547 * To avoid changing or breaking programs, the filler in the
497 548 * ucontext_t was always declared as a long, which is signed. Because
498 549 * this is the 32-bit version, it is an int32_t. We cannot go directly
499 550 * to a uintptr_t, otherwise we might get sign extension, so we first
500 551 * have to go through a uint32_t and then a uintptr_t. Otherwise, see
501 552 * savecontext().
502 553 */
503 554 uaddr = (uintptr_t)(uint32_t)ucp->uc_xsave;
504 555 if ((flags & SAVECTXT_F_ONFAULT) != 0) {
505 556 ret = fpu_signal_copyout(lwp, uaddr, savecontext_copyout);
506 557 } else {
507 558 ret = fpu_signal_copyout(lwp, uaddr, copyout);
508 559 }
509 560
561 +
562 + if (PROC_IS_BRANDED(p) && BROP(p)->b_savecontext32 != NULL) {
563 + /*
564 + * Allow the brand the chance to modify the context we
565 + * saved:
566 + */
567 + /* XXX KEBE SAYS FIX ME */
568 + BROP(p)->b_savecontext32(ucp);
569 + }
570 +
510 571 return (ret);
511 572 }
512 573
513 574 int
514 575 getsetcontext32(int flag, void *arg)
515 576 {
516 577 ucontext32_t uc;
517 578 ucontext_t ucnat;
518 579 ucontext32_t *ucp;
519 580 klwp_t *lwp = ttolwp(curthread);
520 581 caddr32_t ustack32;
521 582 stack32_t dummy_stk32;
583 + proc_t *p = lwptoproc(lwp);
522 584 int ret;
523 585
524 586 switch (flag) {
525 587 default:
526 588 return (set_errno(EINVAL));
527 589
528 590 case GETCONTEXT:
529 591 schedctl_finish_sigblock(curthread);
530 592 ret = savecontext32(&uc, &curthread->t_hold, SAVECTXT_F_NONE);
531 593 if (ret != 0)
532 594 return (set_errno(ret));
533 595 if (uc.uc_flags & UC_SIGMASK)
534 596 SIGSET_NATIVE_TO_BRAND(&uc.uc_sigmask);
535 597 if (copyout(&uc, arg, sizeof (uc)))
536 598 return (set_errno(EFAULT));
537 599 return (0);
538 600
539 601 /*
540 602 * See getsetcontext() for an explanation of what is going on here.
541 603 */
542 604 case GETCONTEXT_EXTD:
543 605 schedctl_finish_sigblock(curthread);
544 606 ucp = arg;
545 607 if (copyin(&ucp->uc_xsave, &uc.uc_xsave,
546 608 sizeof (uc.uc_xsave)) != 0) {
547 609 return (set_errno(EFAULT));
548 610 }
549 611 ret = savecontext32(&uc, &curthread->t_hold, SAVECTXT_F_EXTD);
550 612 if (ret != 0)
551 613 return (set_errno(ret));
552 614 if (uc.uc_flags & UC_SIGMASK)
553 615 SIGSET_NATIVE_TO_BRAND(&uc.uc_sigmask);
554 616 if (copyout(&uc, arg, sizeof (uc)))
555 617 return (set_errno(EFAULT));
556 618 return (0);
557 619
558 620 case SETCONTEXT:
559 621 ucp = arg;
560 622 if (ucp == NULL)
561 623 exit(CLD_EXITED, 0);
562 - if (copyin(ucp, &uc, offsetof(ucontext32_t, uc_filler) -
624 + if (copyin(ucp, &uc, offsetof(ucontext32_t, uc_brand_data) -
563 625 sizeof (uc.uc_mcontext.fpregs))) {
564 626 return (set_errno(EFAULT));
565 627 }
566 628 if (uc.uc_flags & UC_SIGMASK)
567 629 SIGSET_BRAND_TO_NATIVE(&uc.uc_sigmask);
568 630 if ((uc.uc_flags & UC_FPU) &&
569 631 copyin(&ucp->uc_mcontext.fpregs, &uc.uc_mcontext.fpregs,
570 632 sizeof (uc.uc_mcontext.fpregs))) {
571 633 return (set_errno(EFAULT));
572 634 }
635 +
636 + /*
637 + * If this is a branded process, copy in the brand-private
638 + * data:
639 + */
640 + if (PROC_IS_BRANDED(p) && copyin(&ucp->uc_brand_data,
641 + &uc.uc_brand_data, sizeof (uc.uc_brand_data)) != 0) {
642 + return (set_errno(EFAULT));
643 + }
573 644
574 645 uc.uc_xsave = 0;
575 646 if ((uc.uc_flags & UC_XSAVE) != 0 &&
576 647 copyin(&ucp->uc_xsave, &uc.uc_xsave,
577 648 sizeof (uc.uc_xsave)) != 0) {
578 649 return (set_errno(EFAULT));
579 650 }
580 651
581 652 ucontext_32ton(&uc, &ucnat);
582 653
583 654 if ((ucnat.uc_flags & UC_XSAVE) != 0) {
584 655 int ret = fpu_signal_copyin(lwp, &ucnat);
585 656 if (ret != 0) {
586 657 return (set_errno(ret));
587 658 }
588 659 }
589 660
590 661 restorecontext(&ucnat);
591 662
592 663 if ((uc.uc_flags & UC_STACK) && (lwp->lwp_ustack != 0))
593 664 (void) copyout(&uc.uc_stack,
594 665 (stack32_t *)lwp->lwp_ustack, sizeof (uc.uc_stack));
595 666 return (0);
596 667
597 668 case GETUSTACK:
598 669 ustack32 = (caddr32_t)lwp->lwp_ustack;
599 670 if (copyout(&ustack32, arg, sizeof (ustack32)))
600 671 return (set_errno(EFAULT));
601 672 return (0);
602 673
603 674 case SETUSTACK:
604 675 if (copyin(arg, &dummy_stk32, sizeof (dummy_stk32)))
605 676 return (set_errno(EFAULT));
606 677 lwp->lwp_ustack = (uintptr_t)arg;
607 678 return (0);
608 679 }
609 680 }
610 681
611 682 #endif /* _SYSCALL32_IMPL */
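
For readers tracing how these kernel paths are reached: the sketch below is a minimal userland illustration, not part of this change, and assumes only the standard ucontext interfaces. getcontext(3C) enters the kernel through the GETCONTEXT case of getsetcontext() above (and so through savecontext()); setcontext(3C) enters through SETCONTEXT and restorecontext().

	#include <stdio.h>
	#include <ucontext.h>

	int
	main(void)
	{
		ucontext_t uc;
		volatile int resumed = 0;

		/* savecontext() fills in uc, including uc_sigmask and uc_stack. */
		if (getcontext(&uc) != 0) {
			perror("getcontext");
			return (1);
		}

		if (!resumed) {
			resumed = 1;
			/* restorecontext() resumes execution at getcontext(). */
			(void) setcontext(&uc);
			perror("setcontext");	/* reached only on failure */
			return (1);
		}

		(void) printf("resumed from the saved context\n");
		return (0);
	}

Note that setcontext() does not return on success; control resumes at the matching getcontext(), which is why the volatile flag is needed to break the loop.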
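
The uc_link and uc_stack handling above (lwp_oldcontext, the sigaltstack fallback) also underpins context chaining with makecontext(3C)/swapcontext(3C). A second hedged sketch, again userland-only and illustrative rather than part of this webrev:

	#include <stdio.h>
	#include <stdlib.h>
	#include <signal.h>
	#include <ucontext.h>

	static ucontext_t main_uc, alt_uc;

	static void
	on_alt_stack(void)
	{
		(void) printf("running on the private stack\n");
		/* Returning resumes the context in alt_uc.uc_link (main_uc). */
	}

	int
	main(void)
	{
		void *stk = malloc(SIGSTKSZ);

		if (stk == NULL)
			return (1);
		if (getcontext(&alt_uc) != 0)
			return (1);
		alt_uc.uc_stack.ss_sp = stk;	/* consumed via uc_stack above */
		alt_uc.uc_stack.ss_size = SIGSTKSZ;
		alt_uc.uc_link = &main_uc;	/* recorded in lwp_oldcontext */
		makecontext(&alt_uc, on_alt_stack, 0);

		if (swapcontext(&main_uc, &alt_uc) != 0)
			return (1);
		(void) printf("back on the original stack\n");
		free(stk);
		return (0);
	}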