Print this page
| Split |
Close |
| Expand all |
| Collapse all |
--- old/usr/src/uts/sparc/syscall/getcontext.c
+++ new/usr/src/uts/sparc/syscall/getcontext.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright 2015 Joyent, Inc.
24 24 */
25 25 /*
26 26 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
27 27 * Use is subject to license terms.
28 28 */
29 29
30 30 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
31 31 /* All Rights Reserved */
32 32
33 33 #include <sys/param.h>
34 34 #include <sys/types.h>
35 35 #include <sys/vmparam.h>
36 36 #include <sys/systm.h>
37 37 #include <sys/signal.h>
38 38 #include <sys/stack.h>
39 39 #include <sys/frame.h>
40 40 #include <sys/proc.h>
41 41 #include <sys/brand.h>
42 42 #include <sys/ucontext.h>
43 43 #include <sys/asm_linkage.h>
44 44 #include <sys/kmem.h>
45 45 #include <sys/errno.h>
46 46 #include <sys/archsystm.h>
47 47 #include <sys/fpu/fpusystm.h>
48 48 #include <sys/debug.h>
49 49 #include <sys/model.h>
50 50 #include <sys/cmn_err.h>
51 51 #include <sys/sysmacros.h>
52 52 #include <sys/privregs.h>
53 53 #include <sys/schedctl.h>
54 54
55 55
/*
 * Save the current LWP's user context into *ucp (a kernel buffer the
 * caller later copies out): resume link, stack bounds, general
 * registers, ancillary state registers, FP state, and optionally the
 * signal mask.
 *
 *   ucp  - kernel ucontext_t to fill in.
 *   mask - signal mask to record; if NULL, UC_SIGMASK is cleared in
 *          uc_flags and the saved mask is zeroed instead.
 */
void
savecontext(ucontext_t *ucp, const k_sigset_t *mask)
{
	proc_t *p = ttoproc(curthread);
	klwp_t *lwp = ttolwp(curthread);

	/*
	 * We assign to every field through uc_mcontext.fpregs.fpu_en,
	 * but we have to bzero() everything after that.
	 */
	bzero(&ucp->uc_mcontext.fpregs.fpu_en, sizeof (ucontext_t) -
	    offsetof(ucontext_t, uc_mcontext.fpregs.fpu_en));
	/*
	 * There are unused holes in the ucontext_t structure, zero-fill
	 * them so that we don't expose kernel data to the user.
	 * (The word following uc_flags and the one following ss_flags
	 * are alignment padding.)
	 */
	(&ucp->uc_flags)[1] = 0;
	(&ucp->uc_stack.ss_flags)[1] = 0;

	/*
	 * Flushing the user windows isn't strictly necessary; we do
	 * it to maintain backward compatibility.
	 */
	(void) flush_user_windows_to_stack(NULL);

	ucp->uc_flags = UC_ALL;
	ucp->uc_link = (ucontext_t *)lwp->lwp_oldcontext;

	/*
	 * Try to copyin() the ustack if one is registered. If the stack
	 * has zero size, this indicates that stack bounds checking has
	 * been disabled for this LWP. If stack bounds checking is disabled
	 * or the copyin() fails, we fall back to the legacy behavior.
	 */
	if (lwp->lwp_ustack == NULL ||
	    copyin((void *)lwp->lwp_ustack, &ucp->uc_stack,
	    sizeof (ucp->uc_stack)) != 0 ||
	    ucp->uc_stack.ss_size == 0) {

		if (lwp->lwp_sigaltstack.ss_flags == SS_ONSTACK) {
			/* Executing on the alternate signal stack. */
			ucp->uc_stack = lwp->lwp_sigaltstack;
		} else {
			/* Legacy: report the main process stack bounds. */
			ucp->uc_stack.ss_sp = p->p_usrstack - p->p_stksize;
			ucp->uc_stack.ss_size = p->p_stksize;
			ucp->uc_stack.ss_flags = 0;
		}
	}

	getgregs(lwp, ucp->uc_mcontext.gregs);
	getasrs(lwp, ucp->uc_mcontext.asrs);

	getfpregs(lwp, &ucp->uc_mcontext.fpregs);
	getfpasrs(lwp, ucp->uc_mcontext.asrs);
	/* If the FPU was never enabled there is no FP state to report. */
	if (ucp->uc_mcontext.fpregs.fpu_en == 0)
		ucp->uc_flags &= ~UC_FPU;
	/* Register windows are never passed back to the user this way. */
	ucp->uc_mcontext.gwins = (gwindows_t *)NULL;

	if (mask != NULL) {
		/*
		 * Save signal mask.
		 */
		sigktou(mask, &ucp->uc_sigmask);
	} else {
		ucp->uc_flags &= ~UC_SIGMASK;
		bzero(&ucp->uc_sigmask, sizeof (ucp->uc_sigmask));
	}
}
126 126
127 127
/*
 * Restore a previously saved user context from *ucp (already a kernel
 * copy; any user pointers it contains, e.g. uc_mcontext.gwins, must
 * have been resolved to kernel buffers by the caller).  Each piece of
 * state is applied only when its UC_* bit is set in uc_flags.
 */
void
restorecontext(ucontext_t *ucp)
{
	kthread_t *t = curthread;
	klwp_t *lwp = ttolwp(t);
	mcontext_t *mcp = &ucp->uc_mcontext;
	model_t model = lwp_getdatamodel(lwp);

	/* Push live user register windows out before replacing state. */
	(void) flush_user_windows_to_stack(NULL);
	if (lwp->lwp_pcb.pcb_xregstat != XREGNONE)
		xregrestore(lwp, 0);

	/* Re-chain to the context to resume when this one finishes. */
	lwp->lwp_oldcontext = (uintptr_t)ucp->uc_link;

	if (ucp->uc_flags & UC_STACK) {
		if (ucp->uc_stack.ss_flags == SS_ONSTACK)
			lwp->lwp_sigaltstack = ucp->uc_stack;
		else
			lwp->lwp_sigaltstack.ss_flags &= ~SS_ONSTACK;
	}

	if (ucp->uc_flags & UC_CPU) {
		/* Restore saved windows first, then the register set. */
		if (mcp->gwins != 0)
			setgwins(lwp, mcp->gwins);
		setgregs(lwp, mcp->gregs);
		if (model == DATAMODEL_LP64)
			setasrs(lwp, mcp->asrs);
		else
			xregs_setgregs(lwp, xregs_getptr(lwp, ucp));
	}

	if (ucp->uc_flags & UC_FPU) {
		fpregset_t *fp = &ucp->uc_mcontext.fpregs;

		setfpregs(lwp, fp);
		if (model == DATAMODEL_LP64)
			setfpasrs(lwp, mcp->asrs);
		else
			xregs_setfpregs(lwp, xregs_getptr(lwp, ucp));
		/* Replay any deferred FP instructions in the saved queue. */
		run_fpq(lwp, fp);
	}

	if (ucp->uc_flags & UC_SIGMASK) {
		/*
		 * We don't need to acquire p->p_lock here;
		 * we are manipulating thread-private data.
		 */
		schedctl_finish_sigblock(t);
		sigutok(&ucp->uc_sigmask, &t->t_hold);
		if (sigcheck(ttoproc(t), t))
			t->t_sig_check = 1;
	}
}
181 181
182 182
/*
 * System-call handler for getcontext(2)/setcontext(2) and the private
 * GETUSTACK/SETUSTACK operations, 64-bit (native) variant.
 *
 *   flag - GETCONTEXT, SETCONTEXT, GETUSTACK, or SETUSTACK.
 *   arg  - user address of a ucontext_t (GET/SETCONTEXT), of a
 *          caddr_t to receive the ustack pointer (GETUSTACK), or of a
 *          stack_t registering the ustack (SETUSTACK).
 *
 * Returns 0 on success or sets errno (EINVAL/EFAULT) on failure.
 * SETCONTEXT with a NULL argument exits the process.
 */
int
getsetcontext(int flag, void *arg)
{
	ucontext_t uc;
	struct _fq fpu_q[MAXFPQ]; /* to hold floating queue */
	fpregset_t *fpp;
	gwindows_t *gwin = NULL; /* to hold windows */
	caddr_t xregs = NULL;
	int xregs_size = 0;
	extern int nwindows;
	ucontext_t *ucp;
	klwp_t *lwp = ttolwp(curthread);
	stack_t dummy_stk;

	/*
	 * In future releases, when the ucontext structure grows,
	 * getcontext should be modified to only return the fields
	 * specified in the uc_flags. That way, the structure can grow
	 * and still be binary compatible will all .o's which will only
	 * have old fields defined in uc_flags
	 */

	switch (flag) {
	default:
		return (set_errno(EINVAL));

	case GETCONTEXT:
		schedctl_finish_sigblock(curthread);
		savecontext(&uc, &curthread->t_hold);
		/* Let a brand (e.g. lx) translate signal numbering. */
		if (uc.uc_flags & UC_SIGMASK)
			SIGSET_NATIVE_TO_BRAND(&uc.uc_sigmask);
		/*
		 * When using floating point it should not be possible to
		 * get here with a fpu_qcnt other than zero since we go
		 * to great pains to handle all outstanding FP exceptions
		 * before any system call code gets executed. However we
		 * clear fpu_q and fpu_qcnt here before copyout anyway -
		 * this will prevent us from interpreting the garbage we
		 * get back (when FP is not enabled) as valid queue data on
		 * a later setcontext(2).
		 */
		uc.uc_mcontext.fpregs.fpu_qcnt = 0;
		uc.uc_mcontext.fpregs.fpu_q = (struct _fq *)NULL;

		if (copyout(&uc, arg, sizeof (ucontext_t)))
			return (set_errno(EFAULT));
		return (0);

	case SETCONTEXT:
		ucp = arg;
		if (ucp == NULL)
			exit(CLD_EXITED, 0);
		/*
		 * Don't copyin filler or floating state unless we need it.
		 * The ucontext_t struct and fields are specified in the ABI.
		 */
		if (copyin(ucp, &uc, sizeof (ucontext_t) -
		    sizeof (uc.uc_filler) -
		    sizeof (uc.uc_mcontext.fpregs) -
		    sizeof (uc.uc_mcontext.xrs) -
		    sizeof (uc.uc_mcontext.asrs) -
		    sizeof (uc.uc_mcontext.filler))) {
			return (set_errno(EFAULT));
		}
		if (uc.uc_flags & UC_SIGMASK)
			SIGSET_BRAND_TO_NATIVE(&uc.uc_sigmask);
		/* xrs was skipped by the partial copyin above; fetch it. */
		if (copyin(&ucp->uc_mcontext.xrs, &uc.uc_mcontext.xrs,
		    sizeof (uc.uc_mcontext.xrs))) {
			return (set_errno(EFAULT));
		}
		fpp = &uc.uc_mcontext.fpregs;
		if (uc.uc_flags & UC_FPU) {
			/*
			 * Need to copyin floating point state
			 */
			if (copyin(&ucp->uc_mcontext.fpregs,
			    &uc.uc_mcontext.fpregs,
			    sizeof (uc.uc_mcontext.fpregs)))
				return (set_errno(EFAULT));
			/* if floating queue not empty */
			if ((fpp->fpu_q) && (fpp->fpu_qcnt)) {
				/*
				 * Validate user-supplied queue geometry
				 * before sizing the copyin from it.
				 */
				if (fpp->fpu_qcnt > MAXFPQ ||
				    fpp->fpu_q_entrysize <= 0 ||
				    fpp->fpu_q_entrysize > sizeof (struct _fq))
					return (set_errno(EINVAL));
				if (copyin(fpp->fpu_q, fpu_q,
				    fpp->fpu_qcnt * fpp->fpu_q_entrysize))
					return (set_errno(EFAULT));
				/* Point at our kernel copy of the queue. */
				fpp->fpu_q = fpu_q;
			} else {
				fpp->fpu_qcnt = 0; /* avoid confusion later */
			}
		} else {
			fpp->fpu_qcnt = 0;
		}
		if (uc.uc_mcontext.gwins) {	/* if windows in context */
			size_t gwin_size;

			/*
			 * We do the same computation here to determine
			 * how many bytes of gwindows_t to copy in that
			 * is also done in sendsig() to decide how many
			 * bytes to copy out. We just *know* that wbcnt
			 * is the first element of the structure.
			 */
			gwin = kmem_zalloc(sizeof (gwindows_t), KM_SLEEP);
			if (copyin(uc.uc_mcontext.gwins,
			    &gwin->wbcnt, sizeof (gwin->wbcnt))) {
				kmem_free(gwin, sizeof (gwindows_t));
				return (set_errno(EFAULT));
			}
			if (gwin->wbcnt < 0 || gwin->wbcnt > nwindows) {
				kmem_free(gwin, sizeof (gwindows_t));
				return (set_errno(EINVAL));
			}
			gwin_size = gwin->wbcnt * sizeof (struct rwindow) +
			    SPARC_MAXREGWINDOW * sizeof (int *) + sizeof (long);
			if (gwin_size > sizeof (gwindows_t) ||
			    copyin(uc.uc_mcontext.gwins, gwin, gwin_size)) {
				kmem_free(gwin, sizeof (gwindows_t));
				return (set_errno(EFAULT));
			}
			/* restorecontext() operates on the kernel copy. */
			uc.uc_mcontext.gwins = gwin;
		}

		/*
		 * get extra register state or asrs if any exists
		 * there is no extra register state for _LP64 user programs
		 */
		xregs_clrptr(lwp, &uc);
		if (copyin(&ucp->uc_mcontext.asrs, &uc.uc_mcontext.asrs,
		    sizeof (asrset_t))) {
			/* Free up gwin structure if used */
			if (gwin)
				kmem_free(gwin, sizeof (gwindows_t));
			return (set_errno(EFAULT));
		}

		restorecontext(&uc);

		/* Refresh the registered ustack with the restored bounds. */
		if ((uc.uc_flags & UC_STACK) && (lwp->lwp_ustack != 0)) {
			(void) copyout(&uc.uc_stack, (stack_t *)lwp->lwp_ustack,
			    sizeof (stack_t));
		}

		/*
		 * free extra register state area
		 */
		if (xregs_size)
			kmem_free(xregs, xregs_size);

		if (gwin)
			kmem_free(gwin, sizeof (gwindows_t));

		return (0);

	case GETUSTACK:
		if (copyout(&lwp->lwp_ustack, arg, sizeof (caddr_t)))
			return (set_errno(EFAULT));

		return (0);

	case SETUSTACK:
		/* Probe the address for readability before registering it. */
		if (copyin(arg, &dummy_stk, sizeof (dummy_stk)))
			return (set_errno(EFAULT));

		lwp->lwp_ustack = (uintptr_t)arg;

		return (0);
	}
}
354 354
355 355
356 356 #ifdef _SYSCALL32_IMPL
357 357
/*
 * Save user context for 32-bit processes.
 *
 * Same contract as savecontext(), but fills in an ILP32 ucontext32_t:
 * pointers and sizes are narrowed, and the native FP state is
 * converted with fpuregset_nto32().
 *
 *   ucp  - kernel ucontext32_t to fill in.
 *   mask - signal mask to record; if NULL, UC_SIGMASK is cleared.
 *   dfq  - destination for the 32-bit FP queue conversion (may be
 *          NULL when the caller does not want the queue).
 */
void
savecontext32(ucontext32_t *ucp, const k_sigset_t *mask, struct fq32 *dfq)
{
	proc_t *p = ttoproc(curthread);
	klwp_t *lwp = ttolwp(curthread);
	fpregset_t fpregs;

	/*
	 * We assign to every field through uc_mcontext.fpregs.fpu_en,
	 * but we have to bzero() everything after that.
	 */
	bzero(&ucp->uc_mcontext.fpregs.fpu_en, sizeof (ucontext32_t) -
	    offsetof(ucontext32_t, uc_mcontext.fpregs.fpu_en));
	/*
	 * There is an unused hole in the ucontext32_t structure; zero-fill
	 * it so that we don't expose kernel data to the user.
	 */
	(&ucp->uc_stack.ss_flags)[1] = 0;

	/*
	 * Flushing the user windows isn't strictly necessary; we do
	 * it to maintain backward compatibility.
	 */
	(void) flush_user_windows_to_stack(NULL);

	ucp->uc_flags = UC_ALL;
	ucp->uc_link = (caddr32_t)lwp->lwp_oldcontext;

	/*
	 * Try to copyin() the ustack if one is registered. If the stack
	 * has zero size, this indicates that stack bounds checking has
	 * been disabled for this LWP. If stack bounds checking is disabled
	 * or the copyin() fails, we fall back to the legacy behavior.
	 */
	if (lwp->lwp_ustack == NULL ||
	    copyin((void *)lwp->lwp_ustack, &ucp->uc_stack,
	    sizeof (ucp->uc_stack)) != 0 ||
	    ucp->uc_stack.ss_size == 0) {

		if (lwp->lwp_sigaltstack.ss_flags == SS_ONSTACK) {
			/* Narrow the 64-bit sigaltstack fields to ILP32. */
			ucp->uc_stack.ss_sp =
			    (caddr32_t)(uintptr_t)lwp->lwp_sigaltstack.ss_sp;
			ucp->uc_stack.ss_size =
			    (size32_t)lwp->lwp_sigaltstack.ss_size;
			ucp->uc_stack.ss_flags = SS_ONSTACK;
		} else {
			/* Legacy: report the main process stack bounds. */
			ucp->uc_stack.ss_sp =
			    (caddr32_t)(uintptr_t)p->p_usrstack - p->p_stksize;
			ucp->uc_stack.ss_size =
			    (size32_t)p->p_stksize;
			ucp->uc_stack.ss_flags = 0;
		}
	}

	getgregs32(lwp, ucp->uc_mcontext.gregs);
	/* Fetch native FP state, then convert it to the ILP32 layout. */
	getfpregs(lwp, &fpregs);
	fpuregset_nto32(&fpregs, &ucp->uc_mcontext.fpregs, dfq);

	/* If the FPU was never enabled there is no FP state to report. */
	if (ucp->uc_mcontext.fpregs.fpu_en == 0)
		ucp->uc_flags &= ~UC_FPU;
	ucp->uc_mcontext.gwins = (caddr32_t)NULL;

	if (mask != NULL) {
		/*
		 * Save signal mask (the 32- and 64-bit sigset_t structures are
		 * identical).
		 */
		sigktou(mask, (sigset_t *)&ucp->uc_sigmask);
	} else {
		ucp->uc_flags &= ~UC_SIGMASK;
		bzero(&ucp->uc_sigmask, sizeof (ucp->uc_sigmask));
	}
}
434 434
/*
 * System-call handler for getcontext(2)/setcontext(2) and
 * GETUSTACK/SETUSTACK, invoked on behalf of 32-bit (ILP32) processes.
 *
 * Mirrors getsetcontext(): user-visible structures are the *32
 * variants; for SETCONTEXT the copied-in ucontext32_t is widened to a
 * native ucontext_t with ucontext_32ton() before restorecontext().
 *
 * Returns 0 on success or sets errno (EINVAL/EFAULT) on failure.
 * SETCONTEXT with a NULL argument exits the process.
 */
int
getsetcontext32(int flag, void *arg)
{
	ucontext32_t uc;
	ucontext_t ucnat;
	struct _fq fpu_qnat[MAXFPQ]; /* to hold "native" floating queue */
	struct fq32 fpu_q[MAXFPQ]; /* to hold 32 bit floating queue */
	fpregset32_t *fpp;
	gwindows32_t *gwin = NULL; /* to hold windows */
	caddr_t xregs;
	int xregs_size = 0;
	extern int nwindows;
	klwp_t *lwp = ttolwp(curthread);
	ucontext32_t *ucp;
	uint32_t ustack32;
	stack32_t dummy_stk32;

	/*
	 * In future releases, when the ucontext structure grows,
	 * getcontext should be modified to only return the fields
	 * specified in the uc_flags. That way, the structure can grow
	 * and still be binary compatible will all .o's which will only
	 * have old fields defined in uc_flags
	 */

	switch (flag) {
	default:
		return (set_errno(EINVAL));

	case GETCONTEXT:
		schedctl_finish_sigblock(curthread);
		savecontext32(&uc, &curthread->t_hold, NULL);
		/* Let a brand (e.g. lx) translate signal numbering. */
		if (uc.uc_flags & UC_SIGMASK)
			SIGSET_NATIVE_TO_BRAND(&uc.uc_sigmask);
		/*
		 * When using floating point it should not be possible to
		 * get here with a fpu_qcnt other than zero since we go
		 * to great pains to handle all outstanding FP exceptions
		 * before any system call code gets executed. However we
		 * clear fpu_q and fpu_qcnt here before copyout anyway -
		 * this will prevent us from interpreting the garbage we
		 * get back (when FP is not enabled) as valid queue data on
		 * a later setcontext(2).
		 */
		uc.uc_mcontext.fpregs.fpu_qcnt = 0;
		uc.uc_mcontext.fpregs.fpu_q = (caddr32_t)NULL;

		if (copyout(&uc, arg, sizeof (ucontext32_t)))
			return (set_errno(EFAULT));
		return (0);

	case SETCONTEXT:
		ucp = arg;
		if (ucp == NULL)
			exit(CLD_EXITED, 0);
		/*
		 * Don't copyin filler or floating state unless we need it.
		 * The ucontext_t struct and fields are specified in the ABI.
		 */
		if (copyin(ucp, &uc, sizeof (uc) - sizeof (uc.uc_filler) -
		    sizeof (uc.uc_mcontext.fpregs) -
		    sizeof (uc.uc_mcontext.xrs) -
		    sizeof (uc.uc_mcontext.filler))) {
			return (set_errno(EFAULT));
		}
		if (uc.uc_flags & UC_SIGMASK)
			SIGSET_BRAND_TO_NATIVE(&uc.uc_sigmask);
		/* xrs was skipped by the partial copyin above; fetch it. */
		if (copyin(&ucp->uc_mcontext.xrs, &uc.uc_mcontext.xrs,
		    sizeof (uc.uc_mcontext.xrs))) {
			return (set_errno(EFAULT));
		}
		fpp = &uc.uc_mcontext.fpregs;
		if (uc.uc_flags & UC_FPU) {
			/*
			 * Need to copyin floating point state
			 */
			if (copyin(&ucp->uc_mcontext.fpregs,
			    &uc.uc_mcontext.fpregs,
			    sizeof (uc.uc_mcontext.fpregs)))
				return (set_errno(EFAULT));
			/* if floating queue not empty */
			if ((fpp->fpu_q) && (fpp->fpu_qcnt)) {
				/*
				 * Validate user-supplied queue geometry
				 * before sizing the copyin from it.  The
				 * kernel copy in fpu_q is handed to
				 * ucontext_32ton() below for widening.
				 */
				if (fpp->fpu_qcnt > MAXFPQ ||
				    fpp->fpu_q_entrysize <= 0 ||
				    fpp->fpu_q_entrysize > sizeof (struct fq32))
					return (set_errno(EINVAL));
				if (copyin((void *)(uintptr_t)fpp->fpu_q, fpu_q,
				    fpp->fpu_qcnt * fpp->fpu_q_entrysize))
					return (set_errno(EFAULT));
			} else {
				fpp->fpu_qcnt = 0; /* avoid confusion later */
			}
		} else {
			fpp->fpu_qcnt = 0;
		}

		if (uc.uc_mcontext.gwins) {	/* if windows in context */
			size_t gwin_size;

			/*
			 * We do the same computation here to determine
			 * how many bytes of gwindows_t to copy in that
			 * is also done in sendsig() to decide how many
			 * bytes to copy out. We just *know* that wbcnt
			 * is the first element of the structure.
			 */
			gwin = kmem_zalloc(sizeof (gwindows32_t), KM_SLEEP);
			if (copyin((void *)(uintptr_t)uc.uc_mcontext.gwins,
			    &gwin->wbcnt, sizeof (gwin->wbcnt))) {
				kmem_free(gwin, sizeof (gwindows32_t));
				return (set_errno(EFAULT));
			}
			if (gwin->wbcnt < 0 || gwin->wbcnt > nwindows) {
				kmem_free(gwin, sizeof (gwindows32_t));
				return (set_errno(EINVAL));
			}
			gwin_size = gwin->wbcnt * sizeof (struct rwindow32) +
			    SPARC_MAXREGWINDOW * sizeof (caddr32_t) +
			    sizeof (int32_t);
			if (gwin_size > sizeof (gwindows32_t) ||
			    copyin((void *)(uintptr_t)uc.uc_mcontext.gwins,
			    gwin, gwin_size)) {
				kmem_free(gwin, sizeof (gwindows32_t));
				return (set_errno(EFAULT));
			}
			/* restorecontext() should ignore this */
			uc.uc_mcontext.gwins = (caddr32_t)0;
		}

		/* Widen the ILP32 context (and FP queue) to native form. */
		ucontext_32ton(&uc, &ucnat, fpu_q, fpu_qnat);

		/*
		 * get extra register state if any exists
		 */
		if (xregs_hasptr32(lwp, &uc) &&
		    ((xregs_size = xregs_getsize(curproc)) > 0)) {
			xregs = kmem_zalloc(xregs_size, KM_SLEEP);
			if (copyin((void *)(uintptr_t)xregs_getptr32(lwp, &uc),
			    xregs, xregs_size)) {
				kmem_free(xregs, xregs_size);
				if (gwin)
					kmem_free(gwin, sizeof (gwindows32_t));
				return (set_errno(EFAULT));
			}
			xregs_setptr(lwp, &ucnat, xregs);
		} else {
			xregs_clrptr(lwp, &ucnat);
		}

		restorecontext(&ucnat);

		/* Refresh the registered ustack with the restored bounds. */
		if ((uc.uc_flags & UC_STACK) && (lwp->lwp_ustack != 0)) {
			(void) copyout(&uc.uc_stack,
			    (stack32_t *)lwp->lwp_ustack, sizeof (stack32_t));
		}

		/* Windows are applied separately from restorecontext(). */
		if (gwin)
			setgwins32(lwp, gwin);

		/*
		 * free extra register state area
		 */
		if (xregs_size)
			kmem_free(xregs, xregs_size);

		if (gwin)
			kmem_free(gwin, sizeof (gwindows32_t));

		return (0);

	case GETUSTACK:
		ustack32 = (uint32_t)lwp->lwp_ustack;
		if (copyout(&ustack32, arg, sizeof (caddr32_t)))
			return (set_errno(EFAULT));

		return (0);

	case SETUSTACK:
		/* Probe the address for readability before registering it. */
		if (copyin(arg, &dummy_stk32, sizeof (dummy_stk32)))
			return (set_errno(EFAULT));

		lwp->lwp_ustack = (uintptr_t)arg;

		return (0);
	}
}
621 621
622 622 #endif /* _SYSCALL32_IMPL */
|
↓ open down ↓ |
622 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX