Print this page
OS-3561 lxbrand emulation library should execute on alternate stack
OS-3558 lxbrand add support for full in-kernel syscall handling
OS-3545 lx_syscall_regs should not walk stack
OS-3868 many LTP testcases now hang
OS-3901 lxbrand lx_recvmsg fails to translate control messages when 64-bit
Reviewed by: Jerry Jelinek <jerry.jelinek@joyent.com>
Reviewed by: Patrick Mooney <patrick.mooney@joyent.com>
Reviewed by: Bryan Cantrill <bryan@joyent.com>
| Split |
Close |
| Expand all |
| Collapse all |
--- old/usr/src/uts/sparc/syscall/getcontext.c
+++ new/usr/src/uts/sparc/syscall/getcontext.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
|
↓ open down ↓ |
12 lines elided |
↑ open up ↑ |
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 + * Copyright 2015 Joyent, Inc.
24 + */
25 +/*
23 26 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
24 27 * Use is subject to license terms.
25 28 */
26 29
27 30 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
28 31 /* All Rights Reserved */
29 32
30 33 #include <sys/param.h>
31 34 #include <sys/types.h>
32 35 #include <sys/vmparam.h>
33 36 #include <sys/systm.h>
34 37 #include <sys/signal.h>
35 38 #include <sys/stack.h>
36 39 #include <sys/frame.h>
37 40 #include <sys/proc.h>
38 41 #include <sys/brand.h>
39 42 #include <sys/ucontext.h>
40 43 #include <sys/asm_linkage.h>
41 44 #include <sys/kmem.h>
42 45 #include <sys/errno.h>
43 46 #include <sys/archsystm.h>
44 47 #include <sys/fpu/fpusystm.h>
45 48 #include <sys/debug.h>
46 49 #include <sys/model.h>
47 50 #include <sys/cmn_err.h>
48 51 #include <sys/sysmacros.h>
49 52 #include <sys/privregs.h>
50 53 #include <sys/schedctl.h>
51 54
52 55
/*
 * Save user context.
 *
 * Snapshot the calling LWP's user-visible state (stack, general registers,
 * ASRs, FPU state) into *ucp.  If 'mask' is non-NULL it is converted to the
 * user sigset_t representation and stored in uc_sigmask; if 'mask' is NULL
 * the caller does not want a signal mask captured, so UC_SIGMASK is cleared
 * from uc_flags and uc_sigmask is zeroed.
 * NOTE(review): the NULL-mask case appears to have been added for brand
 * (lx) emulation callers that save context outside of signal delivery —
 * confirm against the callers before relying on that rationale.
 */
void
savecontext(ucontext_t *ucp, const k_sigset_t *mask)
{
	proc_t *p = ttoproc(curthread);
	klwp_t *lwp = ttolwp(curthread);

	/*
	 * We assign to every field through uc_mcontext.fpregs.fpu_en,
	 * but we have to bzero() everything after that.
	 */
	bzero(&ucp->uc_mcontext.fpregs.fpu_en, sizeof (ucontext_t) -
	    offsetof(ucontext_t, uc_mcontext.fpregs.fpu_en));
	/*
	 * There are unused holes in the ucontext_t structure, zero-fill
	 * them so that we don't expose kernel data to the user.
	 */
	(&ucp->uc_flags)[1] = 0;
	(&ucp->uc_stack.ss_flags)[1] = 0;

	/*
	 * Flushing the user windows isn't strictly necessary; we do
	 * it to maintain backward compatibility.
	 */
	(void) flush_user_windows_to_stack(NULL);

	ucp->uc_flags = UC_ALL;
	ucp->uc_link = (ucontext_t *)lwp->lwp_oldcontext;

	/*
	 * Try to copyin() the ustack if one is registered. If the stack
	 * has zero size, this indicates that stack bounds checking has
	 * been disabled for this LWP. If stack bounds checking is disabled
	 * or the copyin() fails, we fall back to the legacy behavior.
	 */
	if (lwp->lwp_ustack == NULL ||
	    copyin((void *)lwp->lwp_ustack, &ucp->uc_stack,
	    sizeof (ucp->uc_stack)) != 0 ||
	    ucp->uc_stack.ss_size == 0) {

		/* Legacy fallback: report altstack or the main stack. */
		if (lwp->lwp_sigaltstack.ss_flags == SS_ONSTACK) {
			ucp->uc_stack = lwp->lwp_sigaltstack;
		} else {
			ucp->uc_stack.ss_sp = p->p_usrstack - p->p_stksize;
			ucp->uc_stack.ss_size = p->p_stksize;
			ucp->uc_stack.ss_flags = 0;
		}
	}

	getgregs(lwp, ucp->uc_mcontext.gregs);
	getasrs(lwp, ucp->uc_mcontext.asrs);

	getfpregs(lwp, &ucp->uc_mcontext.fpregs);
	getfpasrs(lwp, ucp->uc_mcontext.asrs);
	/* If the FPU was never enabled there is no FP state to report. */
	if (ucp->uc_mcontext.fpregs.fpu_en == 0)
		ucp->uc_flags &= ~UC_FPU;
	ucp->uc_mcontext.gwins = (gwindows_t *)NULL;

	if (mask != NULL) {
		/*
		 * Save signal mask.
		 */
		sigktou(mask, &ucp->uc_sigmask);
	} else {
		/* Caller opted out of mask capture; don't leak stack junk. */
		ucp->uc_flags &= ~UC_SIGMASK;
		bzero(&ucp->uc_sigmask, sizeof (ucp->uc_sigmask));
	}
}
118 126
119 127
/*
 * Restore user context.
 *
 * Load the calling LWP's user-visible state from *ucp, honoring the
 * UC_STACK, UC_CPU, UC_FPU and UC_SIGMASK bits in uc_flags so that only
 * the pieces the context actually carries are restored.
 */
void
restorecontext(ucontext_t *ucp)
{
	kthread_t *t = curthread;
	klwp_t *lwp = ttolwp(t);
	mcontext_t *mcp = &ucp->uc_mcontext;
	model_t model = lwp_getdatamodel(lwp);

	/* Push any cached user register windows out before overwriting. */
	(void) flush_user_windows_to_stack(NULL);
	if (lwp->lwp_pcb.pcb_xregstat != XREGNONE)
		xregrestore(lwp, 0);

	lwp->lwp_oldcontext = (uintptr_t)ucp->uc_link;

	if (ucp->uc_flags & UC_STACK) {
		if (ucp->uc_stack.ss_flags == SS_ONSTACK)
			lwp->lwp_sigaltstack = ucp->uc_stack;
		else
			lwp->lwp_sigaltstack.ss_flags &= ~SS_ONSTACK;
	}

	if (ucp->uc_flags & UC_CPU) {
		/* Restore any saved register windows before the gregs. */
		if (mcp->gwins != 0)
			setgwins(lwp, mcp->gwins);
		setgregs(lwp, mcp->gregs);
		/* ILP32 processes keep their ASR state in the xregs area. */
		if (model == DATAMODEL_LP64)
			setasrs(lwp, mcp->asrs);
		else
			xregs_setgregs(lwp, xregs_getptr(lwp, ucp));
	}

	if (ucp->uc_flags & UC_FPU) {
		fpregset_t *fp = &ucp->uc_mcontext.fpregs;

		setfpregs(lwp, fp);
		if (model == DATAMODEL_LP64)
			setfpasrs(lwp, mcp->asrs);
		else
			xregs_setfpregs(lwp, xregs_getptr(lwp, ucp));
		/* Re-execute any deferred FP queue entries. */
		run_fpq(lwp, fp);
	}

	if (ucp->uc_flags & UC_SIGMASK) {
		/*
		 * We don't need to acquire p->p_lock here;
		 * we are manipulating thread-private data.
		 */
		schedctl_finish_sigblock(t);
		sigutok(&ucp->uc_sigmask, &t->t_hold);
		if (sigcheck(ttoproc(t), t))
			t->t_sig_check = 1;
	}
}
173 181
174 182
/*
 * getsetcontext() - kernel entry point backing the getcontext(2) and
 * setcontext(2) system calls for 64-bit processes, plus the private
 * GETUSTACK/SETUSTACK operations that register a user stack-boundary
 * structure for the LWP.  Returns 0 on success or sets errno.
 */
int
getsetcontext(int flag, void *arg)
{
	ucontext_t uc;
	struct _fq fpu_q[MAXFPQ]; /* to hold floating queue */
	fpregset_t *fpp;
	gwindows_t *gwin = NULL;	/* to hold windows */
	caddr_t xregs = NULL;
	int xregs_size = 0;
	extern int nwindows;
	ucontext_t *ucp;
	klwp_t *lwp = ttolwp(curthread);
	stack_t dummy_stk;

	/*
	 * In future releases, when the ucontext structure grows,
	 * getcontext should be modified to only return the fields
	 * specified in the uc_flags. That way, the structure can grow
	 * and still be binary compatible will all .o's which will only
	 * have old fields defined in uc_flags
	 */

	switch (flag) {
	default:
		return (set_errno(EINVAL));

	case GETCONTEXT:
		schedctl_finish_sigblock(curthread);
		savecontext(&uc, &curthread->t_hold);
		/* Translate the mask for branded (e.g. lx) processes. */
		if (uc.uc_flags & UC_SIGMASK)
			SIGSET_NATIVE_TO_BRAND(&uc.uc_sigmask);
		/*
		 * When using floating point it should not be possible to
		 * get here with a fpu_qcnt other than zero since we go
		 * to great pains to handle all outstanding FP exceptions
		 * before any system call code gets executed. However we
		 * clear fpu_q and fpu_qcnt here before copyout anyway -
		 * this will prevent us from interpreting the garbage we
		 * get back (when FP is not enabled) as valid queue data on
		 * a later setcontext(2).
		 */
		uc.uc_mcontext.fpregs.fpu_qcnt = 0;
		uc.uc_mcontext.fpregs.fpu_q = (struct _fq *)NULL;

		if (copyout(&uc, arg, sizeof (ucontext_t)))
			return (set_errno(EFAULT));
		return (0);

	case SETCONTEXT:
		ucp = arg;
		/* setcontext(NULL) means "exit this LWP's context chain". */
		if (ucp == NULL)
			exit(CLD_EXITED, 0);
		/*
		 * Don't copyin filler or floating state unless we need it.
		 * The ucontext_t struct and fields are specified in the ABI.
		 */
		if (copyin(ucp, &uc, sizeof (ucontext_t) -
		    sizeof (uc.uc_filler) -
		    sizeof (uc.uc_mcontext.fpregs) -
		    sizeof (uc.uc_mcontext.xrs) -
		    sizeof (uc.uc_mcontext.asrs) -
		    sizeof (uc.uc_mcontext.filler))) {
			return (set_errno(EFAULT));
		}
		if (uc.uc_flags & UC_SIGMASK)
			SIGSET_BRAND_TO_NATIVE(&uc.uc_sigmask);
		if (copyin(&ucp->uc_mcontext.xrs, &uc.uc_mcontext.xrs,
		    sizeof (uc.uc_mcontext.xrs))) {
			return (set_errno(EFAULT));
		}
		fpp = &uc.uc_mcontext.fpregs;
		if (uc.uc_flags & UC_FPU) {
			/*
			 * Need to copyin floating point state
			 */
			if (copyin(&ucp->uc_mcontext.fpregs,
			    &uc.uc_mcontext.fpregs,
			    sizeof (uc.uc_mcontext.fpregs)))
				return (set_errno(EFAULT));
			/* if floating queue not empty */
			if ((fpp->fpu_q) && (fpp->fpu_qcnt)) {
				/* Validate user-supplied queue geometry. */
				if (fpp->fpu_qcnt > MAXFPQ ||
				    fpp->fpu_q_entrysize <= 0 ||
				    fpp->fpu_q_entrysize > sizeof (struct _fq))
					return (set_errno(EINVAL));
				if (copyin(fpp->fpu_q, fpu_q,
				    fpp->fpu_qcnt * fpp->fpu_q_entrysize))
					return (set_errno(EFAULT));
				/* Point at the kernel copy from here on. */
				fpp->fpu_q = fpu_q;
			} else {
				fpp->fpu_qcnt = 0; /* avoid confusion later */
			}
		} else {
			fpp->fpu_qcnt = 0;
		}
		if (uc.uc_mcontext.gwins) {	/* if windows in context */
			size_t gwin_size;

			/*
			 * We do the same computation here to determine
			 * how many bytes of gwindows_t to copy in that
			 * is also done in sendsig() to decide how many
			 * bytes to copy out. We just *know* that wbcnt
			 * is the first element of the structure.
			 */
			gwin = kmem_zalloc(sizeof (gwindows_t), KM_SLEEP);
			if (copyin(uc.uc_mcontext.gwins,
			    &gwin->wbcnt, sizeof (gwin->wbcnt))) {
				kmem_free(gwin, sizeof (gwindows_t));
				return (set_errno(EFAULT));
			}
			if (gwin->wbcnt < 0 || gwin->wbcnt > nwindows) {
				kmem_free(gwin, sizeof (gwindows_t));
				return (set_errno(EINVAL));
			}
			gwin_size = gwin->wbcnt * sizeof (struct rwindow) +
			    SPARC_MAXREGWINDOW * sizeof (int *) + sizeof (long);
			if (gwin_size > sizeof (gwindows_t) ||
			    copyin(uc.uc_mcontext.gwins, gwin, gwin_size)) {
				kmem_free(gwin, sizeof (gwindows_t));
				return (set_errno(EFAULT));
			}
			uc.uc_mcontext.gwins = gwin;
		}

		/*
		 * get extra register state or asrs if any exists
		 * there is no extra register state for _LP64 user programs
		 */
		xregs_clrptr(lwp, &uc);
		if (copyin(&ucp->uc_mcontext.asrs, &uc.uc_mcontext.asrs,
		    sizeof (asrset_t))) {
			/* Free up gwin structure if used */
			if (gwin)
				kmem_free(gwin, sizeof (gwindows_t));
			return (set_errno(EFAULT));
		}

		restorecontext(&uc);

		/* Report the (possibly updated) stack bounds back out. */
		if ((uc.uc_flags & UC_STACK) && (lwp->lwp_ustack != 0)) {
			(void) copyout(&uc.uc_stack, (stack_t *)lwp->lwp_ustack,
			    sizeof (stack_t));
		}

		/*
		 * free extra register state area
		 */
		if (xregs_size)
			kmem_free(xregs, xregs_size);

		if (gwin)
			kmem_free(gwin, sizeof (gwindows_t));

		return (0);

	case GETUSTACK:
		if (copyout(&lwp->lwp_ustack, arg, sizeof (caddr_t)))
			return (set_errno(EFAULT));

		return (0);

	case SETUSTACK:
		/* Only validate that the user address is readable. */
		if (copyin(arg, &dummy_stk, sizeof (dummy_stk)))
			return (set_errno(EFAULT));

		lwp->lwp_ustack = (uintptr_t)arg;

		return (0);
	}
}
346 354
347 355
348 356 #ifdef _SYSCALL32_IMPL
349 357
/*
 * Save user context for 32-bit processes.
 *
 * Same contract as savecontext(), but filling in a ucontext32_t and
 * narrowing pointers/sizes to 32-bit representations.  'dfq' receives the
 * deferred floating-point queue during the native-to-32-bit conversion.
 * A NULL 'mask' means the caller does not want a signal mask captured:
 * UC_SIGMASK is cleared and uc_sigmask zeroed.
 */
void
savecontext32(ucontext32_t *ucp, const k_sigset_t *mask, struct fq32 *dfq)
{
	proc_t *p = ttoproc(curthread);
	klwp_t *lwp = ttolwp(curthread);
	fpregset_t fpregs;

	/*
	 * We assign to every field through uc_mcontext.fpregs.fpu_en,
	 * but we have to bzero() everything after that.
	 */
	bzero(&ucp->uc_mcontext.fpregs.fpu_en, sizeof (ucontext32_t) -
	    offsetof(ucontext32_t, uc_mcontext.fpregs.fpu_en));
	/*
	 * There is an unused hole in the ucontext32_t structure; zero-fill
	 * it so that we don't expose kernel data to the user.
	 */
	(&ucp->uc_stack.ss_flags)[1] = 0;

	/*
	 * Flushing the user windows isn't strictly necessary; we do
	 * it to maintain backward compatibility.
	 */
	(void) flush_user_windows_to_stack(NULL);

	ucp->uc_flags = UC_ALL;
	ucp->uc_link = (caddr32_t)lwp->lwp_oldcontext;

	/*
	 * Try to copyin() the ustack if one is registered. If the stack
	 * has zero size, this indicates that stack bounds checking has
	 * been disabled for this LWP. If stack bounds checking is disabled
	 * or the copyin() fails, we fall back to the legacy behavior.
	 */
	if (lwp->lwp_ustack == NULL ||
	    copyin((void *)lwp->lwp_ustack, &ucp->uc_stack,
	    sizeof (ucp->uc_stack)) != 0 ||
	    ucp->uc_stack.ss_size == 0) {

		/* Legacy fallback: report altstack or the main stack. */
		if (lwp->lwp_sigaltstack.ss_flags == SS_ONSTACK) {
			ucp->uc_stack.ss_sp =
			    (caddr32_t)(uintptr_t)lwp->lwp_sigaltstack.ss_sp;
			ucp->uc_stack.ss_size =
			    (size32_t)lwp->lwp_sigaltstack.ss_size;
			ucp->uc_stack.ss_flags = SS_ONSTACK;
		} else {
			ucp->uc_stack.ss_sp =
			    (caddr32_t)(uintptr_t)p->p_usrstack - p->p_stksize;
			ucp->uc_stack.ss_size =
			    (size32_t)p->p_stksize;
			ucp->uc_stack.ss_flags = 0;
		}
	}

	getgregs32(lwp, ucp->uc_mcontext.gregs);
	/* Fetch native FP state, then narrow it into the 32-bit layout. */
	getfpregs(lwp, &fpregs);
	fpuregset_nto32(&fpregs, &ucp->uc_mcontext.fpregs, dfq);

	if (ucp->uc_mcontext.fpregs.fpu_en == 0)
		ucp->uc_flags &= ~UC_FPU;
	ucp->uc_mcontext.gwins = (caddr32_t)NULL;

	if (mask != NULL) {
		/*
		 * Save signal mask (the 32- and 64-bit sigset_t structures are
		 * identical).
		 */
		sigktou(mask, (sigset_t *)&ucp->uc_sigmask);
	} else {
		/* Caller opted out of mask capture; don't leak stack junk. */
		ucp->uc_flags &= ~UC_SIGMASK;
		bzero(&ucp->uc_sigmask, sizeof (ucp->uc_sigmask));
	}
}
421 434
/*
 * getsetcontext32() - _SYSCALL32_IMPL counterpart of getsetcontext() for
 * 32-bit processes.  Copies in/out ucontext32_t structures, converts them
 * to/from the native ucontext_t (including the FP queue and any extra
 * register state) and delegates the actual restore to restorecontext().
 */
int
getsetcontext32(int flag, void *arg)
{
	ucontext32_t uc;
	ucontext_t ucnat;
	struct _fq fpu_qnat[MAXFPQ]; /* to hold "native" floating queue */
	struct fq32 fpu_q[MAXFPQ]; /* to hold 32 bit floating queue */
	fpregset32_t *fpp;
	gwindows32_t *gwin = NULL;	/* to hold windows */
	caddr_t xregs;
	int xregs_size = 0;
	extern int nwindows;
	klwp_t *lwp = ttolwp(curthread);
	ucontext32_t *ucp;
	uint32_t ustack32;
	stack32_t dummy_stk32;

	/*
	 * In future releases, when the ucontext structure grows,
	 * getcontext should be modified to only return the fields
	 * specified in the uc_flags. That way, the structure can grow
	 * and still be binary compatible will all .o's which will only
	 * have old fields defined in uc_flags
	 */

	switch (flag) {
	default:
		return (set_errno(EINVAL));

	case GETCONTEXT:
		schedctl_finish_sigblock(curthread);
		savecontext32(&uc, &curthread->t_hold, NULL);
		/* Translate the mask for branded (e.g. lx) processes. */
		if (uc.uc_flags & UC_SIGMASK)
			SIGSET_NATIVE_TO_BRAND(&uc.uc_sigmask);
		/*
		 * When using floating point it should not be possible to
		 * get here with a fpu_qcnt other than zero since we go
		 * to great pains to handle all outstanding FP exceptions
		 * before any system call code gets executed. However we
		 * clear fpu_q and fpu_qcnt here before copyout anyway -
		 * this will prevent us from interpreting the garbage we
		 * get back (when FP is not enabled) as valid queue data on
		 * a later setcontext(2).
		 */
		uc.uc_mcontext.fpregs.fpu_qcnt = 0;
		uc.uc_mcontext.fpregs.fpu_q = (caddr32_t)NULL;

		if (copyout(&uc, arg, sizeof (ucontext32_t)))
			return (set_errno(EFAULT));
		return (0);

	case SETCONTEXT:
		ucp = arg;
		/* setcontext(NULL) means "exit this LWP's context chain". */
		if (ucp == NULL)
			exit(CLD_EXITED, 0);
		/*
		 * Don't copyin filler or floating state unless we need it.
		 * The ucontext_t struct and fields are specified in the ABI.
		 */
		if (copyin(ucp, &uc, sizeof (uc) - sizeof (uc.uc_filler) -
		    sizeof (uc.uc_mcontext.fpregs) -
		    sizeof (uc.uc_mcontext.xrs) -
		    sizeof (uc.uc_mcontext.filler))) {
			return (set_errno(EFAULT));
		}
		if (uc.uc_flags & UC_SIGMASK)
			SIGSET_BRAND_TO_NATIVE(&uc.uc_sigmask);
		if (copyin(&ucp->uc_mcontext.xrs, &uc.uc_mcontext.xrs,
		    sizeof (uc.uc_mcontext.xrs))) {
			return (set_errno(EFAULT));
		}
		fpp = &uc.uc_mcontext.fpregs;
		if (uc.uc_flags & UC_FPU) {
			/*
			 * Need to copyin floating point state
			 */
			if (copyin(&ucp->uc_mcontext.fpregs,
			    &uc.uc_mcontext.fpregs,
			    sizeof (uc.uc_mcontext.fpregs)))
				return (set_errno(EFAULT));
			/* if floating queue not empty */
			if ((fpp->fpu_q) && (fpp->fpu_qcnt)) {
				/* Validate user-supplied queue geometry. */
				if (fpp->fpu_qcnt > MAXFPQ ||
				    fpp->fpu_q_entrysize <= 0 ||
				    fpp->fpu_q_entrysize > sizeof (struct fq32))
					return (set_errno(EINVAL));
				if (copyin((void *)(uintptr_t)fpp->fpu_q, fpu_q,
				    fpp->fpu_qcnt * fpp->fpu_q_entrysize))
					return (set_errno(EFAULT));
			} else {
				fpp->fpu_qcnt = 0; /* avoid confusion later */
			}
		} else {
			fpp->fpu_qcnt = 0;
		}

		if (uc.uc_mcontext.gwins) {	/* if windows in context */
			size_t gwin_size;

			/*
			 * We do the same computation here to determine
			 * how many bytes of gwindows_t to copy in that
			 * is also done in sendsig() to decide how many
			 * bytes to copy out. We just *know* that wbcnt
			 * is the first element of the structure.
			 */
			gwin = kmem_zalloc(sizeof (gwindows32_t), KM_SLEEP);
			if (copyin((void *)(uintptr_t)uc.uc_mcontext.gwins,
			    &gwin->wbcnt, sizeof (gwin->wbcnt))) {
				kmem_free(gwin, sizeof (gwindows32_t));
				return (set_errno(EFAULT));
			}
			if (gwin->wbcnt < 0 || gwin->wbcnt > nwindows) {
				kmem_free(gwin, sizeof (gwindows32_t));
				return (set_errno(EINVAL));
			}
			gwin_size = gwin->wbcnt * sizeof (struct rwindow32) +
			    SPARC_MAXREGWINDOW * sizeof (caddr32_t) +
			    sizeof (int32_t);
			if (gwin_size > sizeof (gwindows32_t) ||
			    copyin((void *)(uintptr_t)uc.uc_mcontext.gwins,
			    gwin, gwin_size)) {
				kmem_free(gwin, sizeof (gwindows32_t));
				return (set_errno(EFAULT));
			}
			/* restorecontext() should ignore this */
			uc.uc_mcontext.gwins = (caddr32_t)0;
		}

		/* Widen the 32-bit context into the native ucontext_t. */
		ucontext_32ton(&uc, &ucnat, fpu_q, fpu_qnat);

		/*
		 * get extra register state if any exists
		 */
		if (xregs_hasptr32(lwp, &uc) &&
		    ((xregs_size = xregs_getsize(curproc)) > 0)) {
			xregs = kmem_zalloc(xregs_size, KM_SLEEP);
			if (copyin((void *)(uintptr_t)xregs_getptr32(lwp, &uc),
			    xregs, xregs_size)) {
				kmem_free(xregs, xregs_size);
				if (gwin)
					kmem_free(gwin, sizeof (gwindows32_t));
				return (set_errno(EFAULT));
			}
			xregs_setptr(lwp, &ucnat, xregs);
		} else {
			xregs_clrptr(lwp, &ucnat);
		}

		restorecontext(&ucnat);

		/* Report the (possibly updated) stack bounds back out. */
		if ((uc.uc_flags & UC_STACK) && (lwp->lwp_ustack != 0)) {
			(void) copyout(&uc.uc_stack,
			    (stack32_t *)lwp->lwp_ustack, sizeof (stack32_t));
		}

		/* 32-bit windows are restored here, not by restorecontext(). */
		if (gwin)
			setgwins32(lwp, gwin);

		/*
		 * free extra register state area
		 */
		if (xregs_size)
			kmem_free(xregs, xregs_size);

		if (gwin)
			kmem_free(gwin, sizeof (gwindows32_t));

		return (0);

	case GETUSTACK:
		ustack32 = (uint32_t)lwp->lwp_ustack;
		if (copyout(&ustack32, arg, sizeof (caddr32_t)))
			return (set_errno(EFAULT));

		return (0);

	case SETUSTACK:
		/* Only validate that the user address is readable. */
		if (copyin(arg, &dummy_stk32, sizeof (dummy_stk32)))
			return (set_errno(EFAULT));

		lwp->lwp_ustack = (uintptr_t)arg;

		return (0);
	}
}
608 621
609 622 #endif /* _SYSCALL32_IMPL */
|
↓ open down ↓ |
180 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX