1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright 2015 Joyent, Inc.
24 */
25 /*
26 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
27 * Use is subject to license terms.
28 */
29
30 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
31 /* All Rights Reserved */
32
33 #include <sys/param.h>
34 #include <sys/types.h>
35 #include <sys/vmparam.h>
36 #include <sys/systm.h>
37 #include <sys/signal.h>
38 #include <sys/stack.h>
39 #include <sys/frame.h>
40 #include <sys/proc.h>
41 #include <sys/brand.h>
42 #include <sys/ucontext.h>
43 #include <sys/asm_linkage.h>
44 #include <sys/kmem.h>
45 #include <sys/errno.h>
46 #include <sys/archsystm.h>
47 #include <sys/fpu/fpusystm.h>
48 #include <sys/debug.h>
49 #include <sys/model.h>
50 #include <sys/cmn_err.h>
51 #include <sys/sysmacros.h>
52 #include <sys/privregs.h>
53 #include <sys/schedctl.h>
54
55
56 /*
57 * Save user context.
58 */
/*
 * Save user context.
 *
 * Capture the calling LWP's user-visible state (stack description,
 * general registers, ASRs, FPU state) into the kernel-resident *ucp.
 * If mask is non-NULL it is recorded as the saved signal mask
 * (converted to user format); otherwise UC_SIGMASK is cleared and
 * uc_sigmask is zeroed.  Callers are responsible for copying the
 * result out to user space.
 */
void
savecontext(ucontext_t *ucp, const k_sigset_t *mask)
{
	proc_t *p = ttoproc(curthread);
	klwp_t *lwp = ttolwp(curthread);

	/*
	 * We assign to every field through uc_mcontext.fpregs.fpu_en,
	 * but we have to bzero() everything after that.
	 */
	bzero(&ucp->uc_mcontext.fpregs.fpu_en, sizeof (ucontext_t) -
	    offsetof(ucontext_t, uc_mcontext.fpregs.fpu_en));
	/*
	 * There are unused holes in the ucontext_t structure, zero-fill
	 * them so that we don't expose kernel data to the user.
	 * (The words immediately following uc_flags and
	 * uc_stack.ss_flags are structure padding.)
	 */
	(&ucp->uc_flags)[1] = 0;
	(&ucp->uc_stack.ss_flags)[1] = 0;

	/*
	 * Flushing the user windows isn't strictly necessary; we do
	 * it to maintain backward compatibility.
	 */
	(void) flush_user_windows_to_stack(NULL);

	ucp->uc_flags = UC_ALL;
	ucp->uc_link = (ucontext_t *)lwp->lwp_oldcontext;

	/*
	 * Try to copyin() the ustack if one is registered. If the stack
	 * has zero size, this indicates that stack bounds checking has
	 * been disabled for this LWP. If stack bounds checking is disabled
	 * or the copyin() fails, we fall back to the legacy behavior.
	 */
	if (lwp->lwp_ustack == NULL ||
	    copyin((void *)lwp->lwp_ustack, &ucp->uc_stack,
	    sizeof (ucp->uc_stack)) != 0 ||
	    ucp->uc_stack.ss_size == 0) {

		if (lwp->lwp_sigaltstack.ss_flags == SS_ONSTACK) {
			/* Currently running on the alternate signal stack. */
			ucp->uc_stack = lwp->lwp_sigaltstack;
		} else {
			/* Legacy behavior: describe the main process stack. */
			ucp->uc_stack.ss_sp = p->p_usrstack - p->p_stksize;
			ucp->uc_stack.ss_size = p->p_stksize;
			ucp->uc_stack.ss_flags = 0;
		}
	}

	getgregs(lwp, ucp->uc_mcontext.gregs);
	getasrs(lwp, ucp->uc_mcontext.asrs);

	getfpregs(lwp, &ucp->uc_mcontext.fpregs);
	/* FP-related ASRs are also folded into the asrs array. */
	getfpasrs(lwp, ucp->uc_mcontext.asrs);
	/* If the FPU was never enabled, the FP state is not meaningful. */
	if (ucp->uc_mcontext.fpregs.fpu_en == 0)
		ucp->uc_flags &= ~UC_FPU;
	ucp->uc_mcontext.gwins = (gwindows_t *)NULL;

	if (mask != NULL) {
		/*
		 * Save signal mask (kernel-to-user format conversion).
		 */
		sigktou(mask, &ucp->uc_sigmask);
	} else {
		ucp->uc_flags &= ~UC_SIGMASK;
		bzero(&ucp->uc_sigmask, sizeof (ucp->uc_sigmask));
	}
}
126
127
/*
 * Restore user context.
 *
 * Load the state recorded in the kernel-resident *ucp back into the
 * calling LWP.  Only the state groups selected by uc_flags are
 * restored: UC_STACK (signal-stack disposition), UC_CPU (windows,
 * general registers, ASRs), UC_FPU (FP registers and queue) and
 * UC_SIGMASK (signal mask).
 */
void
restorecontext(ucontext_t *ucp)
{
	kthread_t *t = curthread;
	klwp_t *lwp = ttolwp(t);
	mcontext_t *mcp = &ucp->uc_mcontext;
	model_t model = lwp_getdatamodel(lwp);

	(void) flush_user_windows_to_stack(NULL);
	/* Deal with any pending extra-register (xregs) state first. */
	if (lwp->lwp_pcb.pcb_xregstat != XREGNONE)
		xregrestore(lwp, 0);

	lwp->lwp_oldcontext = (uintptr_t)ucp->uc_link;

	if (ucp->uc_flags & UC_STACK) {
		if (ucp->uc_stack.ss_flags == SS_ONSTACK)
			lwp->lwp_sigaltstack = ucp->uc_stack;
		else
			lwp->lwp_sigaltstack.ss_flags &= ~SS_ONSTACK;
	}

	if (ucp->uc_flags & UC_CPU) {
		/* Register windows are restored before general registers. */
		if (mcp->gwins != 0)
			setgwins(lwp, mcp->gwins);
		setgregs(lwp, mcp->gregs);
		if (model == DATAMODEL_LP64)
			setasrs(lwp, mcp->asrs);
		else
			/* Non-LP64: state arrives via the xregs area. */
			xregs_setgregs(lwp, xregs_getptr(lwp, ucp));
	}

	if (ucp->uc_flags & UC_FPU) {
		fpregset_t *fp = &ucp->uc_mcontext.fpregs;

		setfpregs(lwp, fp);
		if (model == DATAMODEL_LP64)
			setfpasrs(lwp, mcp->asrs);
		else
			xregs_setfpregs(lwp, xregs_getptr(lwp, ucp));
		/* Process any entries present in the floating-point queue. */
		run_fpq(lwp, fp);
	}

	if (ucp->uc_flags & UC_SIGMASK) {
		/*
		 * We don't need to acquire p->p_lock here;
		 * we are manipulating thread-private data.
		 */
		schedctl_finish_sigblock(t);
		/* User-to-kernel sigset conversion into the thread's mask. */
		sigutok(&ucp->uc_sigmask, &t->t_hold);
		if (sigcheck(ttoproc(t), t))
			t->t_sig_check = 1;
	}
}
181
182
/*
 * System call handler for getcontext(2)/setcontext(2) and the
 * {GET,SET}USTACK operations, 64-bit (native) variant.
 *
 * flag: one of GETCONTEXT, SETCONTEXT, GETUSTACK, SETUSTACK.
 * arg:  user address of a ucontext_t (GET/SETCONTEXT), of a pointer
 *       to receive the registered ustack (GETUSTACK), or of a stack_t
 *       to register (SETUSTACK).
 * Returns 0 on success, or fails via set_errno(EINVAL/EFAULT).
 * SETCONTEXT with a NULL argument exits the process.
 */
int
getsetcontext(int flag, void *arg)
{
	ucontext_t uc;
	struct _fq fpu_q[MAXFPQ]; /* to hold floating queue */
	fpregset_t *fpp;
	gwindows_t *gwin = NULL;	/* to hold windows */
	caddr_t xregs = NULL;
	int xregs_size = 0;
	extern int nwindows;
	ucontext_t *ucp;
	klwp_t *lwp = ttolwp(curthread);
	stack_t dummy_stk;

	/*
	 * In future releases, when the ucontext structure grows,
	 * getcontext should be modified to only return the fields
	 * specified in the uc_flags. That way, the structure can grow
	 * and still be binary compatible with all .o's which will only
	 * have old fields defined in uc_flags
	 */

	switch (flag) {
	default:
		return (set_errno(EINVAL));

	case GETCONTEXT:
		schedctl_finish_sigblock(curthread);
		savecontext(&uc, &curthread->t_hold);
		if (uc.uc_flags & UC_SIGMASK)
			SIGSET_NATIVE_TO_BRAND(&uc.uc_sigmask);
		/*
		 * When using floating point it should not be possible to
		 * get here with a fpu_qcnt other than zero since we go
		 * to great pains to handle all outstanding FP exceptions
		 * before any system call code gets executed. However we
		 * clear fpu_q and fpu_qcnt here before copyout anyway -
		 * this will prevent us from interpreting the garbage we
		 * get back (when FP is not enabled) as valid queue data on
		 * a later setcontext(2).
		 */
		uc.uc_mcontext.fpregs.fpu_qcnt = 0;
		uc.uc_mcontext.fpregs.fpu_q = (struct _fq *)NULL;

		if (copyout(&uc, arg, sizeof (ucontext_t)))
			return (set_errno(EFAULT));
		return (0);

	case SETCONTEXT:
		ucp = arg;
		if (ucp == NULL)
			exit(CLD_EXITED, 0);
		/*
		 * Don't copyin filler or floating state unless we need it.
		 * The ucontext_t struct and fields are specified in the ABI.
		 */
		if (copyin(ucp, &uc, sizeof (ucontext_t) -
		    sizeof (uc.uc_filler) -
		    sizeof (uc.uc_mcontext.fpregs) -
		    sizeof (uc.uc_mcontext.xrs) -
		    sizeof (uc.uc_mcontext.asrs) -
		    sizeof (uc.uc_mcontext.filler))) {
			return (set_errno(EFAULT));
		}
		if (uc.uc_flags & UC_SIGMASK)
			SIGSET_BRAND_TO_NATIVE(&uc.uc_sigmask);
		/* The xrs field is always copied in, regardless of flags. */
		if (copyin(&ucp->uc_mcontext.xrs, &uc.uc_mcontext.xrs,
		    sizeof (uc.uc_mcontext.xrs))) {
			return (set_errno(EFAULT));
		}
		fpp = &uc.uc_mcontext.fpregs;
		if (uc.uc_flags & UC_FPU) {
			/*
			 * Need to copyin floating point state
			 */
			if (copyin(&ucp->uc_mcontext.fpregs,
			    &uc.uc_mcontext.fpregs,
			    sizeof (uc.uc_mcontext.fpregs)))
				return (set_errno(EFAULT));
			/* if floating queue not empty */
			if ((fpp->fpu_q) && (fpp->fpu_qcnt)) {
				/*
				 * Validate user-supplied queue geometry
				 * before copying it into the stack buffer.
				 */
				if (fpp->fpu_qcnt > MAXFPQ ||
				    fpp->fpu_q_entrysize <= 0 ||
				    fpp->fpu_q_entrysize > sizeof (struct _fq))
					return (set_errno(EINVAL));
				if (copyin(fpp->fpu_q, fpu_q,
				    fpp->fpu_qcnt * fpp->fpu_q_entrysize))
					return (set_errno(EFAULT));
				/* Point at the kernel copy of the queue. */
				fpp->fpu_q = fpu_q;
			} else {
				fpp->fpu_qcnt = 0; /* avoid confusion later */
			}
		} else {
			fpp->fpu_qcnt = 0;
		}
		if (uc.uc_mcontext.gwins) {	/* if windows in context */
			size_t gwin_size;

			/*
			 * We do the same computation here to determine
			 * how many bytes of gwindows_t to copy in that
			 * is also done in sendsig() to decide how many
			 * bytes to copy out. We just *know* that wbcnt
			 * is the first element of the structure.
			 */
			gwin = kmem_zalloc(sizeof (gwindows_t), KM_SLEEP);
			if (copyin(uc.uc_mcontext.gwins,
			    &gwin->wbcnt, sizeof (gwin->wbcnt))) {
				kmem_free(gwin, sizeof (gwindows_t));
				return (set_errno(EFAULT));
			}
			if (gwin->wbcnt < 0 || gwin->wbcnt > nwindows) {
				kmem_free(gwin, sizeof (gwindows_t));
				return (set_errno(EINVAL));
			}
			gwin_size = gwin->wbcnt * sizeof (struct rwindow) +
			    SPARC_MAXREGWINDOW * sizeof (int *) + sizeof (long);
			if (gwin_size > sizeof (gwindows_t) ||
			    copyin(uc.uc_mcontext.gwins, gwin, gwin_size)) {
				kmem_free(gwin, sizeof (gwindows_t));
				return (set_errno(EFAULT));
			}
			/* restorecontext() consumes the kernel copy. */
			uc.uc_mcontext.gwins = gwin;
		}

		/*
		 * get extra register state or asrs if any exists
		 * there is no extra register state for _LP64 user programs
		 */
		xregs_clrptr(lwp, &uc);
		if (copyin(&ucp->uc_mcontext.asrs, &uc.uc_mcontext.asrs,
		    sizeof (asrset_t))) {
			/* Free up gwin structure if used */
			if (gwin)
				kmem_free(gwin, sizeof (gwindows_t));
			return (set_errno(EFAULT));
		}

		restorecontext(&uc);

		/* Keep the registered ustack in sync with the new context. */
		if ((uc.uc_flags & UC_STACK) && (lwp->lwp_ustack != 0)) {
			(void) copyout(&uc.uc_stack, (stack_t *)lwp->lwp_ustack,
			    sizeof (stack_t));
		}

		/*
		 * free extra register state area
		 * (xregs_size is never set on this LP64 path, so this is
		 * effectively a no-op here; kept for symmetry with the
		 * 32-bit handler)
		 */
		if (xregs_size)
			kmem_free(xregs, xregs_size);

		if (gwin)
			kmem_free(gwin, sizeof (gwindows_t));

		return (0);

	case GETUSTACK:
		/* Report the registered ustack address (may be NULL). */
		if (copyout(&lwp->lwp_ustack, arg, sizeof (caddr_t)))
			return (set_errno(EFAULT));

		return (0);

	case SETUSTACK:
		/* Probe that arg points at a readable stack_t ... */
		if (copyin(arg, &dummy_stk, sizeof (dummy_stk)))
			return (set_errno(EFAULT));

		/* ... then record its address for later use. */
		lwp->lwp_ustack = (uintptr_t)arg;

		return (0);
	}
}
354
355
356 #ifdef _SYSCALL32_IMPL
357
358 /*
359 * Save user context for 32-bit processes.
360 */
/*
 * Save user context for 32-bit processes.
 *
 * ILP32 counterpart of savecontext(): captures the calling LWP's state
 * into the kernel-resident ucontext32_t *ucp, converting the native
 * FP register set to its 32-bit form.  dfq, which may be NULL, is
 * passed through to fpuregset_nto32() to receive the 32-bit floating
 * queue.  If mask is NULL, UC_SIGMASK is cleared and uc_sigmask zeroed.
 */
void
savecontext32(ucontext32_t *ucp, const k_sigset_t *mask, struct fq32 *dfq)
{
	proc_t *p = ttoproc(curthread);
	klwp_t *lwp = ttolwp(curthread);
	fpregset_t fpregs;

	/*
	 * We assign to every field through uc_mcontext.fpregs.fpu_en,
	 * but we have to bzero() everything after that.
	 */
	bzero(&ucp->uc_mcontext.fpregs.fpu_en, sizeof (ucontext32_t) -
	    offsetof(ucontext32_t, uc_mcontext.fpregs.fpu_en));
	/*
	 * There is an unused hole in the ucontext32_t structure; zero-fill
	 * it so that we don't expose kernel data to the user.
	 * (The word after uc_stack.ss_flags is structure padding.)
	 */
	(&ucp->uc_stack.ss_flags)[1] = 0;

	/*
	 * Flushing the user windows isn't strictly necessary; we do
	 * it to maintain backward compatibility.
	 */
	(void) flush_user_windows_to_stack(NULL);

	ucp->uc_flags = UC_ALL;
	ucp->uc_link = (caddr32_t)lwp->lwp_oldcontext;

	/*
	 * Try to copyin() the ustack if one is registered. If the stack
	 * has zero size, this indicates that stack bounds checking has
	 * been disabled for this LWP. If stack bounds checking is disabled
	 * or the copyin() fails, we fall back to the legacy behavior.
	 */
	if (lwp->lwp_ustack == NULL ||
	    copyin((void *)lwp->lwp_ustack, &ucp->uc_stack,
	    sizeof (ucp->uc_stack)) != 0 ||
	    ucp->uc_stack.ss_size == 0) {

		if (lwp->lwp_sigaltstack.ss_flags == SS_ONSTACK) {
			/* Narrow the native sigaltstack to 32-bit fields. */
			ucp->uc_stack.ss_sp =
			    (caddr32_t)(uintptr_t)lwp->lwp_sigaltstack.ss_sp;
			ucp->uc_stack.ss_size =
			    (size32_t)lwp->lwp_sigaltstack.ss_size;
			ucp->uc_stack.ss_flags = SS_ONSTACK;
		} else {
			/* Legacy behavior: describe the main process stack. */
			ucp->uc_stack.ss_sp =
			    (caddr32_t)(uintptr_t)p->p_usrstack - p->p_stksize;
			ucp->uc_stack.ss_size =
			    (size32_t)p->p_stksize;
			ucp->uc_stack.ss_flags = 0;
		}
	}

	getgregs32(lwp, ucp->uc_mcontext.gregs);
	/* Fetch native FP state, then convert it to the 32-bit layout. */
	getfpregs(lwp, &fpregs);
	fpuregset_nto32(&fpregs, &ucp->uc_mcontext.fpregs, dfq);

	/* If the FPU was never enabled, the FP state is not meaningful. */
	if (ucp->uc_mcontext.fpregs.fpu_en == 0)
		ucp->uc_flags &= ~UC_FPU;
	ucp->uc_mcontext.gwins = (caddr32_t)NULL;

	if (mask != NULL) {
		/*
		 * Save signal mask (the 32- and 64-bit sigset_t structures are
		 * identical).
		 */
		sigktou(mask, (sigset_t *)&ucp->uc_sigmask);
	} else {
		ucp->uc_flags &= ~UC_SIGMASK;
		bzero(&ucp->uc_sigmask, sizeof (ucp->uc_sigmask));
	}
}
434
/*
 * System call handler for getcontext(2)/setcontext(2) and the
 * {GET,SET}USTACK operations on behalf of 32-bit (ILP32) processes.
 *
 * Mirrors getsetcontext(): copies in/validates the 32-bit ucontext,
 * converts it to the native form with ucontext_32ton(), and restores
 * via restorecontext().  The extra-register (xregs) area, absent in
 * the LP64 path, is fetched here when the context carries one.
 */
int
getsetcontext32(int flag, void *arg)
{
	ucontext32_t uc;
	ucontext_t ucnat;
	struct _fq fpu_qnat[MAXFPQ]; /* to hold "native" floating queue */
	struct fq32 fpu_q[MAXFPQ]; /* to hold 32 bit floating queue */
	fpregset32_t *fpp;
	gwindows32_t *gwin = NULL;	/* to hold windows */
	caddr_t xregs;
	int xregs_size = 0;
	extern int nwindows;
	klwp_t *lwp = ttolwp(curthread);
	ucontext32_t *ucp;
	uint32_t ustack32;
	stack32_t dummy_stk32;

	/*
	 * In future releases, when the ucontext structure grows,
	 * getcontext should be modified to only return the fields
	 * specified in the uc_flags. That way, the structure can grow
	 * and still be binary compatible with all .o's which will only
	 * have old fields defined in uc_flags
	 */

	switch (flag) {
	default:
		return (set_errno(EINVAL));

	case GETCONTEXT:
		schedctl_finish_sigblock(curthread);
		savecontext32(&uc, &curthread->t_hold, NULL);
		if (uc.uc_flags & UC_SIGMASK)
			SIGSET_NATIVE_TO_BRAND(&uc.uc_sigmask);
		/*
		 * When using floating point it should not be possible to
		 * get here with a fpu_qcnt other than zero since we go
		 * to great pains to handle all outstanding FP exceptions
		 * before any system call code gets executed. However we
		 * clear fpu_q and fpu_qcnt here before copyout anyway -
		 * this will prevent us from interpreting the garbage we
		 * get back (when FP is not enabled) as valid queue data on
		 * a later setcontext(2).
		 */
		uc.uc_mcontext.fpregs.fpu_qcnt = 0;
		uc.uc_mcontext.fpregs.fpu_q = (caddr32_t)NULL;

		if (copyout(&uc, arg, sizeof (ucontext32_t)))
			return (set_errno(EFAULT));
		return (0);

	case SETCONTEXT:
		ucp = arg;
		if (ucp == NULL)
			exit(CLD_EXITED, 0);
		/*
		 * Don't copyin filler or floating state unless we need it.
		 * The ucontext_t struct and fields are specified in the ABI.
		 */
		if (copyin(ucp, &uc, sizeof (uc) - sizeof (uc.uc_filler) -
		    sizeof (uc.uc_mcontext.fpregs) -
		    sizeof (uc.uc_mcontext.xrs) -
		    sizeof (uc.uc_mcontext.filler))) {
			return (set_errno(EFAULT));
		}
		if (uc.uc_flags & UC_SIGMASK)
			SIGSET_BRAND_TO_NATIVE(&uc.uc_sigmask);
		/* The xrs field is always copied in, regardless of flags. */
		if (copyin(&ucp->uc_mcontext.xrs, &uc.uc_mcontext.xrs,
		    sizeof (uc.uc_mcontext.xrs))) {
			return (set_errno(EFAULT));
		}
		fpp = &uc.uc_mcontext.fpregs;
		if (uc.uc_flags & UC_FPU) {
			/*
			 * Need to copyin floating point state
			 */
			if (copyin(&ucp->uc_mcontext.fpregs,
			    &uc.uc_mcontext.fpregs,
			    sizeof (uc.uc_mcontext.fpregs)))
				return (set_errno(EFAULT));
			/* if floating queue not empty */
			if ((fpp->fpu_q) && (fpp->fpu_qcnt)) {
				/*
				 * Validate user-supplied queue geometry
				 * before copying into the stack buffer;
				 * ucontext_32ton() converts fpu_q below.
				 */
				if (fpp->fpu_qcnt > MAXFPQ ||
				    fpp->fpu_q_entrysize <= 0 ||
				    fpp->fpu_q_entrysize > sizeof (struct fq32))
					return (set_errno(EINVAL));
				if (copyin((void *)(uintptr_t)fpp->fpu_q, fpu_q,
				    fpp->fpu_qcnt * fpp->fpu_q_entrysize))
					return (set_errno(EFAULT));
			} else {
				fpp->fpu_qcnt = 0; /* avoid confusion later */
			}
		} else {
			fpp->fpu_qcnt = 0;
		}

		if (uc.uc_mcontext.gwins) {	/* if windows in context */
			size_t gwin_size;

			/*
			 * We do the same computation here to determine
			 * how many bytes of gwindows_t to copy in that
			 * is also done in sendsig() to decide how many
			 * bytes to copy out. We just *know* that wbcnt
			 * is the first element of the structure.
			 */
			gwin = kmem_zalloc(sizeof (gwindows32_t), KM_SLEEP);
			if (copyin((void *)(uintptr_t)uc.uc_mcontext.gwins,
			    &gwin->wbcnt, sizeof (gwin->wbcnt))) {
				kmem_free(gwin, sizeof (gwindows32_t));
				return (set_errno(EFAULT));
			}
			if (gwin->wbcnt < 0 || gwin->wbcnt > nwindows) {
				kmem_free(gwin, sizeof (gwindows32_t));
				return (set_errno(EINVAL));
			}
			gwin_size = gwin->wbcnt * sizeof (struct rwindow32) +
			    SPARC_MAXREGWINDOW * sizeof (caddr32_t) +
			    sizeof (int32_t);
			if (gwin_size > sizeof (gwindows32_t) ||
			    copyin((void *)(uintptr_t)uc.uc_mcontext.gwins,
			    gwin, gwin_size)) {
				kmem_free(gwin, sizeof (gwindows32_t));
				return (set_errno(EFAULT));
			}
			/* restorecontext() should ignore this */
			uc.uc_mcontext.gwins = (caddr32_t)0;
		}

		/* Convert the 32-bit context (and FP queue) to native form. */
		ucontext_32ton(&uc, &ucnat, fpu_q, fpu_qnat);

		/*
		 * get extra register state if any exists
		 */
		if (xregs_hasptr32(lwp, &uc) &&
		    ((xregs_size = xregs_getsize(curproc)) > 0)) {
			xregs = kmem_zalloc(xregs_size, KM_SLEEP);
			if (copyin((void *)(uintptr_t)xregs_getptr32(lwp, &uc),
			    xregs, xregs_size)) {
				kmem_free(xregs, xregs_size);
				if (gwin)
					kmem_free(gwin, sizeof (gwindows32_t));
				return (set_errno(EFAULT));
			}
			xregs_setptr(lwp, &ucnat, xregs);
		} else {
			xregs_clrptr(lwp, &ucnat);
		}

		restorecontext(&ucnat);

		/* Keep the registered ustack in sync with the new context. */
		if ((uc.uc_flags & UC_STACK) && (lwp->lwp_ustack != 0)) {
			(void) copyout(&uc.uc_stack,
			    (stack32_t *)lwp->lwp_ustack, sizeof (stack32_t));
		}

		/* Windows are applied here rather than by restorecontext(). */
		if (gwin)
			setgwins32(lwp, gwin);

		/*
		 * free extra register state area
		 */
		if (xregs_size)
			kmem_free(xregs, xregs_size);

		if (gwin)
			kmem_free(gwin, sizeof (gwindows32_t));

		return (0);

	case GETUSTACK:
		/* Report the registered ustack address, narrowed to 32 bits. */
		ustack32 = (uint32_t)lwp->lwp_ustack;
		if (copyout(&ustack32, arg, sizeof (caddr32_t)))
			return (set_errno(EFAULT));

		return (0);

	case SETUSTACK:
		/* Probe that arg points at a readable stack32_t ... */
		if (copyin(arg, &dummy_stk32, sizeof (dummy_stk32)))
			return (set_errno(EFAULT));

		/* ... then record its address for later use. */
		lwp->lwp_ustack = (uintptr_t)arg;

		return (0);
	}
}
621
622 #endif /* _SYSCALL32_IMPL */