/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2015 Joyent, Inc.
 */
/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*      Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T     */
/*        All Rights Reserved   */

#include <sys/param.h>
#include <sys/types.h>
#include <sys/vmparam.h>
#include <sys/systm.h>
#include <sys/signal.h>
#include <sys/stack.h>
#include <sys/regset.h>
#include <sys/privregs.h>
#include <sys/frame.h>
#include <sys/proc.h>
#include <sys/brand.h>
#include <sys/psw.h>
#include <sys/ucontext.h>
#include <sys/asm_linkage.h>
#include <sys/errno.h>
#include <sys/archsystm.h>
#include <sys/schedctl.h>
#include <sys/debug.h>
#include <sys/sysmacros.h>
#include <sys/sdt.h>

/*
 * Save user context.
 */
void
savecontext(ucontext_t *ucp, const k_sigset_t *mask)
{
        proc_t *p = ttoproc(curthread);
        klwp_t *lwp = ttolwp(curthread);
        struct regs *rp = lwptoregs(lwp);

        /*
         * We unconditionally assign to every field through the end
         * of the gregs, but we need to bzero() everything -after- that
         * to avoid having any kernel stack garbage escape to userland.
         */
        bzero(&ucp->uc_mcontext.fpregs, sizeof (ucontext_t) -
            offsetof(ucontext_t, uc_mcontext.fpregs));

        ucp->uc_flags = UC_ALL;
        ucp->uc_link = (struct ucontext *)lwp->lwp_oldcontext;

        /*
         * Try to copyin() the ustack if one is registered. If the stack
         * has zero size, this indicates that stack bounds checking has
         * been disabled for this LWP. If stack bounds checking is disabled
         * or the copyin() fails, we fall back to the legacy behavior.
         */
        if (lwp->lwp_ustack == (uintptr_t)NULL ||
            copyin((void *)lwp->lwp_ustack, &ucp->uc_stack,
            sizeof (ucp->uc_stack)) != 0 ||
            ucp->uc_stack.ss_size == 0) {

                if (lwp->lwp_sigaltstack.ss_flags == SS_ONSTACK) {
                        ucp->uc_stack = lwp->lwp_sigaltstack;
                } else {
                        ucp->uc_stack.ss_sp = p->p_usrstack - p->p_stksize;
                        ucp->uc_stack.ss_size = p->p_stksize;
                        ucp->uc_stack.ss_flags = 0;
                }
        }

        /*
         * If either the trace flag or REQUEST_STEP is set,
         * arrange for single-stepping and turn off the trace flag.
         */
        if ((rp->r_ps & PS_T) || (lwp->lwp_pcb.pcb_flags & REQUEST_STEP)) {
                /*
                 * Clear PS_T so that saved user context won't have trace
                 * flag set.
                 */
                rp->r_ps &= ~PS_T;

                if (!(lwp->lwp_pcb.pcb_flags & REQUEST_NOSTEP)) {
                        lwp->lwp_pcb.pcb_flags |= DEBUG_PENDING;
                        /*
                         * trap() always checks DEBUG_PENDING before
                         * checking for any pending signal. This can at
                         * times lead to DEBUG_PENDING not being honoured
                         * (e.g. the lwp is stopped by stop_on_fault()
                         * called from trap(); after being awakened it may
                         * see a pending signal and call savecontext(), and
                         * on the way back to userland there is no place the
                         * condition can be detected). Hence, in anticipation
                         * of such occasions, set the AST flag for the
                         * thread, which will make the thread take an
                         * excursion through trap() where it will be handled
                         * appropriately.
                         */
                        aston(curthread);
                }
        }

        getgregs(lwp, ucp->uc_mcontext.gregs);
        if (lwp->lwp_pcb.pcb_fpu.fpu_flags & FPU_EN)
                getfpregs(lwp, &ucp->uc_mcontext.fpregs);
        else
                ucp->uc_flags &= ~UC_FPU;

        if (mask != NULL) {
                /*
                 * Save signal mask.
                 */
                sigktou(mask, &ucp->uc_sigmask);
        } else {
                ucp->uc_flags &= ~UC_SIGMASK;
                bzero(&ucp->uc_sigmask, sizeof (ucp->uc_sigmask));
        }

        if (PROC_IS_BRANDED(p) && BROP(p)->b_savecontext != NULL) {
                /*
                 * Allow the brand the chance to modify the context we
                 * saved:
                 */
                BROP(p)->b_savecontext(ucp);
        }
}

/*
 * Restore user context.
 */
void
restorecontext(ucontext_t *ucp)
{
        kthread_t *t = curthread;
        klwp_t *lwp = ttolwp(t);
        proc_t *p = lwptoproc(lwp);

        if (PROC_IS_BRANDED(p) && BROP(p)->b_restorecontext != NULL) {
                /*
                 * Allow the brand the chance to modify the context before
                 * we restore it:
                 */
                BROP(p)->b_restorecontext(ucp);
        }

        lwp->lwp_oldcontext = (uintptr_t)ucp->uc_link;

        if (ucp->uc_flags & UC_STACK) {
                if (ucp->uc_stack.ss_flags == SS_ONSTACK)
                        lwp->lwp_sigaltstack = ucp->uc_stack;
                else
                        lwp->lwp_sigaltstack.ss_flags &= ~SS_ONSTACK;
        }

        if (ucp->uc_flags & UC_CPU) {
                /*
                 * If the trace flag is set, mark the lwp to take a
                 * single-step trap on return to user level (below).
                 * The x86 lcall and sysenter interfaces have already done
                 * this and turned off the flag, but the amd64 syscall
                 * interface has not.
                 */
                if (lwptoregs(lwp)->r_ps & PS_T)
                        lwp->lwp_pcb.pcb_flags |= DEBUG_PENDING;
                setgregs(lwp, ucp->uc_mcontext.gregs);
                lwp->lwp_eosys = JUSTRETURN;
                t->t_post_sys = 1;
                aston(curthread);
        }

        if (ucp->uc_flags & UC_FPU)
                setfpregs(lwp, &ucp->uc_mcontext.fpregs);

        if (ucp->uc_flags & UC_SIGMASK) {
                /*
                 * We don't need to acquire p->p_lock here;
                 * we are manipulating thread-private data.
                 */
                schedctl_finish_sigblock(t);
                sigutok(&ucp->uc_sigmask, &t->t_hold);
                if (sigcheck(ttoproc(t), t))
                        t->t_sig_check = 1;
        }
}


int
getsetcontext(int flag, void *arg)
{
        ucontext_t uc;
        ucontext_t *ucp;
        klwp_t *lwp = ttolwp(curthread);
        stack_t dummy_stk;
        proc_t *p = lwptoproc(lwp);

        /*
         * In future releases, when the ucontext structure grows,
         * getcontext should be modified to only return the fields
         * specified in the uc_flags.  That way, the structure can grow
         * and still be binary compatible with all .o's, which will only
         * have old fields defined in uc_flags.
         */

        switch (flag) {
        default:
                return (set_errno(EINVAL));

        case GETCONTEXT:
                schedctl_finish_sigblock(curthread);
                savecontext(&uc, &curthread->t_hold);
                if (uc.uc_flags & UC_SIGMASK)
                        SIGSET_NATIVE_TO_BRAND(&uc.uc_sigmask);
                if (copyout(&uc, arg, sizeof (uc)))
                        return (set_errno(EFAULT));
                return (0);

        case SETCONTEXT:
                ucp = arg;
                if (ucp == NULL)
                        exit(CLD_EXITED, 0);
                /*
                 * Don't copyin filler or floating state unless we need it.
                 * The ucontext_t struct and fields are specified in the ABI.
                 */
                if (copyin(ucp, &uc, sizeof (ucontext_t) -
                    sizeof (uc.uc_filler) -
                    sizeof (uc.uc_mcontext.fpregs))) {
                        return (set_errno(EFAULT));
                }
                if (uc.uc_flags & UC_SIGMASK)
                        SIGSET_BRAND_TO_NATIVE(&uc.uc_sigmask);

                if ((uc.uc_flags & UC_FPU) &&
                    copyin(&ucp->uc_mcontext.fpregs, &uc.uc_mcontext.fpregs,
                    sizeof (uc.uc_mcontext.fpregs))) {
                        return (set_errno(EFAULT));
                }

                /*
                 * If this is a branded process, copy in the brand-private
                 * data:
                 */
                if (PROC_IS_BRANDED(p) && copyin(&ucp->uc_brand_data,
                    &uc.uc_brand_data, sizeof (uc.uc_brand_data)) != 0) {
                        return (set_errno(EFAULT));
                }

                restorecontext(&uc);

                if ((uc.uc_flags & UC_STACK) && (lwp->lwp_ustack != 0))
                        (void) copyout(&uc.uc_stack, (stack_t *)lwp->lwp_ustack,
                            sizeof (uc.uc_stack));
                return (0);

        case GETUSTACK:
                if (copyout(&lwp->lwp_ustack, arg, sizeof (caddr_t)))
                        return (set_errno(EFAULT));
                return (0);

        case SETUSTACK:
                if (copyin(arg, &dummy_stk, sizeof (dummy_stk)))
                        return (set_errno(EFAULT));
                lwp->lwp_ustack = (uintptr_t)arg;
                return (0);
        }
}
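
/*
 * Illustrative userland sketch (an assumption for context, not part of this
 * file): the GETCONTEXT/SETCONTEXT cases above back the getcontext(2) and
 * setcontext(2) library calls, which are typically used like this:
 *
 *      ucontext_t uc;
 *      volatile int resumed = 0;
 *
 *      if (getcontext(&uc) != 0)
 *              perror("getcontext");
 *      if (!resumed) {
 *              resumed = 1;
 *              (void) setcontext(&uc);  // resumes at the getcontext() return
 *      }
 */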

#ifdef _SYSCALL32_IMPL

/*
 * Save user context for 32-bit processes.
 */
void
savecontext32(ucontext32_t *ucp, const k_sigset_t *mask)
{
        proc_t *p = ttoproc(curthread);
        klwp_t *lwp = ttolwp(curthread);
        struct regs *rp = lwptoregs(lwp);

        bzero(&ucp->uc_mcontext.fpregs, sizeof (ucontext32_t) -
            offsetof(ucontext32_t, uc_mcontext.fpregs));

        ucp->uc_flags = UC_ALL;
        ucp->uc_link = (caddr32_t)lwp->lwp_oldcontext;

        if (lwp->lwp_ustack == (uintptr_t)NULL ||
            copyin((void *)lwp->lwp_ustack, &ucp->uc_stack,
            sizeof (ucp->uc_stack)) != 0 ||
            ucp->uc_stack.ss_size == 0) {

                if (lwp->lwp_sigaltstack.ss_flags == SS_ONSTACK) {
                        ucp->uc_stack.ss_sp =
                            (caddr32_t)(uintptr_t)lwp->lwp_sigaltstack.ss_sp;
                        ucp->uc_stack.ss_size =
                            (size32_t)lwp->lwp_sigaltstack.ss_size;
                        ucp->uc_stack.ss_flags = SS_ONSTACK;
                } else {
                        ucp->uc_stack.ss_sp = (caddr32_t)(uintptr_t)
                            (p->p_usrstack - p->p_stksize);
                        ucp->uc_stack.ss_size = (size32_t)p->p_stksize;
                        ucp->uc_stack.ss_flags = 0;
                }
        }

        /*
         * If either the trace flag or REQUEST_STEP is set, arrange
         * for single-stepping and turn off the trace flag.
         */
        if ((rp->r_ps & PS_T) || (lwp->lwp_pcb.pcb_flags & REQUEST_STEP)) {
                /*
                 * Clear PS_T so that saved user context won't have trace
                 * flag set.
                 */
                rp->r_ps &= ~PS_T;

                if (!(lwp->lwp_pcb.pcb_flags & REQUEST_NOSTEP)) {
                        lwp->lwp_pcb.pcb_flags |= DEBUG_PENDING;
                        /*
                         * See comments in savecontext().
                         */
                        aston(curthread);
                }
        }

        getgregs32(lwp, ucp->uc_mcontext.gregs);
        if (lwp->lwp_pcb.pcb_fpu.fpu_flags & FPU_EN)
                getfpregs32(lwp, &ucp->uc_mcontext.fpregs);
        else
                ucp->uc_flags &= ~UC_FPU;

        if (mask != NULL) {
                /*
                 * Save signal mask.
                 */
                sigktou(mask, &ucp->uc_sigmask);
        } else {
                ucp->uc_flags &= ~UC_SIGMASK;
                bzero(&ucp->uc_sigmask, sizeof (ucp->uc_sigmask));
        }

        if (PROC_IS_BRANDED(p) && BROP(p)->b_savecontext32 != NULL) {
                /*
                 * Allow the brand the chance to modify the context we
                 * saved:
                 */
                BROP(p)->b_savecontext32(ucp);
        }
}

int
getsetcontext32(int flag, void *arg)
{
        ucontext32_t uc;
        ucontext_t ucnat;
        ucontext32_t *ucp;
        klwp_t *lwp = ttolwp(curthread);
        caddr32_t ustack32;
        stack32_t dummy_stk32;
        proc_t *p = lwptoproc(lwp);

        switch (flag) {
        default:
                return (set_errno(EINVAL));

        case GETCONTEXT:
                schedctl_finish_sigblock(curthread);
                savecontext32(&uc, &curthread->t_hold);
                if (uc.uc_flags & UC_SIGMASK)
                        SIGSET_NATIVE_TO_BRAND(&uc.uc_sigmask);
                if (copyout(&uc, arg, sizeof (uc)))
                        return (set_errno(EFAULT));
                return (0);

        case SETCONTEXT:
                ucp = arg;
                if (ucp == NULL)
                        exit(CLD_EXITED, 0);
                if (copyin(ucp, &uc, sizeof (uc) -
                    sizeof (uc.uc_filler) -
                    sizeof (uc.uc_mcontext.fpregs))) {
                        return (set_errno(EFAULT));
                }
                if (uc.uc_flags & UC_SIGMASK)
                        SIGSET_BRAND_TO_NATIVE(&uc.uc_sigmask);
                if ((uc.uc_flags & UC_FPU) &&
                    copyin(&ucp->uc_mcontext.fpregs, &uc.uc_mcontext.fpregs,
                    sizeof (uc.uc_mcontext.fpregs))) {
                        return (set_errno(EFAULT));
                }

                /*
                 * If this is a branded process, copy in the brand-private
                 * data:
                 */
                if (PROC_IS_BRANDED(p) && copyin(&ucp->uc_brand_data,
                    &uc.uc_brand_data, sizeof (uc.uc_brand_data)) != 0) {
                        return (set_errno(EFAULT));
                }

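                /*
                 * Convert the 32-bit context to the native form so that the
                 * common restorecontext() path can operate on it.
                 */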
                ucontext_32ton(&uc, &ucnat);
                restorecontext(&ucnat);

                if ((uc.uc_flags & UC_STACK) && (lwp->lwp_ustack != 0))
                        (void) copyout(&uc.uc_stack,
                            (stack32_t *)lwp->lwp_ustack, sizeof (uc.uc_stack));
                return (0);

        case GETUSTACK:
                ustack32 = (caddr32_t)lwp->lwp_ustack;
                if (copyout(&ustack32, arg, sizeof (ustack32)))
                        return (set_errno(EFAULT));
                return (0);

        case SETUSTACK:
                if (copyin(arg, &dummy_stk32, sizeof (dummy_stk32)))
                        return (set_errno(EFAULT));
                lwp->lwp_ustack = (uintptr_t)arg;
                return (0);
        }
}

#endif  /* _SYSCALL32_IMPL */


/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2015 Joyent, Inc.
 */
/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*      Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T     */
/*        All Rights Reserved   */

/*
 * Copyright 2023 Oxide Computer Company
 */

#include <sys/param.h>
#include <sys/types.h>
#include <sys/vmparam.h>
#include <sys/systm.h>
#include <sys/signal.h>
#include <sys/stack.h>
#include <sys/regset.h>
#include <sys/privregs.h>
#include <sys/frame.h>
#include <sys/proc.h>
#include <sys/brand.h>
#include <sys/psw.h>
#include <sys/ucontext.h>
#include <sys/asm_linkage.h>
#include <sys/errno.h>
#include <sys/archsystm.h>
#include <sys/schedctl.h>
#include <sys/debug.h>
#include <sys/sysmacros.h>
#include <sys/sdt.h>

/*
 * This is a wrapper around copyout_noerr() that returns a guaranteed error
 * code. Because we're using copyout_noerr(), we need to bound the time we're
 * under an on_fault/no_fault and attempt to do so only while we're actually
 * copying data out. The main reason for this is that we're being called back
 * from the FPU, which is held with kpreempt_disable() and related; we can't
 * use a larger on_fault()/no_fault() scope, as that would both hide
 * legitimate errors we make (masquerading them as user issues) and make it
 * trickier to reason about the correct restoration of our state.
 */
static int
savecontext_copyout(const void *kaddr, void *uaddr, size_t size)
{
        label_t ljb;
        if (!on_fault(&ljb)) {
                copyout_noerr(kaddr, uaddr, size);
                no_fault();
                return (0);
        } else {
                no_fault();
                return (EFAULT);
        }
}
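
/*
 * savecontext_copyout() deliberately matches the int (*)(const void *,
 * void *, size_t) shape of copyout(), so either function can be handed to
 * fpu_signal_copyout() as the copy callback below.
 */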

/*
 * Save user context.
 *
 * Generally speaking ucp is a pointer to kernel memory. In the traditional
 * version of this (when flags is 0), we just write and fill out all of the
 * ucontext_t without any care for what was there ahead of this. However, when
 * we extended the state to push additional data where user pointers in the
 * ucontext_t are valid (currently only uc_xsave), we will copy that extended
 * state out to the user pointer.
 *
 * We allow the copying to happen in two different ways mostly because this is
 * also used in the signal handling context where we must be much more careful
 * about how to copy out data.
 */
int
savecontext(ucontext_t *ucp, const k_sigset_t *mask, savecontext_flags_t flags)
{
        proc_t *p = ttoproc(curthread);
        klwp_t *lwp = ttolwp(curthread);
        struct regs *rp = lwptoregs(lwp);
        boolean_t need_xsave = B_FALSE;
        boolean_t fpu_en;
        long user_xsave = 0;
        int ret;

        VERIFY0(flags & ~(SAVECTXT_F_EXTD | SAVECTXT_F_ONFAULT));

        /*
         * We unconditionally assign to every field through the end
         * of the gregs, but we need to bzero() everything -after- that
         * to avoid having any kernel stack garbage escape to userland.
         *
         * If we have been asked to save extended state, then we must make
         * sure that we don't clobber that value. We must also determine if
         * the processor has xsave state. If it does not, then we simply
         * honor the pointer, but do not write anything out and do not set
         * the flag.
         */
        if ((flags & SAVECTXT_F_EXTD) != 0) {
                user_xsave = ucp->uc_xsave;
                if (fpu_xsave_enabled() && user_xsave != 0) {
                        need_xsave = B_TRUE;
                }
        } else {
                /*
                 * The only other flag that we have right now is about
                 * modifying the copyout behavior when we're copying out
                 * extended information. If it's not here, we should not do
                 * anything.
                 */
                VERIFY0(flags);
        }
        bzero(&ucp->uc_mcontext.fpregs, sizeof (ucontext_t) -
            offsetof(ucontext_t, uc_mcontext.fpregs));
        ucp->uc_xsave = user_xsave;

        ucp->uc_flags = UC_ALL;
        ucp->uc_link = (struct ucontext *)lwp->lwp_oldcontext;

        /*
         * Try to copyin() the ustack if one is registered. If the stack
         * has zero size, this indicates that stack bounds checking has
         * been disabled for this LWP. If stack bounds checking is disabled
         * or the copyin() fails, we fall back to the legacy behavior.
         */
        if (lwp->lwp_ustack == (uintptr_t)NULL ||
            copyin((void *)lwp->lwp_ustack, &ucp->uc_stack,
            sizeof (ucp->uc_stack)) != 0 ||
            ucp->uc_stack.ss_size == 0) {

                if (lwp->lwp_sigaltstack.ss_flags == SS_ONSTACK) {
                        ucp->uc_stack = lwp->lwp_sigaltstack;
                } else {
                        ucp->uc_stack.ss_sp = p->p_usrstack - p->p_stksize;
                        ucp->uc_stack.ss_size = p->p_stksize;
                        ucp->uc_stack.ss_flags = 0;
                }
        }

        /*
         * If either the trace flag or REQUEST_STEP is set,
         * arrange for single-stepping and turn off the trace flag.
         */
        if ((rp->r_ps & PS_T) || (lwp->lwp_pcb.pcb_flags & REQUEST_STEP)) {
                /*
                 * Clear PS_T so that saved user context won't have trace
                 * flag set.
                 */
                rp->r_ps &= ~PS_T;

                if (!(lwp->lwp_pcb.pcb_flags & REQUEST_NOSTEP)) {
                        lwp->lwp_pcb.pcb_flags |= DEBUG_PENDING;
                        /*
                         * trap() always checks DEBUG_PENDING before
                         * checking for any pending signal. This can at
                         * times lead to DEBUG_PENDING not being honoured
                         * (e.g. the lwp is stopped by stop_on_fault()
                         * called from trap(); after being awakened it may
                         * see a pending signal and call savecontext(), and
                         * on the way back to userland there is no place the
                         * condition can be detected). Hence, in anticipation
                         * of such occasions, set the AST flag for the
                         * thread, which will make the thread take an
                         * excursion through trap() where it will be handled
                         * appropriately.
                         */
                        aston(curthread);
                }
        }

        getgregs(lwp, ucp->uc_mcontext.gregs);
        fpu_en = (lwp->lwp_pcb.pcb_fpu.fpu_flags & FPU_EN) != 0;
        if (fpu_en)
                getfpregs(lwp, &ucp->uc_mcontext.fpregs);
        else
                ucp->uc_flags &= ~UC_FPU;

        if (mask != NULL) {
                /*
                 * Save signal mask.
                 */
                sigktou(mask, &ucp->uc_sigmask);
        } else {
                ucp->uc_flags &= ~UC_SIGMASK;
                bzero(&ucp->uc_sigmask, sizeof (ucp->uc_sigmask));
        }

        /*
         * Determine if we need to get the rest of the xsave context out
         * here. If the thread doesn't actually have the FPU enabled, then
         * we don't need to do this. We also don't have to if it wasn't
         * requested.
         */
        if (!need_xsave || !fpu_en) {
                return (0);
        }

        ucp->uc_flags |= UC_XSAVE;

        /*
         * While you might be asking why and contemplating despair, just know
         * that some things need to just be done in the face of signals (half
         * the reason this function exists). Basically when in signal context
         * we can't trigger watch points. This means we need to tell the FPU
         * copy logic to actually use the on_fault/no_fault and the non-error
         * form of copyout (which still checks if it's a user address at
         * least).
         */
        if ((flags & SAVECTXT_F_ONFAULT) != 0) {
                ret = fpu_signal_copyout(lwp, ucp->uc_xsave,
                    savecontext_copyout);
        } else {
                ret = fpu_signal_copyout(lwp, ucp->uc_xsave, copyout);
        }

        if (PROC_IS_BRANDED(p) && BROP(p)->b_savecontext != NULL) {
                /*
                 * Allow the brand the chance to modify the context we
                 * saved:
                 */
                /* XXX KEBE SAYS FIX ME! */
                BROP(p)->b_savecontext(ucp);
        }

        return (ret);
}

/*
 * Restore user context.
 */
void
restorecontext(ucontext_t *ucp)
{
        kthread_t *t = curthread;
        klwp_t *lwp = ttolwp(t);
        proc_t *p = lwptoproc(lwp);

        if (PROC_IS_BRANDED(p) && BROP(p)->b_restorecontext != NULL) {
                /*
                 * Allow the brand the chance to modify the context before
                 * we restore it:
                 */
                BROP(p)->b_restorecontext(ucp);
        }

        lwp->lwp_oldcontext = (uintptr_t)ucp->uc_link;

        if (ucp->uc_flags & UC_STACK) {
                if (ucp->uc_stack.ss_flags == SS_ONSTACK)
                        lwp->lwp_sigaltstack = ucp->uc_stack;
                else
                        lwp->lwp_sigaltstack.ss_flags &= ~SS_ONSTACK;
        }

        if (ucp->uc_flags & UC_CPU) {
                /*
                 * If the trace flag is set, mark the lwp to take a
                 * single-step trap on return to user level (below).
                 * The x86 lcall and sysenter interfaces have already done
                 * this and turned off the flag, but the amd64 syscall
                 * interface has not.
                 */
                if (lwptoregs(lwp)->r_ps & PS_T)
                        lwp->lwp_pcb.pcb_flags |= DEBUG_PENDING;
                setgregs(lwp, ucp->uc_mcontext.gregs);
                lwp->lwp_eosys = JUSTRETURN;
                t->t_post_sys = 1;
                aston(curthread);
        }

        /*
         * The logic to copy in the ucontext_t takes care of combining the
         * UC_FPU and UC_XSAVE flags, so at this point only one of them
         * should be set, if any.
         */
        if (ucp->uc_flags & UC_XSAVE) {
                ASSERT0(ucp->uc_flags & UC_FPU);
                ASSERT3U((uintptr_t)ucp->uc_xsave, >=, _kernelbase);
                fpu_set_xsave(lwp, (const void *)ucp->uc_xsave);
        } else if (ucp->uc_flags & UC_FPU) {
                setfpregs(lwp, &ucp->uc_mcontext.fpregs);
        }

        if (ucp->uc_flags & UC_SIGMASK) {
                /*
                 * We don't need to acquire p->p_lock here;
                 * we are manipulating thread-private data.
                 */
                schedctl_finish_sigblock(t);
                sigutok(&ucp->uc_sigmask, &t->t_hold);
                if (sigcheck(ttoproc(t), t))
                        t->t_sig_check = 1;
        }
}


int
getsetcontext(int flag, void *arg)
{
        ucontext_t uc;
        ucontext_t *ucp;
        klwp_t *lwp = ttolwp(curthread);
        void *fpu = NULL;
        stack_t dummy_stk;
        proc_t *p = lwptoproc(lwp);
        int ret;

        /*
         * In future releases, when the ucontext structure grows,
         * getcontext should be modified to only return the fields
         * specified in the uc_flags.  That way, the structure can grow
         * and still be binary compatible with all .o's, which will only
         * have old fields defined in uc_flags.
         */

        switch (flag) {
        default:
                return (set_errno(EINVAL));

        case GETCONTEXT:
                schedctl_finish_sigblock(curthread);
                ret = savecontext(&uc, &curthread->t_hold, SAVECTXT_F_NONE);
                if (ret != 0)
                        return (set_errno(ret));
                if (uc.uc_flags & UC_SIGMASK)
                        SIGSET_NATIVE_TO_BRAND(&uc.uc_sigmask);
                if (copyout(&uc, arg, sizeof (uc)))
                        return (set_errno(EFAULT));
                return (0);

        /*
         * In the case of GETCONTEXT_EXTD, we've theoretically been given all
         * the required pointers of the appropriate length by libc in the
         * ucontext_t. We must first copyin the offsets that we care about to
         * seed the known extensions. Right now that is just the uc_xsave
         * member. As we are setting uc_flags, we only look at the members we
         * need to care about.
         *
         * The main reason that we have a different entry point is that we
         * don't want to assume that callers have always properly zeroed their
         * ucontext_t ahead of calling into libc. In fact, it often is just
         * declared on the stack, so we can't assume that at all. Instead,
         * getcontext_extd does require that.
         */
        case GETCONTEXT_EXTD:
                schedctl_finish_sigblock(curthread);
                ucp = arg;
                if (copyin(&ucp->uc_xsave, &uc.uc_xsave,
                    sizeof (uc.uc_xsave)) != 0) {
                        return (set_errno(EFAULT));
                }
                ret = savecontext(&uc, &curthread->t_hold, SAVECTXT_F_EXTD);
                if (ret != 0)
                        return (set_errno(ret));
                if (uc.uc_flags & UC_SIGMASK)
                        SIGSET_NATIVE_TO_BRAND(&uc.uc_sigmask);
                if (copyout(&uc, arg, sizeof (uc)))
                        return (set_errno(EFAULT));
                return (0);

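        /*
         * Illustrative userland sketch (assumed pairing, not part of this
         * file): libc reaches the GETCONTEXT_EXTD case via
         * getcontext_extd(3C), using a ucontext_t whose extension pointers
         * were set up by ucontext_alloc(3C), e.g.:
         *
         *      ucontext_t *ucp = ucontext_alloc(0);
         *      if (ucp == NULL)
         *              err(1, "ucontext_alloc");
         *      if (getcontext_extd(ucp) != 0)
         *              err(1, "getcontext_extd");
         *      ...
         *      ucontext_free(ucp);
         */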

        case SETCONTEXT:
                ucp = arg;
                if (ucp == NULL)
                        exit(CLD_EXITED, 0);
                /*
                 * Don't copyin filler or floating state unless we need it.
                 * The ucontext_t struct and fields are specified in the ABI.
                 */
                if (copyin(ucp, &uc, offsetof(ucontext_t, uc_brand_data) -
                    sizeof (uc.uc_mcontext.fpregs))) {
                        return (set_errno(EFAULT));
                }
                if (uc.uc_flags & UC_SIGMASK)
                        SIGSET_BRAND_TO_NATIVE(&uc.uc_sigmask);

                if ((uc.uc_flags & UC_FPU) &&
                    copyin(&ucp->uc_mcontext.fpregs, &uc.uc_mcontext.fpregs,
                    sizeof (uc.uc_mcontext.fpregs))) {
                        return (set_errno(EFAULT));
                }

                /*
                 * If this is a branded process, copy in the brand-private
                 * data:
                 */
                if (PROC_IS_BRANDED(p) && copyin(&ucp->uc_brand_data,
                    &uc.uc_brand_data, sizeof (uc.uc_brand_data)) != 0) {
                        return (set_errno(EFAULT));
                }

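                /*
                 * uc_xsave is not covered by the partial copyin above, so
                 * clear it first; a stale kernel-stack value here could
                 * otherwise be mistaken for a user pointer below.
                 */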
                uc.uc_xsave = 0;
                if ((uc.uc_flags & UC_XSAVE) != 0) {
                        int ret;

                        if (copyin(&ucp->uc_xsave, &uc.uc_xsave,
                            sizeof (uc.uc_xsave)) != 0) {
                                return (set_errno(EFAULT));
                        }

                        ret = fpu_signal_copyin(lwp, &uc);
                        if (ret != 0) {
                                return (set_errno(ret));
                        }
                }

                restorecontext(&uc);

                if ((uc.uc_flags & UC_STACK) && (lwp->lwp_ustack != 0))
                        (void) copyout(&uc.uc_stack, (stack_t *)lwp->lwp_ustack,
                            sizeof (uc.uc_stack));
                return (0);

        case GETUSTACK:
                if (copyout(&lwp->lwp_ustack, arg, sizeof (caddr_t)))
                        return (set_errno(EFAULT));
                return (0);

        case SETUSTACK:
                if (copyin(arg, &dummy_stk, sizeof (dummy_stk)))
                        return (set_errno(EFAULT));
                lwp->lwp_ustack = (uintptr_t)arg;
                return (0);
        }
}

#ifdef _SYSCALL32_IMPL

/*
 * Save user context for 32-bit processes.
 */
int
savecontext32(ucontext32_t *ucp, const k_sigset_t *mask,
    savecontext_flags_t flags)
{
        proc_t *p = ttoproc(curthread);
        klwp_t *lwp = ttolwp(curthread);
        struct regs *rp = lwptoregs(lwp);
        boolean_t need_xsave = B_FALSE;
        boolean_t fpu_en;
        int32_t user_xsave = 0;
        uintptr_t uaddr;
        int ret;

        /*
         * See savecontext() for an explanation of this.
         */
        if ((flags & SAVECTXT_F_EXTD) != 0) {
                user_xsave = ucp->uc_xsave;
                if (fpu_xsave_enabled() && user_xsave != 0) {
                        need_xsave = B_TRUE;
                }
        } else {
                VERIFY0(flags);
        }
        bzero(&ucp->uc_mcontext.fpregs, sizeof (ucontext32_t) -
            offsetof(ucontext32_t, uc_mcontext.fpregs));
        ucp->uc_xsave = user_xsave;

        ucp->uc_flags = UC_ALL;
        ucp->uc_link = (caddr32_t)lwp->lwp_oldcontext;

        if (lwp->lwp_ustack == (uintptr_t)NULL ||
            copyin((void *)lwp->lwp_ustack, &ucp->uc_stack,
            sizeof (ucp->uc_stack)) != 0 ||
            ucp->uc_stack.ss_size == 0) {

                if (lwp->lwp_sigaltstack.ss_flags == SS_ONSTACK) {
                        ucp->uc_stack.ss_sp =
                            (caddr32_t)(uintptr_t)lwp->lwp_sigaltstack.ss_sp;
                        ucp->uc_stack.ss_size =
                            (size32_t)lwp->lwp_sigaltstack.ss_size;
                        ucp->uc_stack.ss_flags = SS_ONSTACK;
                } else {
                        ucp->uc_stack.ss_sp = (caddr32_t)(uintptr_t)
                            (p->p_usrstack - p->p_stksize);
                        ucp->uc_stack.ss_size = (size32_t)p->p_stksize;
                        ucp->uc_stack.ss_flags = 0;
                }
        }

        /*
         * If either the trace flag or REQUEST_STEP is set, arrange
         * for single-stepping and turn off the trace flag.
         */
        if ((rp->r_ps & PS_T) || (lwp->lwp_pcb.pcb_flags & REQUEST_STEP)) {
                /*
                 * Clear PS_T so that saved user context won't have trace
                 * flag set.
                 */
                rp->r_ps &= ~PS_T;

                if (!(lwp->lwp_pcb.pcb_flags & REQUEST_NOSTEP)) {
                        lwp->lwp_pcb.pcb_flags |= DEBUG_PENDING;
                        /*
                         * See comments in savecontext().
                         */
                        aston(curthread);
                }
        }

        getgregs32(lwp, ucp->uc_mcontext.gregs);
        fpu_en = (lwp->lwp_pcb.pcb_fpu.fpu_flags & FPU_EN) != 0;
        if (fpu_en)
                getfpregs32(lwp, &ucp->uc_mcontext.fpregs);
        else
                ucp->uc_flags &= ~UC_FPU;

        if (mask != NULL) {
                /*
                 * Save signal mask.
                 */
                sigktou(mask, &ucp->uc_sigmask);
        } else {
                ucp->uc_flags &= ~UC_SIGMASK;
                bzero(&ucp->uc_sigmask, sizeof (ucp->uc_sigmask));
        }

        if (!need_xsave || !fpu_en) {
                return (0);
        }

        ucp->uc_flags |= UC_XSAVE;

        /*
         * Due to not wanting to change or break programs, the filler in the
         * ucontext_t was always declared as a long, which is signed. Because
         * this is the 32-bit version, this is an int32_t. We cannot go
         * directly to a uintptr_t, otherwise we might get sign extension, so
         * we first have to go through a uint32_t and then a uintptr_t.
         * Otherwise, see savecontext().
         */
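        /*
         * For example (illustrative value): if uc_xsave held 0xfebfb000, a
         * direct (uintptr_t)(int32_t) cast would sign-extend it to
         * 0xfffffffffebfb000, while (uintptr_t)(uint32_t) yields the
         * intended 0xfebfb000.
         */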
        uaddr = (uintptr_t)(uint32_t)ucp->uc_xsave;
        if ((flags & SAVECTXT_F_ONFAULT) != 0) {
                ret = fpu_signal_copyout(lwp, uaddr, savecontext_copyout);
        } else {
                ret = fpu_signal_copyout(lwp, uaddr, copyout);
        }

        if (PROC_IS_BRANDED(p) && BROP(p)->b_savecontext32 != NULL) {
                /*
                 * Allow the brand the chance to modify the context we
                 * saved:
                 */
                /* XXX KEBE SAYS FIX ME */
                BROP(p)->b_savecontext32(ucp);
        }

        return (ret);
}

int
getsetcontext32(int flag, void *arg)
{
        ucontext32_t uc;
        ucontext_t ucnat;
        ucontext32_t *ucp;
        klwp_t *lwp = ttolwp(curthread);
        caddr32_t ustack32;
        stack32_t dummy_stk32;
        proc_t *p = lwptoproc(lwp);
        int ret;

        switch (flag) {
        default:
                return (set_errno(EINVAL));

        case GETCONTEXT:
                schedctl_finish_sigblock(curthread);
                ret = savecontext32(&uc, &curthread->t_hold, SAVECTXT_F_NONE);
                if (ret != 0)
                        return (set_errno(ret));
                if (uc.uc_flags & UC_SIGMASK)
                        SIGSET_NATIVE_TO_BRAND(&uc.uc_sigmask);
                if (copyout(&uc, arg, sizeof (uc)))
                        return (set_errno(EFAULT));
                return (0);

        /*
         * See getsetcontext() for an explanation of what is going on here.
         */
        case GETCONTEXT_EXTD:
                schedctl_finish_sigblock(curthread);
                ucp = arg;
                if (copyin(&ucp->uc_xsave, &uc.uc_xsave,
                    sizeof (uc.uc_xsave)) != 0) {
                        return (set_errno(EFAULT));
                }
                ret = savecontext32(&uc, &curthread->t_hold, SAVECTXT_F_EXTD);
                if (ret != 0)
                        return (set_errno(ret));
                if (uc.uc_flags & UC_SIGMASK)
                        SIGSET_NATIVE_TO_BRAND(&uc.uc_sigmask);
                if (copyout(&uc, arg, sizeof (uc)))
                        return (set_errno(EFAULT));
                return (0);

        case SETCONTEXT:
                ucp = arg;
                if (ucp == NULL)
                        exit(CLD_EXITED, 0);
                if (copyin(ucp, &uc, offsetof(ucontext32_t, uc_brand_data) -
                    sizeof (uc.uc_mcontext.fpregs))) {
                        return (set_errno(EFAULT));
                }
                if (uc.uc_flags & UC_SIGMASK)
                        SIGSET_BRAND_TO_NATIVE(&uc.uc_sigmask);
                if ((uc.uc_flags & UC_FPU) &&
                    copyin(&ucp->uc_mcontext.fpregs, &uc.uc_mcontext.fpregs,
                    sizeof (uc.uc_mcontext.fpregs))) {
                        return (set_errno(EFAULT));
                }

                /*
                 * If this is a branded process, copy in the brand-private
                 * data:
                 */
                if (PROC_IS_BRANDED(p) && copyin(&ucp->uc_brand_data,
                    &uc.uc_brand_data, sizeof (uc.uc_brand_data)) != 0) {
                        return (set_errno(EFAULT));
                }

                uc.uc_xsave = 0;
                if ((uc.uc_flags & UC_XSAVE) != 0 &&
                    copyin(&ucp->uc_xsave, &uc.uc_xsave,
                    sizeof (uc.uc_xsave)) != 0) {
                        return (set_errno(EFAULT));
                }

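                /*
                 * Convert the 32-bit context to the native form;
                 * fpu_signal_copyin() and restorecontext() both operate on
                 * the native ucontext_t.
                 */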
                ucontext_32ton(&uc, &ucnat);

                if ((ucnat.uc_flags & UC_XSAVE) != 0) {
                        int ret = fpu_signal_copyin(lwp, &ucnat);
                        if (ret != 0) {
                                return (set_errno(ret));
                        }
                }

                restorecontext(&ucnat);

                if ((uc.uc_flags & UC_STACK) && (lwp->lwp_ustack != 0))
                        (void) copyout(&uc.uc_stack,
                            (stack32_t *)lwp->lwp_ustack, sizeof (uc.uc_stack));
                return (0);

        case GETUSTACK:
                ustack32 = (caddr32_t)lwp->lwp_ustack;
                if (copyout(&ustack32, arg, sizeof (ustack32)))
                        return (set_errno(EFAULT));
                return (0);

        case SETUSTACK:
                if (copyin(arg, &dummy_stk32, sizeof (dummy_stk32)))
                        return (set_errno(EFAULT));
                lwp->lwp_ustack = (uintptr_t)arg;
                return (0);
        }
}

#endif  /* _SYSCALL32_IMPL */