          --- old/usr/src/uts/intel/syscall/getcontext.c
          +++ new/usr/src/uts/intel/syscall/getcontext.c
↓ open down ↓ 12 lines elided ↑ open up ↑
  13   13   * When distributing Covered Code, include this CDDL HEADER in each
  14   14   * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15   15   * If applicable, add the following below this CDDL HEADER, with the
  16   16   * fields enclosed by brackets "[]" replaced with your own identifying
  17   17   * information: Portions Copyright [yyyy] [name of copyright owner]
  18   18   *
  19   19   * CDDL HEADER END
  20   20   */
  21   21  
  22   22  /*
       23 + * Copyright 2015 Joyent, Inc.
       24 + */
       25 +/*
  23   26   * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
  24   27   * Use is subject to license terms.
  25   28   */
  26   29  
  27   30  /*      Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
  28   31  /*        All Rights Reserved   */
  29   32  
  30   33  /*
  31   34   * Copyright 2023 Oxide Computer Company
  32   35   */
↓ open down ↓ 10 lines elided ↑ open up ↑
  43   46  #include <sys/proc.h>
  44   47  #include <sys/brand.h>
  45   48  #include <sys/psw.h>
  46   49  #include <sys/ucontext.h>
  47   50  #include <sys/asm_linkage.h>
  48   51  #include <sys/errno.h>
  49   52  #include <sys/archsystm.h>
  50   53  #include <sys/schedctl.h>
  51   54  #include <sys/debug.h>
  52   55  #include <sys/sysmacros.h>
       56 +#include <sys/sdt.h>
  53   57  
  54   58  /*
  55   59   * This is a wrapper around copyout_noerr that returns a guaranteed error code.
  56   60   * Because we're using copyout_noerr(), we need to bound the time we're under an
  57   61   * on_fault/no_fault and attempt to do so only while we're actually copying data
  58   62   * out. The main reason for this is that we're being called back from the
  59   63   * FPU, which is held under kpreempt_disable() and related; we can't use a
  60   64   * larger on_fault()/no_fault() as that would both hide legitimate errors we
  61   65   * make, masquerading as user issues, and make it trickier to reason about
  62   66   * the correct restoration of our state.
↓ open down ↓ 121 lines elided ↑ open up ↑
 184  188                  }
 185  189          }
 186  190  
 187  191          getgregs(lwp, ucp->uc_mcontext.gregs);
 188  192          fpu_en = (lwp->lwp_pcb.pcb_fpu.fpu_flags & FPU_EN) != 0;
 189  193          if (fpu_en)
 190  194                  getfpregs(lwp, &ucp->uc_mcontext.fpregs);
 191  195          else
 192  196                  ucp->uc_flags &= ~UC_FPU;
 193  197  
 194      -        sigktou(mask, &ucp->uc_sigmask);
      198 +        if (mask != NULL) {
      199 +                /*
      200 +                 * Save signal mask.
      201 +                 */
      202 +                sigktou(mask, &ucp->uc_sigmask);
      203 +        } else {
      204 +                ucp->uc_flags &= ~UC_SIGMASK;
      205 +                bzero(&ucp->uc_sigmask, sizeof (ucp->uc_sigmask));
      206 +        }
 195  207  
 196  208          /*
  197  209           * Determine if we need to get the rest of the xsave context out here.
 198  210           * If the thread doesn't actually have the FPU enabled, then we don't
 199  211           * actually need to do this. We also don't have to if it wasn't
 200  212           * requested.
 201  213           */
 202  214          if (!need_xsave || !fpu_en) {
 203  215                  return (0);
 204  216          }
↓ open down ↓ 8 lines elided ↑ open up ↑
 213  225           * logic to actually use the on_fault/no_fault and the non-error form of
 214  226           * copyout (which still checks if it's a user address at least).
 215  227           */
 216  228          if ((flags & SAVECTXT_F_ONFAULT) != 0) {
 217  229                  ret = fpu_signal_copyout(lwp, ucp->uc_xsave,
 218  230                      savecontext_copyout);
 219  231          } else {
 220  232                  ret = fpu_signal_copyout(lwp, ucp->uc_xsave, copyout);
 221  233          }
 222  234  
      235 +        if (PROC_IS_BRANDED(p) && BROP(p)->b_savecontext != NULL) {
      236 +                /*
      237 +                 * Allow the brand the chance to modify the context we
      238 +                 * saved:
      239 +                 */
      240 +                /* XXX KEBE SAYS FIX ME! */
      241 +                BROP(p)->b_savecontext(ucp);
      242 +        }
      243 +
 223  244          return (ret);
 224  245  }
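
The signal mask handling in savecontext() above (sigktou() on the supplied kernel set when a mask is passed, otherwise clearing UC_SIGMASK and zeroing uc_sigmask) is what a normal getcontext(2) caller ends up seeing from userland. A minimal user-level sketch of that behaviour, using only standard <ucontext.h> and <signal.h> interfaces and not part of this change:

    #include <ucontext.h>
    #include <signal.h>
    #include <stdio.h>

    int
    main(void)
    {
            ucontext_t uc;
            sigset_t set;

            /* Block SIGUSR1 so it should appear in the saved uc_sigmask. */
            (void) sigemptyset(&set);
            (void) sigaddset(&set, SIGUSR1);
            (void) sigprocmask(SIG_BLOCK, &set, NULL);

            if (getcontext(&uc) != 0) {
                    perror("getcontext");
                    return (1);
            }

            (void) printf("UC_SIGMASK %s, SIGUSR1 %s in the saved mask\n",
                (uc.uc_flags & UC_SIGMASK) ? "set" : "clear",
                sigismember(&uc.uc_sigmask, SIGUSR1) ? "present" : "absent");
            return (0);
    }

On a stock system this should report UC_SIGMASK set and SIGUSR1 present; the program only illustrates the user-visible contract.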
 225  246  
 226  247  /*
 227  248   * Restore user context.
 228  249   */
 229  250  void
 230  251  restorecontext(ucontext_t *ucp)
 231  252  {
 232  253          kthread_t *t = curthread;
 233  254          klwp_t *lwp = ttolwp(t);
      255 +        proc_t *p = lwptoproc(lwp);
 234  256  
      257 +        if (PROC_IS_BRANDED(p) && BROP(p)->b_restorecontext != NULL) {
      258 +                /*
      259 +                 * Allow the brand the chance to modify the context before
      260 +                 * we restore it:
      261 +                 */
      262 +                BROP(p)->b_restorecontext(ucp);
      263 +        }
      264 +
      265 +        DTRACE_PROBE3(oldcontext__set, klwp_t *, lwp,
      266 +            uintptr_t, lwp->lwp_oldcontext,
      267 +            uintptr_t, (uintptr_t)ucp->uc_link);
 235  268          lwp->lwp_oldcontext = (uintptr_t)ucp->uc_link;
 236  269  
 237  270          if (ucp->uc_flags & UC_STACK) {
 238  271                  if (ucp->uc_stack.ss_flags == SS_ONSTACK)
 239  272                          lwp->lwp_sigaltstack = ucp->uc_stack;
 240  273                  else
 241  274                          lwp->lwp_sigaltstack.ss_flags &= ~SS_ONSTACK;
 242  275          }
 243  276  
 244  277          if (ucp->uc_flags & UC_CPU) {
↓ open down ↓ 38 lines elided ↑ open up ↑
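
The DTRACE_PROBE3() added in restorecontext() above registers an sdt provider probe; the double underscore in the probe name maps to a dash, so the transition can be watched with something along the lines of dtrace -n 'sdt:::oldcontext-set { printf("%p -> %p", arg1, arg2); }', where arg0 is the klwp_t pointer, arg1 the previous lwp_oldcontext, and arg2 the incoming uc_link.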
 283  316  
 284  317  
 285  318  int
 286  319  getsetcontext(int flag, void *arg)
 287  320  {
 288  321          ucontext_t uc;
 289  322          ucontext_t *ucp;
 290  323          klwp_t *lwp = ttolwp(curthread);
 291  324          void *fpu = NULL;
 292  325          stack_t dummy_stk;
      326 +        proc_t *p = lwptoproc(lwp);
 293  327          int ret;
 294  328  
 295  329          /*
 296  330           * In future releases, when the ucontext structure grows,
 297  331           * getcontext should be modified to only return the fields
 298  332           * specified in the uc_flags.  That way, the structure can grow
  299  333           * and still be binary compatible with all .o's which will only
  300  334           * have old fields defined in uc_flags.
 301  335           */
 302  336  
↓ open down ↓ 44 lines elided ↑ open up ↑
 347  381  
 348  382  
 349  383          case SETCONTEXT:
 350  384                  ucp = arg;
 351  385                  if (ucp == NULL)
 352  386                          exit(CLD_EXITED, 0);
 353  387                  /*
 354  388                   * Don't copyin filler or floating state unless we need it.
 355  389                   * The ucontext_t struct and fields are specified in the ABI.
 356  390                   */
 357      -                if (copyin(ucp, &uc, offsetof(ucontext_t, uc_filler) -
      391 +                if (copyin(ucp, &uc, offsetof(ucontext_t, uc_brand_data) -
 358  392                      sizeof (uc.uc_mcontext.fpregs))) {
 359  393                          return (set_errno(EFAULT));
 360  394                  }
 361  395                  if (uc.uc_flags & UC_SIGMASK)
 362  396                          SIGSET_BRAND_TO_NATIVE(&uc.uc_sigmask);
 363  397  
 364  398                  if ((uc.uc_flags & UC_FPU) &&
 365  399                      copyin(&ucp->uc_mcontext.fpregs, &uc.uc_mcontext.fpregs,
 366  400                      sizeof (uc.uc_mcontext.fpregs))) {
 367  401                          return (set_errno(EFAULT));
 368  402                  }
 369  403  
      404 +                /*
      405 +                 * If this is a branded process, copy in the brand-private
      406 +                 * data:
      407 +                 */
      408 +                if (PROC_IS_BRANDED(p) && copyin(&ucp->uc_brand_data,
      409 +                    &uc.uc_brand_data, sizeof (uc.uc_brand_data)) != 0) {
      410 +                        return (set_errno(EFAULT));
      411 +                }
      412 +
 370  413                  uc.uc_xsave = 0;
 371  414                  if ((uc.uc_flags & UC_XSAVE) != 0) {
 372  415                          int ret;
 373  416  
 374  417                          if (copyin(&ucp->uc_xsave, &uc.uc_xsave,
 375  418                              sizeof (uc.uc_xsave)) != 0) {
 376  419                                  return (set_errno(EFAULT));
 377  420                          }
 378  421  
 379  422                          ret = fpu_signal_copyin(lwp, &uc);
↓ open down ↓ 97 lines elided ↑ open up ↑
 477  520                  }
 478  521          }
 479  522  
 480  523          getgregs32(lwp, ucp->uc_mcontext.gregs);
 481  524          fpu_en = (lwp->lwp_pcb.pcb_fpu.fpu_flags & FPU_EN) != 0;
 482  525          if (fpu_en)
 483  526                  getfpregs32(lwp, &ucp->uc_mcontext.fpregs);
 484  527          else
 485  528                  ucp->uc_flags &= ~UC_FPU;
 486  529  
 487      -        sigktou(mask, &ucp->uc_sigmask);
      530 +        if (mask != NULL) {
      531 +                /*
      532 +                 * Save signal mask.
      533 +                 */
      534 +                sigktou(mask, &ucp->uc_sigmask);
      535 +        } else {
      536 +                ucp->uc_flags &= ~UC_SIGMASK;
      537 +                bzero(&ucp->uc_sigmask, sizeof (ucp->uc_sigmask));
      538 +        }
 488  539  
 489  540          if (!need_xsave || !fpu_en) {
 490  541                  return (0);
 491  542          }
 492  543  
 493  544          ucp->uc_flags |= UC_XSAVE;
 494  545  
 495  546          /*
 496  547           * Due to not wanting to change or break programs, the filler in the
 497  548           * ucontext_t was always declared as a long, which is signed. Because
↓ open down ↓ 2 lines elided ↑ open up ↑
 500  551           * have to go through a uint32_t and then a uintptr_t. Otherwise, see
 501  552           * savecontext().
 502  553           */
 503  554          uaddr = (uintptr_t)(uint32_t)ucp->uc_xsave;
 504  555          if ((flags & SAVECTXT_F_ONFAULT) != 0) {
 505  556                  ret = fpu_signal_copyout(lwp, uaddr, savecontext_copyout);
 506  557          } else {
 507  558                  ret = fpu_signal_copyout(lwp, uaddr, copyout);
 508  559          }
 509  560  
      561 +
      562 +        if (PROC_IS_BRANDED(p) && BROP(p)->b_savecontext32 != NULL) {
      563 +                /*
      564 +                 * Allow the brand the chance to modify the context we
      565 +                 * saved:
      566 +                 */
      567 +                /* XXX KEBE SAYS FIX ME */
      568 +                BROP(p)->b_savecontext32(ucp);
      569 +        }
      570 +
 510  571          return (ret);
 511  572  }
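
The sign-extension hazard described in the comment above is easy to see in isolation on an LP64 build. A small standalone sketch (the variable name and value below are hypothetical, and this is not part of the change) showing why uc_xsave is pushed through uint32_t before widening to uintptr_t:

    #include <stdio.h>
    #include <inttypes.h>

    int
    main(void)
    {
            /* A hypothetical 32-bit user address with the sign bit set. */
            int32_t filler = (int32_t)0xfef10000;

            /* The direct cast sign-extends the value on an LP64 system. */
            (void) printf("direct:     0x%" PRIxPTR "\n", (uintptr_t)filler);

            /* Going through uint32_t first preserves the 32-bit address. */
            (void) printf("via uint32: 0x%" PRIxPTR "\n",
                (uintptr_t)(uint32_t)filler);
            return (0);
    }

Built 64-bit, the first line prints a sign-extended value with an 0xffffffff prefix while the second preserves the original 32-bit address.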
 512  573  
 513  574  int
 514  575  getsetcontext32(int flag, void *arg)
 515  576  {
 516  577          ucontext32_t uc;
 517  578          ucontext_t ucnat;
 518  579          ucontext32_t *ucp;
 519  580          klwp_t *lwp = ttolwp(curthread);
 520  581          caddr32_t ustack32;
 521  582          stack32_t dummy_stk32;
      583 +        proc_t *p = lwptoproc(lwp);
 522  584          int ret;
 523  585  
 524  586          switch (flag) {
 525  587          default:
 526  588                  return (set_errno(EINVAL));
 527  589  
 528  590          case GETCONTEXT:
 529  591                  schedctl_finish_sigblock(curthread);
 530  592                  ret = savecontext32(&uc, &curthread->t_hold, SAVECTXT_F_NONE);
 531  593                  if (ret != 0)
↓ open down ↓ 20 lines elided ↑ open up ↑
 552  614                  if (uc.uc_flags & UC_SIGMASK)
 553  615                          SIGSET_NATIVE_TO_BRAND(&uc.uc_sigmask);
 554  616                  if (copyout(&uc, arg, sizeof (uc)))
 555  617                          return (set_errno(EFAULT));
 556  618                  return (0);
 557  619  
 558  620          case SETCONTEXT:
 559  621                  ucp = arg;
 560  622                  if (ucp == NULL)
 561  623                          exit(CLD_EXITED, 0);
 562      -                if (copyin(ucp, &uc, offsetof(ucontext32_t, uc_filler) -
      624 +                if (copyin(ucp, &uc, offsetof(ucontext32_t, uc_brand_data) -
 563  625                      sizeof (uc.uc_mcontext.fpregs))) {
 564  626                          return (set_errno(EFAULT));
 565  627                  }
 566  628                  if (uc.uc_flags & UC_SIGMASK)
 567  629                          SIGSET_BRAND_TO_NATIVE(&uc.uc_sigmask);
 568  630                  if ((uc.uc_flags & UC_FPU) &&
 569  631                      copyin(&ucp->uc_mcontext.fpregs, &uc.uc_mcontext.fpregs,
 570  632                      sizeof (uc.uc_mcontext.fpregs))) {
 571  633                          return (set_errno(EFAULT));
 572  634                  }
      635 +
      636 +                /*
      637 +                 * If this is a branded process, copy in the brand-private
      638 +                 * data:
      639 +                 */
      640 +                if (PROC_IS_BRANDED(p) && copyin(&ucp->uc_brand_data,
      641 +                    &uc.uc_brand_data, sizeof (uc.uc_brand_data)) != 0) {
      642 +                        return (set_errno(EFAULT));
      643 +                }
 573  644  
 574  645                  uc.uc_xsave = 0;
 575  646                  if ((uc.uc_flags & UC_XSAVE) != 0 &&
 576  647                      copyin(&ucp->uc_xsave, &uc.uc_xsave,
 577  648                      sizeof (uc.uc_xsave)) != 0) {
 578  649                          return (set_errno(EFAULT));
 579  650                  }
 580  651  
 581  652                  ucontext_32ton(&uc, &ucnat);
 582  653  
↓ open down ↓ 29 lines elided ↑ open up ↑