          --- old/usr/src/uts/common/os/mmapobj.c
          +++ new/usr/src/uts/common/os/mmapobj.c
   1    1  /*
   2    2   * CDDL HEADER START
   3    3   *
   4    4   * The contents of this file are subject to the terms of the
   5    5   * Common Development and Distribution License (the "License").
   6    6   * You may not use this file except in compliance with the License.
   7    7   *
   8    8   * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9    9   * or http://www.opensolaris.org/os/licensing.
  10   10   * See the License for the specific language governing permissions
  11   11   * and limitations under the License.
  12   12   *
  13   13   * When distributing Covered Code, include this CDDL HEADER in each
  14   14   * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15   15   * If applicable, add the following below this CDDL HEADER, with the
  16   16   * fields enclosed by brackets "[]" replaced with your own identifying
  17   17   * information: Portions Copyright [yyyy] [name of copyright owner]
  18   18   *
  19   19   * CDDL HEADER END
  20   20   */
  21   21  /*
  22   22   * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
  23   23   * Use is subject to license terms.
  24   24   * Copyright 2014 Joyent, Inc.  All rights reserved.
  25   25   */
  26   26  
  27   27  #include <sys/types.h>
  28   28  #include <sys/sysmacros.h>
  29   29  #include <sys/kmem.h>
  30   30  #include <sys/param.h>
  31   31  #include <sys/systm.h>
  32   32  #include <sys/errno.h>
  33   33  #include <sys/mman.h>
  34   34  #include <sys/cmn_err.h>
  35   35  #include <sys/cred.h>
  36   36  #include <sys/vmsystm.h>
  37   37  #include <sys/machsystm.h>
  38   38  #include <sys/debug.h>
  39   39  #include <vm/as.h>
  40   40  #include <vm/seg.h>
  41   41  #include <sys/vmparam.h>
  42   42  #include <sys/vfs.h>
  43   43  #include <sys/elf.h>
  44   44  #include <sys/machelf.h>
  45   45  #include <sys/corectl.h>
  46   46  #include <sys/exec.h>
  47   47  #include <sys/exechdr.h>
  48   48  #include <sys/autoconf.h>
  49   49  #include <sys/mem.h>
  50   50  #include <vm/seg_dev.h>
  51   51  #include <sys/vmparam.h>
  52   52  #include <sys/mmapobj.h>
  53   53  #include <sys/atomic.h>
  54   54  
  55   55  /*
  56   56   * Theory statement:
  57   57   *
  58   58   * The main driving force behind mmapobj is to interpret and map ELF files
  59   59   * inside of the kernel instead of having the linker be responsible for this.
  60   60   *
  61   61   * mmapobj also supports the AOUT 4.x binary format as well as flat files in
  62   62   * a read only manner.
  63   63   *
  64   64   * When interpreting and mapping an ELF file, mmapobj will map each PT_LOAD
  65   65   * or PT_SUNWBSS segment according to the ELF standard.  Refer to the "Linker
  66   66   * and Libraries Guide" for more information about the standard and mapping
  67   67   * rules.
  68   68   *
  69   69   * Having mmapobj interpret and map objects will allow the kernel to make the
  70   70   * best decision for where to place the mappings for said objects.  Thus, we
  71   71   * can make optimizations inside of the kernel for specific platforms or
  72   72   * cache mapping information to make mapping objects faster.
  73   73   *
  74   74   * The lib_va_hash will be one such optimization.  For each ELF object that
  75   75   * mmapobj is asked to interpret, we will attempt to cache the information
  76   76   * about the PT_LOAD and PT_SUNWBSS sections to speed up future mappings of
  77   77   * the same objects.  We will cache up to LIBVA_CACHED_SEGS (see below) program
  78   78   * headers which should cover a majority of the libraries out there without
  79   79   * wasting space.  In order to make sure that the cached information is valid,
  80   80   * we check the passed in vnode's mtime and ctime to make sure the vnode
  81   81   * has not been modified since the last time we used it.
  82   82   *
  83   83   * In addition, the lib_va_hash may contain a preferred starting VA for the
  84   84   * object which can be useful for platforms which support a shared context.
  85   85   * This will increase the likelihood that library text can be shared among
  86   86   * many different processes.  We limit the reserved VA space for 32 bit objects
  87   87   * in order to minimize fragmenting the process's address space.
  88   88   *
  89   89   * In addition to the above, the mmapobj interface allows for padding to be
  90   90   * requested before the first mapping and after the last mapping created.
  91   91   * When padding is requested, no additional optimizations will be made for
  92   92   * that request.
  93   93   */
  94   94  
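As a rough orientation for where this code sits, the sketch below shows how a user-level consumer (for example the run-time linker) would reach it through the mmapobj(2) system call and receive one mmapobj_result_t per mapped segment. The header, the MMOBJ_INTERPRET flag, and the helper function are given from memory of the public interface and should be treated as illustrative rather than authoritative; only the mr_* fields it prints appear in this file.

#include <sys/types.h>
#include <sys/mman.h>
#include <fcntl.h>
#include <unistd.h>
#include <stdio.h>

/* Illustrative only: map an object and print the segments the kernel chose. */
static int
map_object(const char *path)
{
	mmapobj_result_t mrp[16];	/* room for up to 16 loadable segments */
	uint_t elements = 16;
	int fd = open(path, O_RDONLY);

	if (fd < 0)
		return (-1);
	/* Ask the kernel to interpret the file and map its segments. */
	if (mmapobj(fd, MMOBJ_INTERPRET, mrp, &elements, NULL) != 0) {
		(void) close(fd);
		return (-1);
	}
	for (uint_t i = 0; i < elements; i++)
		(void) printf("seg %u: addr %p msize %zu prot %x\n", i,
		    (void *)mrp[i].mr_addr, (size_t)mrp[i].mr_msize,
		    (uint_t)mrp[i].mr_prot);
	(void) close(fd);
	return (0);
}
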
  95   95  /*
  96   96   * Threshold to prevent allocating too much kernel memory to read in the
   97   97   * program headers for an object.  If it requires more than the threshold
   98   98   * below, we will use a KM_NOSLEEP allocation to allocate memory to hold all
   99   99   * of the program headers, which could possibly fail.  If less memory than
  100  100   * the threshold is needed, then we use a KM_SLEEP allocation and are willing
  101  101   * to wait for the memory if we need to.
 102  102   */
 103  103  size_t mmapobj_alloc_threshold = 65536;
 104  104  
 105  105  /* Debug stats for test coverage */
 106  106  #ifdef DEBUG
 107  107  struct mobj_stats {
 108  108          uint_t  mobjs_unmap_called;
 109  109          uint_t  mobjs_remap_devnull;
 110  110          uint_t  mobjs_lookup_start;
 111  111          uint_t  mobjs_alloc_start;
 112  112          uint_t  mobjs_alloc_vmem;
 113  113          uint_t  mobjs_add_collision;
 114  114          uint_t  mobjs_get_addr;
 115  115          uint_t  mobjs_map_flat_no_padding;
 116  116          uint_t  mobjs_map_flat_padding;
 117  117          uint_t  mobjs_map_ptload_text;
 118  118          uint_t  mobjs_map_ptload_initdata;
 119  119          uint_t  mobjs_map_ptload_preread;
 120  120          uint_t  mobjs_map_ptload_unaligned_text;
 121  121          uint_t  mobjs_map_ptload_unaligned_map_fail;
 122  122          uint_t  mobjs_map_ptload_unaligned_read_fail;
 123  123          uint_t  mobjs_zfoddiff;
 124  124          uint_t  mobjs_zfoddiff_nowrite;
 125  125          uint_t  mobjs_zfodextra;
 126  126          uint_t  mobjs_ptload_failed;
 127  127          uint_t  mobjs_map_elf_no_holes;
 128  128          uint_t  mobjs_unmap_hole;
 129  129          uint_t  mobjs_nomem_header;
 130  130          uint_t  mobjs_inval_header;
 131  131          uint_t  mobjs_overlap_header;
 132  132          uint_t  mobjs_np2_align;
 133  133          uint_t  mobjs_np2_align_overflow;
 134  134          uint_t  mobjs_exec_padding;
 135  135          uint_t  mobjs_exec_addr_mapped;
 136  136          uint_t  mobjs_exec_addr_devnull;
 137  137          uint_t  mobjs_exec_addr_in_use;
 138  138          uint_t  mobjs_lvp_found;
 139  139          uint_t  mobjs_no_loadable_yet;
 140  140          uint_t  mobjs_nothing_to_map;
 141  141          uint_t  mobjs_e2big;
 142  142          uint_t  mobjs_dyn_pad_align;
 143  143          uint_t  mobjs_dyn_pad_noalign;
 144  144          uint_t  mobjs_alloc_start_fail;
 145  145          uint_t  mobjs_lvp_nocache;
 146  146          uint_t  mobjs_extra_padding;
 147  147          uint_t  mobjs_lvp_not_needed;
 148  148          uint_t  mobjs_no_mem_map_sz;
 149  149          uint_t  mobjs_check_exec_failed;
 150  150          uint_t  mobjs_lvp_used;
 151  151          uint_t  mobjs_wrong_model;
 152  152          uint_t  mobjs_noexec_fs;
 153  153          uint_t  mobjs_e2big_et_rel;
 154  154          uint_t  mobjs_et_rel_mapped;
 155  155          uint_t  mobjs_unknown_elf_type;
 156  156          uint_t  mobjs_phent32_too_small;
 157  157          uint_t  mobjs_phent64_too_small;
 158  158          uint_t  mobjs_inval_elf_class;
 159  159          uint_t  mobjs_too_many_phdrs;
 160  160          uint_t  mobjs_no_phsize;
 161  161          uint_t  mobjs_phsize_large;
 162  162          uint_t  mobjs_phsize_xtralarge;
 163  163          uint_t  mobjs_fast_wrong_model;
 164  164          uint_t  mobjs_fast_e2big;
 165  165          uint_t  mobjs_fast;
 166  166          uint_t  mobjs_fast_success;
 167  167          uint_t  mobjs_fast_not_now;
 168  168          uint_t  mobjs_small_file;
 169  169          uint_t  mobjs_read_error;
 170  170          uint_t  mobjs_unsupported;
 171  171          uint_t  mobjs_flat_e2big;
 172  172          uint_t  mobjs_phent_align32;
 173  173          uint_t  mobjs_phent_align64;
 174  174          uint_t  mobjs_lib_va_find_hit;
 175  175          uint_t  mobjs_lib_va_find_delay_delete;
 176  176          uint_t  mobjs_lib_va_find_delete;
 177  177          uint_t  mobjs_lib_va_add_delay_delete;
 178  178          uint_t  mobjs_lib_va_add_delete;
 179  179          uint_t  mobjs_lib_va_create_failure;
 180  180          uint_t  mobjs_min_align;
 181  181  #if defined(__sparc)
 182  182          uint_t  mobjs_aout_uzero_fault;
 183  183          uint_t  mobjs_aout_64bit_try;
 184  184          uint_t  mobjs_aout_noexec;
 185  185          uint_t  mobjs_aout_e2big;
 186  186          uint_t  mobjs_aout_lib;
 187  187          uint_t  mobjs_aout_fixed;
 188  188          uint_t  mobjs_aout_zfoddiff;
 189  189          uint_t  mobjs_aout_map_bss;
 190  190          uint_t  mobjs_aout_bss_fail;
 191  191          uint_t  mobjs_aout_nlist;
 192  192          uint_t  mobjs_aout_addr_in_use;
 193  193  #endif
 194  194  } mobj_stats;
 195  195  
 196  196  #define MOBJ_STAT_ADD(stat)             ((mobj_stats.mobjs_##stat)++)
 197  197  #else
 198  198  #define MOBJ_STAT_ADD(stat)
 199  199  #endif
 200  200  
 201  201  /*
 202  202   * Check if addr is at or above the address space reserved for the stack.
 203  203   * The stack is at the top of the address space for all sparc processes
 204  204   * and 64 bit x86 processes.  For 32 bit x86, the stack is not at the top
  205  205   * of the address space and thus this check will always return false for
 206  206   * 32 bit x86 processes.
 207  207   */
 208  208  #if defined(__sparc)
 209  209  #define OVERLAPS_STACK(addr, p)                                         \
 210  210          (addr >= (p->p_usrstack - ((p->p_stk_ctl + PAGEOFFSET) & PAGEMASK)))
 211  211  #elif defined(__amd64)
 212  212  #define OVERLAPS_STACK(addr, p)                                         \
 213  213          ((p->p_model == DATAMODEL_LP64) &&                              \
 214  214          (addr >= (p->p_usrstack - ((p->p_stk_ctl + PAGEOFFSET) & PAGEMASK))))
 215  215  #elif defined(__i386)
 216  216  #define OVERLAPS_STACK(addr, p) 0
 217  217  #endif
 218  218  
 219  219  /* lv_flags values - bitmap */
 220  220  #define LV_ELF32        0x1             /* 32 bit ELF file */
 221  221  #define LV_ELF64        0x2             /* 64 bit ELF file */
 222  222  #define LV_DEL          0x4             /* delete when lv_refcnt hits zero */
 223  223  
 224  224  /*
 225  225   * Note: lv_num_segs will denote how many segments this file has and will
 226  226   * only be set after the lv_mps array has been filled out.
 227  227   * lv_mps can only be valid if lv_num_segs is non-zero.
 228  228   */
 229  229  struct lib_va {
 230  230          struct lib_va           *lv_next;
 231  231          caddr_t                 lv_base_va;     /* start va for library */
 232  232          ssize_t                 lv_len;         /* total va span of library */
 233  233          size_t                  lv_align;       /* minimum alignment */
 234  234          uint64_t                lv_nodeid;      /* filesystem node id */
 235  235          uint64_t                lv_fsid;        /* filesystem id */
 236  236          timestruc_t             lv_ctime;       /* last time file was changed */
 237  237          timestruc_t             lv_mtime;       /* or modified */
 238  238          mmapobj_result_t        lv_mps[LIBVA_CACHED_SEGS]; /* cached pheaders */
 239  239          int                     lv_num_segs;    /* # segs for this file */
 240  240          int                     lv_flags;
 241  241          uint_t                  lv_refcnt;      /* number of holds on struct */
 242  242  };
 243  243  
 244  244  #define LIB_VA_SIZE     1024
 245  245  #define LIB_VA_MASK     (LIB_VA_SIZE - 1)
 246  246  #define LIB_VA_MUTEX_SHIFT      3
 247  247  
 248  248  #if (LIB_VA_SIZE & (LIB_VA_SIZE - 1))
 249  249  #error  "LIB_VA_SIZE is not a power of 2"
 250  250  #endif
 251  251  
 252  252  static struct lib_va *lib_va_hash[LIB_VA_SIZE];
 253  253  static kmutex_t lib_va_hash_mutex[LIB_VA_SIZE >> LIB_VA_MUTEX_SHIFT];
 254  254  
 255  255  #define LIB_VA_HASH_MUTEX(index)                                        \
 256  256          (&lib_va_hash_mutex[index >> LIB_VA_MUTEX_SHIFT])
 257  257  
 258  258  #define LIB_VA_HASH(nodeid)                                             \
 259  259          (((nodeid) ^ ((nodeid) << 7) ^ ((nodeid) << 13)) & LIB_VA_MASK)
 260  260  
 261  261  #define LIB_VA_MATCH_ID(arg1, arg2)                                     \
 262  262          ((arg1)->lv_nodeid == (arg2)->va_nodeid &&                      \
 263  263          (arg1)->lv_fsid == (arg2)->va_fsid)
 264  264  
 265  265  #define LIB_VA_MATCH_TIME(arg1, arg2)                                   \
 266  266          ((arg1)->lv_ctime.tv_sec == (arg2)->va_ctime.tv_sec &&          \
 267  267          (arg1)->lv_mtime.tv_sec == (arg2)->va_mtime.tv_sec &&           \
 268  268          (arg1)->lv_ctime.tv_nsec == (arg2)->va_ctime.tv_nsec &&         \
 269  269          (arg1)->lv_mtime.tv_nsec == (arg2)->va_mtime.tv_nsec)
 270  270  
 271  271  #define LIB_VA_MATCH(arg1, arg2)                                        \
 272  272          (LIB_VA_MATCH_ID(arg1, arg2) && LIB_VA_MATCH_TIME(arg1, arg2))
 273  273  
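A small illustrative sketch of how the two hashing macros above cooperate (not part of the file): with LIB_VA_SIZE of 1024 and LIB_VA_MUTEX_SHIFT of 3, the 1024 hash buckets share 128 mutexes, eight consecutive buckets per lock. The nodeid value and the demo function are hypothetical.

static void
lib_va_hash_demo(void)
{
	uint64_t nodeid = 0x1234;			/* hypothetical va_nodeid */
	uint_t bucket = LIB_VA_HASH(nodeid);		/* 0 .. LIB_VA_SIZE - 1 */
	uint_t lock = bucket >> LIB_VA_MUTEX_SHIFT;	/* 0 .. 127 */

	/* Eight consecutive buckets map to the same mutex. */
	ASSERT(LIB_VA_HASH_MUTEX(bucket) == &lib_va_hash_mutex[lock]);
}
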
 274  274  /*
 275  275   * lib_va will be used for optimized allocation of address ranges for
 276  276   * libraries, such that subsequent mappings of the same library will attempt
 277  277   * to use the same VA as previous mappings of that library.
 278  278   * In order to map libraries at the same VA in many processes, we need to carve
 279  279   * out our own address space for them which is unique across many processes.
 280  280   * We use different arenas for 32 bit and 64 bit libraries.
 281  281   *
 282  282   * Since the 32 bit address space is relatively small, we limit the number of
 283  283   * libraries which try to use consistent virtual addresses to lib_threshold.
 284  284   * For 64 bit libraries there is no such limit since the address space is large.
 285  285   */
 286  286  static vmem_t *lib_va_32_arena;
 287  287  static vmem_t *lib_va_64_arena;
 288  288  uint_t lib_threshold = 20;      /* modifiable via /etc/system */
 289  289  
 290  290  static kmutex_t lib_va_init_mutex;      /* no need to initialize */
 291  291  
 292  292  /*
 293  293   * Number of 32 bit and 64 bit libraries in lib_va hash.
 294  294   */
 295  295  static uint_t libs_mapped_32 = 0;
 296  296  static uint_t libs_mapped_64 = 0;
 297  297  
 298  298  /*
 299  299   * Free up the resources associated with lvp as well as lvp itself.
 300  300   * We also decrement the number of libraries mapped via a lib_va
 301  301   * cached virtual address.
 302  302   */
 303  303  void
 304  304  lib_va_free(struct lib_va *lvp)
 305  305  {
 306  306          int is_64bit = lvp->lv_flags & LV_ELF64;
 307  307          ASSERT(lvp->lv_refcnt == 0);
 308  308  
 309  309          if (lvp->lv_base_va != NULL) {
 310  310                  vmem_xfree(is_64bit ? lib_va_64_arena : lib_va_32_arena,
 311  311                      lvp->lv_base_va, lvp->lv_len);
 312  312                  if (is_64bit) {
 313  313                          atomic_dec_32(&libs_mapped_64);
 314  314                  } else {
 315  315                          atomic_dec_32(&libs_mapped_32);
 316  316                  }
 317  317          }
 318  318          kmem_free(lvp, sizeof (struct lib_va));
 319  319  }
 320  320  
 321  321  /*
 322  322   * See if the file associated with the vap passed in is in the lib_va hash.
 323  323   * If it is and the file has not been modified since last use, then
 324  324   * return a pointer to that data.  Otherwise, return NULL if the file has
 325  325   * changed or the file was not found in the hash.
 326  326   */
 327  327  static struct lib_va *
 328  328  lib_va_find(vattr_t *vap)
 329  329  {
 330  330          struct lib_va *lvp;
 331  331          struct lib_va *del = NULL;
 332  332          struct lib_va **tmp;
 333  333          uint_t index;
 334  334          index = LIB_VA_HASH(vap->va_nodeid);
 335  335  
 336  336          mutex_enter(LIB_VA_HASH_MUTEX(index));
 337  337          tmp = &lib_va_hash[index];
 338  338          while (*tmp != NULL) {
 339  339                  lvp = *tmp;
 340  340                  if (LIB_VA_MATCH_ID(lvp, vap)) {
 341  341                          if (LIB_VA_MATCH_TIME(lvp, vap)) {
 342  342                                  ASSERT((lvp->lv_flags & LV_DEL) == 0);
 343  343                                  lvp->lv_refcnt++;
 344  344                                  MOBJ_STAT_ADD(lib_va_find_hit);
 345  345                          } else {
 346  346                                  /*
 347  347                                   * file was updated since last use.
 348  348                                   * need to remove it from list.
 349  349                                   */
 350  350                                  del = lvp;
 351  351                                  *tmp = del->lv_next;
 352  352                                  del->lv_next = NULL;
 353  353                                  /*
 354  354                                   * If we can't delete it now, mark it for later
 355  355                                   */
 356  356                                  if (del->lv_refcnt) {
 357  357                                          MOBJ_STAT_ADD(lib_va_find_delay_delete);
 358  358                                          del->lv_flags |= LV_DEL;
 359  359                                          del = NULL;
 360  360                                  }
 361  361                                  lvp = NULL;
 362  362                          }
 363  363                          mutex_exit(LIB_VA_HASH_MUTEX(index));
 364  364                          if (del) {
 365  365                                  ASSERT(del->lv_refcnt == 0);
 366  366                                  MOBJ_STAT_ADD(lib_va_find_delete);
 367  367                                  lib_va_free(del);
 368  368                          }
 369  369                          return (lvp);
 370  370                  }
 371  371                  tmp = &lvp->lv_next;
 372  372          }
 373  373          mutex_exit(LIB_VA_HASH_MUTEX(index));
 374  374          return (NULL);
 375  375  }
 376  376  
 377  377  /*
 378  378   * Add a new entry to the lib_va hash.
 379  379   * Search the hash while holding the appropriate mutex to make sure that the
 380  380   * data is not already in the cache.  If we find data that is in the cache
 381  381   * already and has not been modified since last use, we return NULL.  If it
 382  382   * has been modified since last use, we will remove that entry from
  383  383   * the hash and it will be deleted once its reference count reaches zero.
 384  384   * If there is no current entry in the hash we will add the new entry and
 385  385   * return it to the caller who is responsible for calling lib_va_release to
 386  386   * drop their reference count on it.
 387  387   *
 388  388   * lv_num_segs will be set to zero since the caller needs to add that
 389  389   * information to the data structure.
 390  390   */
 391  391  static struct lib_va *
 392  392  lib_va_add_hash(caddr_t base_va, ssize_t len, size_t align, vattr_t *vap)
 393  393  {
 394  394          struct lib_va *lvp;
 395  395          uint_t index;
 396  396          model_t model;
 397  397          struct lib_va **tmp;
 398  398          struct lib_va *del = NULL;
 399  399  
 400  400          model = get_udatamodel();
 401  401          index = LIB_VA_HASH(vap->va_nodeid);
 402  402  
 403  403          lvp = kmem_alloc(sizeof (struct lib_va), KM_SLEEP);
 404  404  
 405  405          mutex_enter(LIB_VA_HASH_MUTEX(index));
 406  406  
 407  407          /*
  408  408           * Make sure we are not adding the same data a second time.
 409  409           * The hash chains should be relatively short and adding
 410  410           * is a relatively rare event, so it's worth the check.
 411  411           */
 412  412          tmp = &lib_va_hash[index];
 413  413          while (*tmp != NULL) {
 414  414                  if (LIB_VA_MATCH_ID(*tmp, vap)) {
 415  415                          if (LIB_VA_MATCH_TIME(*tmp, vap)) {
 416  416                                  mutex_exit(LIB_VA_HASH_MUTEX(index));
 417  417                                  kmem_free(lvp, sizeof (struct lib_va));
 418  418                                  return (NULL);
 419  419                          }
 420  420  
 421  421                          /*
 422  422                           * We have the same nodeid and fsid but the file has
 423  423                           * been modified since we last saw it.
 424  424                           * Need to remove the old node and add this new
 425  425                           * one.
 426  426                           * Could probably use a callback mechanism to make
 427  427                           * this cleaner.
 428  428                           */
 429  429                          ASSERT(del == NULL);
 430  430                          del = *tmp;
 431  431                          *tmp = del->lv_next;
 432  432                          del->lv_next = NULL;
 433  433  
 434  434                          /*
 435  435                           * Check to see if we can free it.  If lv_refcnt
  436  436                           * is greater than zero, then some other thread
  437  437                           * has a reference to the one we want to delete
  438  438                           * and we cannot delete it.  All of this is done
 439  439                           * under the lib_va_hash_mutex lock so it is atomic.
 440  440                           */
 441  441                          if (del->lv_refcnt) {
 442  442                                  MOBJ_STAT_ADD(lib_va_add_delay_delete);
 443  443                                  del->lv_flags |= LV_DEL;
 444  444                                  del = NULL;
 445  445                          }
 446  446                          /* tmp is already advanced */
 447  447                          continue;
 448  448                  }
 449  449                  tmp = &((*tmp)->lv_next);
 450  450          }
 451  451  
 452  452          lvp->lv_base_va = base_va;
 453  453          lvp->lv_len = len;
 454  454          lvp->lv_align = align;
 455  455          lvp->lv_nodeid = vap->va_nodeid;
 456  456          lvp->lv_fsid = vap->va_fsid;
 457  457          lvp->lv_ctime.tv_sec = vap->va_ctime.tv_sec;
 458  458          lvp->lv_ctime.tv_nsec = vap->va_ctime.tv_nsec;
 459  459          lvp->lv_mtime.tv_sec = vap->va_mtime.tv_sec;
 460  460          lvp->lv_mtime.tv_nsec = vap->va_mtime.tv_nsec;
 461  461          lvp->lv_next = NULL;
 462  462          lvp->lv_refcnt = 1;
 463  463  
 464  464          /* Caller responsible for filling this and lv_mps out */
 465  465          lvp->lv_num_segs = 0;
 466  466  
 467  467          if (model == DATAMODEL_LP64) {
 468  468                  lvp->lv_flags = LV_ELF64;
 469  469          } else {
 470  470                  ASSERT(model == DATAMODEL_ILP32);
 471  471                  lvp->lv_flags = LV_ELF32;
 472  472          }
 473  473  
 474  474          if (base_va != NULL) {
 475  475                  if (model == DATAMODEL_LP64) {
 476  476                          atomic_inc_32(&libs_mapped_64);
 477  477                  } else {
 478  478                          ASSERT(model == DATAMODEL_ILP32);
 479  479                          atomic_inc_32(&libs_mapped_32);
 480  480                  }
 481  481          }
 482  482          ASSERT(*tmp == NULL);
 483  483          *tmp = lvp;
 484  484          mutex_exit(LIB_VA_HASH_MUTEX(index));
 485  485          if (del) {
 486  486                  ASSERT(del->lv_refcnt == 0);
 487  487                  MOBJ_STAT_ADD(lib_va_add_delete);
 488  488                  lib_va_free(del);
 489  489          }
 490  490          return (lvp);
 491  491  }
 492  492  
 493  493  /*
 494  494   * Release the hold on lvp which was acquired by lib_va_find or lib_va_add_hash.
 495  495   * In addition, if this is the last hold and lvp is marked for deletion,
  497  497   * free up its reserved address space and free the structure.
 497  497   */
 498  498  static void
 499  499  lib_va_release(struct lib_va *lvp)
 500  500  {
 501  501          uint_t index;
 502  502          int to_del = 0;
 503  503  
 504  504          ASSERT(lvp->lv_refcnt > 0);
 505  505  
 506  506          index = LIB_VA_HASH(lvp->lv_nodeid);
 507  507          mutex_enter(LIB_VA_HASH_MUTEX(index));
 508  508          if (--lvp->lv_refcnt == 0 && (lvp->lv_flags & LV_DEL)) {
 509  509                  to_del = 1;
 510  510          }
 511  511          mutex_exit(LIB_VA_HASH_MUTEX(index));
 512  512          if (to_del) {
 513  513                  ASSERT(lvp->lv_next == 0);
 514  514                  lib_va_free(lvp);
 515  515          }
 516  516  }
 517  517  
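Taken together, lib_va_find(), lib_va_add_hash() and lib_va_release() implement a hold plus deferred-delete scheme: an entry marked LV_DEL lingers until the last hold is dropped. A hypothetical caller-side sketch (not part of the file) of the intended lifecycle:

static void
lib_va_lookup_demo(vattr_t *vap)
{
	struct lib_va *lvp = lib_va_find(vap);	/* hold taken on success */

	if (lvp != NULL) {
		/* ... consult lvp->lv_base_va, lvp->lv_mps, etc. ... */
		lib_va_release(lvp);	/* drop hold; frees entry if LV_DEL */
	}
}
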
 518  518  /*
 519  519   * Dummy function for mapping through /dev/null
 520  520   * Normally I would have used mmmmap in common/io/mem.c
 521  521   * but that is a static function, and for /dev/null, it
 522  522   * just returns -1.
 523  523   */
 524  524  /* ARGSUSED */
 525  525  static int
 526  526  mmapobj_dummy(dev_t dev, off_t off, int prot)
 527  527  {
 528  528          return (-1);
 529  529  }
 530  530  
 531  531  /*
  532  532   * Called when an error occurs which requires mmapobj to return failure.
 533  533   * All mapped objects will be unmapped and /dev/null mappings will be
 534  534   * reclaimed if necessary.
 535  535   * num_mapped is the number of elements of mrp which have been mapped, and
 536  536   * num_segs is the total number of elements in mrp.
 537  537   * For e_type ET_EXEC, we need to unmap all of the elements in mrp since
 538  538   * we had already made reservations for them.
 539  539   * If num_mapped equals num_segs, then we know that we had fully mapped
 540  540   * the file and only need to clean up the segments described.
 541  541   * If they are not equal, then for ET_DYN we will unmap the range from the
 542  542   * end of the last mapped segment to the end of the last segment in mrp
 543  543   * since we would have made a reservation for that memory earlier.
 544  544   * If e_type is passed in as zero, num_mapped must equal num_segs.
 545  545   */
 546  546  void
 547  547  mmapobj_unmap(mmapobj_result_t *mrp, int num_mapped, int num_segs,
 548  548      ushort_t e_type)
 549  549  {
 550  550          int i;
 551  551          struct as *as = curproc->p_as;
 552  552          caddr_t addr;
 553  553          size_t size;
 554  554  
 555  555          if (e_type == ET_EXEC) {
 556  556                  num_mapped = num_segs;
 557  557          }
 558  558  #ifdef DEBUG
 559  559          if (e_type == 0) {
 560  560                  ASSERT(num_mapped == num_segs);
 561  561          }
 562  562  #endif
 563  563  
 564  564          MOBJ_STAT_ADD(unmap_called);
 565  565          for (i = 0; i < num_mapped; i++) {
 566  566  
 567  567                  /*
 568  568                   * If we are going to have to create a mapping we need to
 569  569                   * make sure that no one else will use the address we
 570  570                   * need to remap between the time it is unmapped and
 571  571                   * mapped below.
 572  572                   */
 573  573                  if (mrp[i].mr_flags & MR_RESV) {
 574  574                          as_rangelock(as);
 575  575                  }
 576  576                  /* Always need to unmap what we mapped */
 577  577                  (void) as_unmap(as, mrp[i].mr_addr, mrp[i].mr_msize);
 578  578  
 579  579                  /* Need to reclaim /dev/null reservation from earlier */
 580  580                  if (mrp[i].mr_flags & MR_RESV) {
 581  581                          struct segdev_crargs dev_a;
 582  582  
 583  583                          ASSERT(e_type != ET_DYN);
 584  584                          /*
 585  585                           * Use seg_dev segment driver for /dev/null mapping.
 586  586                           */
 587  587                          dev_a.mapfunc = mmapobj_dummy;
 588  588                          dev_a.dev = makedevice(mm_major, M_NULL);
 589  589                          dev_a.offset = 0;
 590  590                          dev_a.type = 0;         /* neither PRIVATE nor SHARED */
 591  591                          dev_a.prot = dev_a.maxprot = (uchar_t)PROT_NONE;
 592  592                          dev_a.hat_attr = 0;
 593  593                          dev_a.hat_flags = 0;
 594  594  
 595  595                          (void) as_map(as, mrp[i].mr_addr, mrp[i].mr_msize,
 596  596                              segdev_create, &dev_a);
 597  597                          MOBJ_STAT_ADD(remap_devnull);
 598  598                          as_rangeunlock(as);
 599  599                  }
 600  600          }
 601  601  
 602  602          if (num_mapped != num_segs) {
 603  603                  ASSERT(e_type == ET_DYN);
 604  604                  /* Need to unmap any reservation made after last mapped seg */
 605  605                  if (num_mapped == 0) {
 606  606                          addr = mrp[0].mr_addr;
 607  607                  } else {
 608  608                          addr = mrp[num_mapped - 1].mr_addr +
 609  609                              mrp[num_mapped - 1].mr_msize;
 610  610                  }
 611  611                  size = (size_t)mrp[num_segs - 1].mr_addr +
 612  612                      mrp[num_segs - 1].mr_msize - (size_t)addr;
 613  613                  (void) as_unmap(as, addr, size);
 614  614  
 615  615                  /*
 616  616                   * Now we need to unmap the holes between mapped segs.
 617  617                   * Note that we have not mapped all of the segments and thus
 618  618                   * the holes between segments would not have been unmapped
 619  619                   * yet.  If num_mapped == num_segs, then all of the holes
 620  620                   * between segments would have already been unmapped.
 621  621                   */
 622  622  
 623  623                  for (i = 1; i < num_mapped; i++) {
 624  624                          addr = mrp[i - 1].mr_addr + mrp[i - 1].mr_msize;
 625  625                          size = mrp[i].mr_addr - addr;
 626  626                          (void) as_unmap(as, addr, size);
 627  627                  }
 628  628          }
 629  629  }
 630  630  
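To make the ET_DYN cleanup arithmetic in mmapobj_unmap() concrete, here is a worked example with purely illustrative numbers:

/*
 * Illustrative values: mrp[] describes three ET_DYN segments but only the
 * first was mapped (num_mapped == 1, num_segs == 3):
 *
 *	mrp[0]: mr_addr = 0x10000, mr_msize = 0x4000	(mapped)
 *	mrp[1]: mr_addr = 0x16000, mr_msize = 0x2000	(not mapped)
 *	mrp[2]: mr_addr = 0x1a000, mr_msize = 0x3000	(not mapped)
 *
 * The tail unmap starts at addr = 0x10000 + 0x4000 = 0x14000 and spans
 * size = (0x1a000 + 0x3000) - 0x14000 = 0x9000, releasing the leftover
 * reservation beyond the last mapped segment.  The final loop then unmaps
 * any holes between the segments that were mapped; with num_mapped == 1
 * there are none.
 */
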
 631  631  /*
 632  632   * We need to add the start address into mrp so that the unmap function
 633  633   * has absolute addresses to use.
 634  634   */
 635  635  static void
 636  636  mmapobj_unmap_exec(mmapobj_result_t *mrp, int num_mapped, caddr_t start_addr)
 637  637  {
 638  638          int i;
 639  639  
 640  640          for (i = 0; i < num_mapped; i++) {
 641  641                  mrp[i].mr_addr += (size_t)start_addr;
 642  642          }
 643  643          mmapobj_unmap(mrp, num_mapped, num_mapped, ET_EXEC);
 644  644  }
 645  645  
 646  646  static caddr_t
 647  647  mmapobj_lookup_start_addr(struct lib_va *lvp)
 648  648  {
 649  649          proc_t *p = curproc;
 650  650          struct as *as = p->p_as;
 651  651          struct segvn_crargs crargs = SEGVN_ZFOD_ARGS(PROT_USER, PROT_ALL);
 652  652          int error;
 653  653          uint_t ma_flags = _MAP_LOW32;
 654  654          caddr_t base = NULL;
 655  655          size_t len;
 656  656          size_t align;
 657  657  
 658  658          ASSERT(lvp != NULL);
 659  659          MOBJ_STAT_ADD(lookup_start);
 660  660  
 661  661          as_rangelock(as);
 662  662  
 663  663          base = lvp->lv_base_va;
 664  664          len = lvp->lv_len;
 665  665  
 666  666          /*
 667  667           * If we don't have an expected base address, or the one that we want
 668  668           * to use is not available or acceptable, go get an acceptable
 669  669           * address range.
 670  670           */
 671  671          if (base == NULL || as_gap(as, len, &base, &len, 0, NULL) ||
 672  672              valid_usr_range(base, len, PROT_ALL, as, as->a_userlimit) !=
 673  673              RANGE_OKAY || OVERLAPS_STACK(base + len, p)) {
 674  674                  if (lvp->lv_flags & LV_ELF64) {
 675  675                          ma_flags = 0;
 676  676                  }
 677  677  
 678  678                  align = lvp->lv_align;
 679  679                  if (align > 1) {
 680  680                          ma_flags |= MAP_ALIGN;
 681  681                  }
 682  682  
 683  683                  base = (caddr_t)align;
 684  684                  map_addr(&base, len, 0, 1, ma_flags);
 685  685          }
 686  686  
 687  687          /*
 688  688           * Need to reserve the address space we're going to use.
 689  689           * Don't reserve swap space since we'll be mapping over this.
 690  690           */
 691  691          if (base != NULL) {
 692  692                  crargs.flags |= MAP_NORESERVE;
 693  693                  error = as_map(as, base, len, segvn_create, &crargs);
 694  694                  if (error) {
 695  695                          base = NULL;
 696  696                  }
 697  697          }
 698  698  
 699  699          as_rangeunlock(as);
 700  700          return (base);
 701  701  }
 702  702  
 703  703  /*
 704  704   * Get the starting address for a given file to be mapped and return it
 705  705   * to the caller.  If we're using lib_va and we need to allocate an address,
 706  706   * we will attempt to allocate it from the global reserved pool such that the
 707  707   * same address can be used in the future for this file.  If we can't use the
 708  708   * reserved address then we just get one that will fit in our address space.
 709  709   *
 710  710   * Returns the starting virtual address for the range to be mapped or NULL
 711  711   * if an error is encountered. If we successfully insert the requested info
 712  712   * into the lib_va hash, then *lvpp will be set to point to this lib_va
 713  713   * structure.  The structure will have a hold on it and thus lib_va_release
 714  714   * needs to be called on it by the caller.  This function will not fill out
 715  715   * lv_mps or lv_num_segs since it does not have enough information to do so.
  716  716   * The caller is responsible for doing this and making sure that any modifications
 717  717   * to lv_mps are visible before setting lv_num_segs.
 718  718   */
 719  719  static caddr_t
 720  720  mmapobj_alloc_start_addr(struct lib_va **lvpp, size_t len, int use_lib_va,
 721  721      size_t align, vattr_t *vap)
 722  722  {
 723  723          proc_t *p = curproc;
 724  724          struct as *as = p->p_as;
 725  725          struct segvn_crargs crargs = SEGVN_ZFOD_ARGS(PROT_USER, PROT_ALL);
 726  726          int error;
 727  727          model_t model;
 728  728          uint_t ma_flags = _MAP_LOW32;
 729  729          caddr_t base = NULL;
 730  730          vmem_t *model_vmem;
 731  731          size_t lib_va_start;
 732  732          size_t lib_va_end;
 733  733          size_t lib_va_len;
 734  734  
 735  735          ASSERT(lvpp != NULL);
 736  736  
 737  737          MOBJ_STAT_ADD(alloc_start);
 738  738          model = get_udatamodel();
 739  739  
 740  740          if (model == DATAMODEL_LP64) {
 741  741                  ma_flags = 0;
 742  742                  model_vmem = lib_va_64_arena;
 743  743          } else {
 744  744                  ASSERT(model == DATAMODEL_ILP32);
 745  745                  model_vmem = lib_va_32_arena;
 746  746          }
 747  747  
 748  748          if (align > 1) {
 749  749                  ma_flags |= MAP_ALIGN;
 750  750          }
 751  751          if (use_lib_va) {
 752  752                  /*
 753  753                   * The first time through, we need to setup the lib_va arenas.
 754  754                   * We call map_addr to find a suitable range of memory to map
 755  755                   * the given library, and we will set the highest address
  756  756                   * in our vmem arena to the end of this address range.
 757  757                   * We allow up to half of the address space to be used
 758  758                   * for lib_va addresses but we do not prevent any allocations
 759  759                   * in this range from other allocation paths.
 760  760                   */
 761  761                  if (lib_va_64_arena == NULL && model == DATAMODEL_LP64) {
 762  762                          mutex_enter(&lib_va_init_mutex);
 763  763                          if (lib_va_64_arena == NULL) {
 764  764                                  base = (caddr_t)align;
 765  765                                  as_rangelock(as);
 766  766                                  map_addr(&base, len, 0, 1, ma_flags);
 767  767                                  as_rangeunlock(as);
 768  768                                  if (base == NULL) {
 769  769                                          mutex_exit(&lib_va_init_mutex);
 770  770                                          MOBJ_STAT_ADD(lib_va_create_failure);
 771  771                                          goto nolibva;
 772  772                                  }
 773  773                                  lib_va_end = (size_t)base + len;
 774  774                                  lib_va_len = lib_va_end >> 1;
 775  775                                  lib_va_len = P2ROUNDUP(lib_va_len, PAGESIZE);
 776  776                                  lib_va_start = lib_va_end - lib_va_len;
 777  777  
 778  778                                  /*
 779  779                                   * Need to make sure we avoid the address hole.
 780  780                                   * We know lib_va_end is valid but we need to
 781  781                                   * make sure lib_va_start is as well.
 782  782                                   */
 783  783                                  if ((lib_va_end > (size_t)hole_end) &&
 784  784                                      (lib_va_start < (size_t)hole_end)) {
 785  785                                          lib_va_start = P2ROUNDUP(
 786  786                                              (size_t)hole_end, PAGESIZE);
 787  787                                          lib_va_len = lib_va_end - lib_va_start;
 788  788                                  }
 789  789                                  lib_va_64_arena = vmem_create("lib_va_64",
 790  790                                      (void *)lib_va_start, lib_va_len, PAGESIZE,
 791  791                                      NULL, NULL, NULL, 0,
 792  792                                      VM_NOSLEEP | VMC_IDENTIFIER);
 793  793                                  if (lib_va_64_arena == NULL) {
 794  794                                          mutex_exit(&lib_va_init_mutex);
 795  795                                          goto nolibva;
 796  796                                  }
 797  797                          }
 798  798                          model_vmem = lib_va_64_arena;
 799  799                          mutex_exit(&lib_va_init_mutex);
 800  800                  } else if (lib_va_32_arena == NULL &&
 801  801                      model == DATAMODEL_ILP32) {
 802  802                          mutex_enter(&lib_va_init_mutex);
 803  803                          if (lib_va_32_arena == NULL) {
 804  804                                  base = (caddr_t)align;
 805  805                                  as_rangelock(as);
 806  806                                  map_addr(&base, len, 0, 1, ma_flags);
 807  807                                  as_rangeunlock(as);
 808  808                                  if (base == NULL) {
 809  809                                          mutex_exit(&lib_va_init_mutex);
 810  810                                          MOBJ_STAT_ADD(lib_va_create_failure);
 811  811                                          goto nolibva;
 812  812                                  }
 813  813                                  lib_va_end = (size_t)base + len;
 814  814                                  lib_va_len = lib_va_end >> 1;
 815  815                                  lib_va_len = P2ROUNDUP(lib_va_len, PAGESIZE);
 816  816                                  lib_va_start = lib_va_end - lib_va_len;
 817  817                                  lib_va_32_arena = vmem_create("lib_va_32",
 818  818                                      (void *)lib_va_start, lib_va_len, PAGESIZE,
 819  819                                      NULL, NULL, NULL, 0,
 820  820                                      VM_NOSLEEP | VMC_IDENTIFIER);
 821  821                                  if (lib_va_32_arena == NULL) {
 822  822                                          mutex_exit(&lib_va_init_mutex);
 823  823                                          goto nolibva;
 824  824                                  }
 825  825                          }
 826  826                          model_vmem = lib_va_32_arena;
 827  827                          mutex_exit(&lib_va_init_mutex);
 828  828                  }
 829  829  
 830  830                  if (model == DATAMODEL_LP64 || libs_mapped_32 < lib_threshold) {
 831  831                          base = vmem_xalloc(model_vmem, len, align, 0, 0, NULL,
 832  832                              NULL, VM_NOSLEEP | VM_ENDALLOC);
 833  833                          MOBJ_STAT_ADD(alloc_vmem);
 834  834                  }
 835  835  
 836  836                  /*
 837  837                   * Even if the address fails to fit in our address space,
 838  838                   * or we can't use a reserved address,
 839  839                   * we should still save it off in lib_va_hash.
 840  840                   */
 841  841                  *lvpp = lib_va_add_hash(base, len, align, vap);
 842  842  
 843  843                  /*
 844  844                   * Check for collision on insertion and free up our VA space.
 845  845                   * This is expected to be rare, so we'll just reset base to
 846  846                   * NULL instead of looking it up in the lib_va hash.
 847  847                   */
 848  848                  if (*lvpp == NULL) {
 849  849                          if (base != NULL) {
 850  850                                  vmem_xfree(model_vmem, base, len);
 851  851                                  base = NULL;
 852  852                                  MOBJ_STAT_ADD(add_collision);
 853  853                          }
 854  854                  }
 855  855          }
 856  856  
 857  857  nolibva:
 858  858          as_rangelock(as);
 859  859  
 860  860          /*
 861  861           * If we don't have an expected base address, or the one that we want
 862  862           * to use is not available or acceptable, go get an acceptable
 863  863           * address range.
 864  864           */
 865  865          if (base == NULL || as_gap(as, len, &base, &len, 0, NULL) ||
 866  866              valid_usr_range(base, len, PROT_ALL, as, as->a_userlimit) !=
 867  867              RANGE_OKAY || OVERLAPS_STACK(base + len, p)) {
 868  868                  MOBJ_STAT_ADD(get_addr);
 869  869                  base = (caddr_t)align;
 870  870                  map_addr(&base, len, 0, 1, ma_flags);
 871  871          }
 872  872  
 873  873          /*
 874  874           * Need to reserve the address space we're going to use.
 875  875           * Don't reserve swap space since we'll be mapping over this.
 876  876           */
 877  877          if (base != NULL) {
 878  878                  /* Don't reserve swap space since we'll be mapping over this */
 879  879                  crargs.flags |= MAP_NORESERVE;
 880  880                  error = as_map(as, base, len, segvn_create, &crargs);
 881  881                  if (error) {
 882  882                          base = NULL;
 883  883                  }
 884  884          }
 885  885  
 886  886          as_rangeunlock(as);
 887  887          return (base);
 888  888  }
 889  889  
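As a reading aid for the arena setup in mmapobj_alloc_start_addr(), the first LP64 (or ILP32) caller seeds the lib_va arena roughly as follows; this is a symbolic restatement, not new behavior:

/*
 * Symbolic restatement (not part of the file):
 *
 *	lib_va_end   = base + len		(top of the map_addr() range)
 *	lib_va_len   = P2ROUNDUP(lib_va_end >> 1, PAGESIZE)
 *	lib_va_start = lib_va_end - lib_va_len
 *
 * i.e. roughly the upper half of the address space below lib_va_end is
 * handed to the vmem arena.  If that range would straddle the VA hole,
 * lib_va_start is pushed up to P2ROUNDUP(hole_end, PAGESIZE).  Later
 * mappings of cached libraries first try vmem_xalloc() from this arena
 * and only fall back to map_addr() if that fails or is not acceptable.
 */
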
 890  890  /*
 891  891   * Map the file associated with vp into the address space as a single
 892  892   * read only private mapping.
 893  893   * Returns 0 for success, and non-zero for failure to map the file.
 894  894   */
 895  895  static int
 896  896  mmapobj_map_flat(vnode_t *vp, mmapobj_result_t *mrp, size_t padding,
 897  897      cred_t *fcred)
 898  898  {
 899  899          int error = 0;
 900  900          struct as *as = curproc->p_as;
 901  901          caddr_t addr = NULL;
 902  902          caddr_t start_addr;
 903  903          size_t len;
 904  904          size_t pad_len;
 905  905          int prot = PROT_USER | PROT_READ;
 906  906          uint_t ma_flags = _MAP_LOW32;
 907  907          vattr_t vattr;
 908  908          struct segvn_crargs crargs = SEGVN_ZFOD_ARGS(PROT_USER, PROT_ALL);
 909  909  
 910  910          if (get_udatamodel() == DATAMODEL_LP64) {
 911  911                  ma_flags = 0;
 912  912          }
 913  913  
 914  914          vattr.va_mask = AT_SIZE;
 915  915          error = VOP_GETATTR(vp, &vattr, 0, fcred, NULL);
 916  916          if (error) {
 917  917                  return (error);
 918  918          }
 919  919  
 920  920          len = vattr.va_size;
 921  921  
 922  922          ma_flags |= MAP_PRIVATE;
 923  923          if (padding == 0) {
 924  924                  MOBJ_STAT_ADD(map_flat_no_padding);
 925  925                  error = VOP_MAP(vp, 0, as, &addr, len, prot, PROT_ALL,
 926  926                      ma_flags, fcred, NULL);
 927  927                  if (error == 0) {
 928  928                          mrp[0].mr_addr = addr;
 929  929                          mrp[0].mr_msize = len;
 930  930                          mrp[0].mr_fsize = len;
 931  931                          mrp[0].mr_offset = 0;
 932  932                          mrp[0].mr_prot = prot;
 933  933                          mrp[0].mr_flags = 0;
 934  934                  }
 935  935                  return (error);
 936  936          }
 937  937  
 938  938          /* padding was requested so there's more work to be done */
 939  939          MOBJ_STAT_ADD(map_flat_padding);
 940  940  
 941  941          /* No need to reserve swap space now since it will be reserved later */
 942  942          crargs.flags |= MAP_NORESERVE;
 943  943  
  944  944          /* Need to set up padding, which can only be in PAGESIZE increments. */
 945  945          ASSERT((padding & PAGEOFFSET) == 0);
 946  946          pad_len = len + (2 * padding);
 947  947  
 948  948          as_rangelock(as);
 949  949          map_addr(&addr, pad_len, 0, 1, ma_flags);
 950  950          error = as_map(as, addr, pad_len, segvn_create, &crargs);
 951  951          as_rangeunlock(as);
 952  952          if (error) {
 953  953                  return (error);
 954  954          }
 955  955          start_addr = addr;
 956  956          addr += padding;
 957  957          ma_flags |= MAP_FIXED;
 958  958          error = VOP_MAP(vp, 0, as, &addr, len, prot, PROT_ALL, ma_flags,
 959  959              fcred, NULL);
 960  960          if (error == 0) {
 961  961                  mrp[0].mr_addr = start_addr;
 962  962                  mrp[0].mr_msize = padding;
 963  963                  mrp[0].mr_fsize = 0;
 964  964                  mrp[0].mr_offset = 0;
 965  965                  mrp[0].mr_prot = 0;
 966  966                  mrp[0].mr_flags = MR_PADDING;
 967  967  
 968  968                  mrp[1].mr_addr = addr;
 969  969                  mrp[1].mr_msize = len;
 970  970                  mrp[1].mr_fsize = len;
 971  971                  mrp[1].mr_offset = 0;
 972  972                  mrp[1].mr_prot = prot;
 973  973                  mrp[1].mr_flags = 0;
 974  974  
 975  975                  mrp[2].mr_addr = addr + P2ROUNDUP(len, PAGESIZE);
 976  976                  mrp[2].mr_msize = padding;
 977  977                  mrp[2].mr_fsize = 0;
 978  978                  mrp[2].mr_offset = 0;
 979  979                  mrp[2].mr_prot = 0;
 980  980                  mrp[2].mr_flags = MR_PADDING;
 981  981          } else {
 982  982                  /* Need to cleanup the as_map from earlier */
 983  983                  (void) as_unmap(as, start_addr, pad_len);
 984  984          }
 985  985          return (error);
 986  986  }
 987  987  
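A worked example of the padded flat-file layout produced by mmapobj_map_flat(), using illustrative sizes:

/*
 * Illustrative values: a 5000-byte flat file mapped with padding == 0x2000
 * on each side.  as_map() reserves pad_len = 5000 + 2 * 0x2000 bytes at
 * start_addr and the result array describes three ranges:
 *
 *	mrp[0]: mr_addr = start_addr			msize 0x2000  MR_PADDING
 *	mrp[1]: mr_addr = start_addr + 0x2000		msize 5000    the file
 *	mrp[2]: mr_addr = start_addr + 0x2000 +
 *		    P2ROUNDUP(5000, PAGESIZE)		msize 0x2000  MR_PADDING
 *
 * With either 4K or 8K pages, P2ROUNDUP(5000, PAGESIZE) is 0x2000, so the
 * trailing padding begins at start_addr + 0x4000.
 */
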
 988  988  /*
 989  989   * Map a PT_LOAD or PT_SUNWBSS section of an executable file into the user's
 990  990   * address space.
 991  991   * vp - vnode to be mapped in
 992  992   * addr - start address
 993  993   * len - length of vp to be mapped
 994  994   * zfodlen - length of zero filled memory after len above
 995  995   * offset - offset into file where mapping should start
 996  996   * prot - protections for this mapping
 997  997   * fcred - credentials for the file associated with vp at open time.
 998  998   */
 999  999  static int
1000 1000  mmapobj_map_ptload(struct vnode *vp, caddr_t addr, size_t len, size_t zfodlen,
1001 1001      off_t offset, int prot, cred_t *fcred)
1002 1002  {
1003 1003          int error = 0;
1004 1004          caddr_t zfodbase, oldaddr;
1005 1005          size_t oldlen;
1006 1006          size_t end;
1007 1007          size_t zfoddiff;
1008 1008          label_t ljb;
1009 1009          struct as *as = curproc->p_as;
1010 1010          model_t model;
1011 1011          int full_page;
1012 1012  
1013 1013          /*
1014 1014           * See if addr and offset are aligned such that we can map in
1015 1015           * full pages instead of partial pages.
1016 1016           */
1017 1017          full_page = (((uintptr_t)addr & PAGEOFFSET) ==
1018 1018              ((uintptr_t)offset & PAGEOFFSET));
1019 1019  
1020 1020          model = get_udatamodel();
1021 1021  
1022 1022          oldaddr = addr;
1023 1023          addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
1024 1024          if (len) {
1025 1025                  spgcnt_t availm, npages;
1026 1026                  int preread;
1027 1027                  uint_t mflag = MAP_PRIVATE | MAP_FIXED;
1028 1028  
1029 1029                  if (model == DATAMODEL_ILP32) {
1030 1030                          mflag |= _MAP_LOW32;
1031 1031                  }
1032 1032                  /* We may need to map in extra bytes */
1033 1033                  oldlen = len;
1034 1034                  len += ((size_t)oldaddr & PAGEOFFSET);
1035 1035  
1036 1036                  if (full_page) {
1037 1037                          offset = (off_t)((uintptr_t)offset & PAGEMASK);
1038 1038                          if ((prot & (PROT_WRITE | PROT_EXEC)) == PROT_EXEC) {
1039 1039                                  mflag |= MAP_TEXT;
1040 1040                                  MOBJ_STAT_ADD(map_ptload_text);
1041 1041                          } else {
1042 1042                                  mflag |= MAP_INITDATA;
1043 1043                                  MOBJ_STAT_ADD(map_ptload_initdata);
1044 1044                          }
1045 1045  
1046 1046                          /*
1047 1047                           * maxprot is passed as PROT_ALL so that mdb can
1048 1048                           * write to this segment.
1049 1049                           */
1050 1050                          if (error = VOP_MAP(vp, (offset_t)offset, as, &addr,
1051 1051                              len, prot, PROT_ALL, mflag, fcred, NULL)) {
1052 1052                                  return (error);
1053 1053                          }
1054 1054  
1055 1055                          /*
1056 1056                           * If the segment can fit and is relatively small, then
1057 1057                           * we prefault the entire segment in.  This is based
1058 1058                           * on the model that says the best working set of a
1059 1059                           * small program is all of its pages.
1060 1060                           * We only do this if freemem will not drop below
1061 1061                           * lotsfree since we don't want to induce paging.
1062 1062                           */
1063 1063                          npages = (spgcnt_t)btopr(len);
1064 1064                          availm = freemem - lotsfree;
1065 1065                          preread = (npages < availm && len < PGTHRESH) ? 1 : 0;
1066 1066  
1067 1067                          /*
1068 1068                           * If we aren't prefaulting the segment,
1069 1069                           * increment "deficit", if necessary to ensure
1070 1070                           * that pages will become available when this
1071 1071                           * process starts executing.
1072 1072                           */
1073 1073                          if (preread == 0 && npages > availm &&
1074 1074                              deficit < lotsfree) {
1075 1075                                  deficit += MIN((pgcnt_t)(npages - availm),
1076 1076                                      lotsfree - deficit);
1077 1077                          }
1078 1078  
1079 1079                          if (preread) {
1080 1080                                  (void) as_faulta(as, addr, len);
1081 1081                                  MOBJ_STAT_ADD(map_ptload_preread);
1082 1082                          }
1083 1083                  } else {
1084 1084                          /*
1085 1085                           * addr and offset were not aligned such that we could
1086 1086                           * use VOP_MAP, thus we need to as_map the memory we
1087 1087                           * need and then read the data in from disk.
1088 1088                           * This code path is a corner case which should never
1089 1089                           * be taken, but hand crafted binaries could trigger
1090 1090                           * this logic and it needs to work correctly.
1091 1091                           */
1092 1092                          MOBJ_STAT_ADD(map_ptload_unaligned_text);
1093 1093                          as_rangelock(as);
1094 1094                          (void) as_unmap(as, addr, len);
1095 1095  
1096 1096                          /*
1097 1097                           * We use zfod_argsp because we need to be able to
1098 1098                           * write to the mapping and then we'll change the
1099 1099                           * protections later if they are incorrect.
1100 1100                           */
1101 1101                          error = as_map(as, addr, len, segvn_create, zfod_argsp);
1102 1102                          as_rangeunlock(as);
1103 1103                          if (error) {
1104 1104                                  MOBJ_STAT_ADD(map_ptload_unaligned_map_fail);
1105 1105                                  return (error);
1106 1106                          }
1107 1107  
1108 1108                          /* Now read in the data from disk */
1109 1109                          error = vn_rdwr(UIO_READ, vp, oldaddr, oldlen, offset,
1110 1110                              UIO_USERSPACE, 0, (rlim64_t)0, fcred, NULL);
1111 1111                          if (error) {
1112 1112                                  MOBJ_STAT_ADD(map_ptload_unaligned_read_fail);
1113 1113                                  return (error);
1114 1114                          }
1115 1115  
1116 1116                          /*
1117 1117                           * Now set protections.
1118 1118                           */
1119 1119                          if (prot != PROT_ZFOD) {
1120 1120                                  (void) as_setprot(as, addr, len, prot);
1121 1121                          }
1122 1122                  }
1123 1123          }
1124 1124  
1125 1125          if (zfodlen) {
1126 1126                  end = (size_t)addr + len;
1127 1127                  zfodbase = (caddr_t)P2ROUNDUP(end, PAGESIZE);
1128 1128                  zfoddiff = (uintptr_t)zfodbase - end;
1129 1129                  if (zfoddiff) {
1130 1130                          /*
1131 1131                           * Before we go to zero the remaining space on the last
1132 1132                           * page, make sure we have write permission.
1133 1133                           *
1134 1134                           * We need to be careful how we zero-fill the last page
1135 1135                           * if the protection does not include PROT_WRITE. Using
1136 1136                           * as_setprot() can cause the VM segment code to call
1137 1137                           * segvn_vpage(), which must allocate a page struct for
1138 1138                           * each page in the segment. If we have a very large
1139 1139                           * segment, this may fail, so we check for that, even
1140 1140                           * though we ignore other return values from as_setprot.
1141 1141                           */
1142 1142                          MOBJ_STAT_ADD(zfoddiff);
1143 1143                          if ((prot & PROT_WRITE) == 0) {
1144 1144                                  if (as_setprot(as, (caddr_t)end, zfoddiff,
1145 1145                                      prot | PROT_WRITE) == ENOMEM)
1146 1146                                          return (ENOMEM);
1147 1147                                  MOBJ_STAT_ADD(zfoddiff_nowrite);
1148 1148                          }
1149 1149                          if (on_fault(&ljb)) {
1150 1150                                  no_fault();
1151 1151                                  if ((prot & PROT_WRITE) == 0) {
1152 1152                                          (void) as_setprot(as, (caddr_t)end,
1153 1153                                              zfoddiff, prot);
1154 1154                                  }
1155 1155                                  return (EFAULT);
1156 1156                          }
1157 1157                          uzero((void *)end, zfoddiff);
1158 1158                          no_fault();
1159 1159  
1160 1160                          /*
1161 1161                           * Remove write protection to return to original state
1162 1162                           */
1163 1163                          if ((prot & PROT_WRITE) == 0) {
1164 1164                                  (void) as_setprot(as, (caddr_t)end,
1165 1165                                      zfoddiff, prot);
1166 1166                          }
1167 1167                  }
1168 1168                  if (zfodlen > zfoddiff) {
1169 1169                          struct segvn_crargs crargs =
1170 1170                              SEGVN_ZFOD_ARGS(prot, PROT_ALL);
1171 1171  
1172 1172                          MOBJ_STAT_ADD(zfodextra);
1173 1173                          zfodlen -= zfoddiff;
1174 1174                          crargs.szc = AS_MAP_NO_LPOOB;
1175 1175  
1176 1176  
1177 1177                          as_rangelock(as);
1178 1178                          (void) as_unmap(as, (caddr_t)zfodbase, zfodlen);
1179 1179                          error = as_map(as, (caddr_t)zfodbase,
1180 1180                              zfodlen, segvn_create, &crargs);
1181 1181                          as_rangeunlock(as);
1182 1182                          if (error) {
1183 1183                                  return (error);
1184 1184                          }
1185 1185                  }
1186 1186          }
1187 1187          return (0);
1188 1188  }
1189 1189  
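/*
 * Illustrative sketch (hypothetical helper, for exposition only): the
 * zero-fill arithmetic used in mmapobj_map_ptload() above.  Given the end
 * of the file-backed portion of a segment, zfoddiff is the tail of the
 * last mapped page that must be zeroed by hand, and any zfodlen beyond
 * that page boundary is mapped as anonymous zero-fill-on-demand memory.
 * For example, with addr + len == 0x10f40 and PAGESIZE == 0x1000,
 * zfodbase is 0x11000 and zfoddiff is 0xc0.
 */
static void
zfod_split(caddr_t addr, size_t len, size_t zfodlen,
    caddr_t *zfodbasep, size_t *zfoddiffp, size_t *zfodextrap)
{
        size_t end = (size_t)addr + len;

        *zfodbasep = (caddr_t)P2ROUNDUP(end, PAGESIZE);
        *zfoddiffp = (uintptr_t)*zfodbasep - end;
        *zfodextrap = (zfodlen > *zfoddiffp) ? zfodlen - *zfoddiffp : 0;
}
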
1190 1190  /*
1191 1191   * Map the ELF file represented by vp into the user's address space.  The
1192 1192   * first mapping will start at start_addr and there will be num_elements
1193 1193   * mappings.  The mappings are described by the data in mrp which may be
1194 1194   * modified upon returning from this function.
1195 1195   * Returns 0 for success or errno for failure.
1196 1196   */
1197 1197  static int
1198 1198  mmapobj_map_elf(struct vnode *vp, caddr_t start_addr, mmapobj_result_t *mrp,
1199 1199      int num_elements, cred_t *fcred, ushort_t e_type)
1200 1200  {
1201 1201          int i;
1202 1202          int ret;
1203 1203          caddr_t lo;
1204 1204          caddr_t hi;
1205 1205          struct as *as = curproc->p_as;
1206 1206  
1207 1207          for (i = 0; i < num_elements; i++) {
1208 1208                  caddr_t addr;
1209 1209                  size_t p_memsz;
1210 1210                  size_t p_filesz;
1211 1211                  size_t zfodlen;
1212 1212                  offset_t p_offset;
1213 1213                  size_t dif;
1214 1214                  int prot;
1215 1215  
1216 1216                  /* Always need to adjust mr_addr */
1217 1217                  addr = start_addr + (size_t)(mrp[i].mr_addr);
1218 1218                  mrp[i].mr_addr =
1219 1219                      (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
1220 1220  
1221 1221                  /* Padding has already been mapped */
1222 1222                  if (MR_GET_TYPE(mrp[i].mr_flags) == MR_PADDING) {
1223 1223                          continue;
1224 1224                  }
1225 1225                  p_memsz = mrp[i].mr_msize;
1226 1226                  p_filesz = mrp[i].mr_fsize;
1227 1227                  zfodlen = p_memsz - p_filesz;
1228 1228                  p_offset = mrp[i].mr_offset;
1229 1229                  dif = (uintptr_t)(addr) & PAGEOFFSET;
1230 1230                  prot = mrp[i].mr_prot | PROT_USER;
1231 1231                  ret = mmapobj_map_ptload(vp, addr, p_filesz, zfodlen,
1232 1232                      p_offset, prot, fcred);
1233 1233                  if (ret != 0) {
1234 1234                          MOBJ_STAT_ADD(ptload_failed);
1235 1235                          mmapobj_unmap(mrp, i, num_elements, e_type);
1236 1236                          return (ret);
1237 1237                  }
1238 1238  
1239 1239                  /* Need to cleanup mrp to reflect the actual values used */
1240 1240                  mrp[i].mr_msize += dif;
1241 1241                  mrp[i].mr_offset = (size_t)addr & PAGEOFFSET;
1242 1242          }
1243 1243  
1244 1244          /* Also need to unmap any holes created above */
1245 1245          if (num_elements == 1) {
1246 1246                  MOBJ_STAT_ADD(map_elf_no_holes);
1247 1247                  return (0);
1248 1248          }
1249 1249          if (e_type == ET_EXEC) {
1250 1250                  return (0);
1251 1251          }
1252 1252  
1253 1253          as_rangelock(as);
1254 1254          lo = start_addr;
1255 1255          hi = mrp[0].mr_addr;
1256 1256  
1257 1257          /* Remove holes made by the rest of the segments */
1258 1258          for (i = 0; i < num_elements - 1; i++) {
1259 1259                  lo = (caddr_t)P2ROUNDUP((size_t)(mrp[i].mr_addr) +
1260 1260                      mrp[i].mr_msize, PAGESIZE);
1261 1261                  hi = mrp[i + 1].mr_addr;
1262 1262                  if (lo < hi) {
1263 1263                          /*
1264 1264                           * If as_unmap fails we just use up a bit of extra
1265 1265                           * space
1266 1266                           */
1267 1267                          (void) as_unmap(as, (caddr_t)lo,
1268 1268                              (size_t)hi - (size_t)lo);
1269 1269                          MOBJ_STAT_ADD(unmap_hole);
1270 1270                  }
1271 1271          }
1272 1272          as_rangeunlock(as);
1273 1273  
1274 1274          return (0);
1275 1275  }
1276 1276  
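/*
 * Illustrative sketch (hypothetical helper, for exposition only): the hole
 * computation used by mmapobj_map_elf() above when unmapping the gaps
 * between consecutive ET_DYN mappings.  A hole runs from the page-rounded
 * end of one mapping to the start of the next; a zero-length result means
 * the mappings abut and nothing is unmapped.
 */
static size_t
mapping_hole(mmapobj_result_t *cur, mmapobj_result_t *next, caddr_t *lop)
{
        caddr_t lo = (caddr_t)P2ROUNDUP((size_t)cur->mr_addr +
            cur->mr_msize, PAGESIZE);

        *lop = lo;
        return ((lo < next->mr_addr) ? (size_t)(next->mr_addr - lo) : 0);
}
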
1277 1277  /* Ugly hack to get STRUCT_* macros to work below */
1278 1278  struct myphdr {
1279 1279          Phdr            x;      /* native version */
1280 1280  };
1281 1281  
1282 1282  struct myphdr32 {
1283 1283          Elf32_Phdr      x;
1284 1284  };
1285 1285  
1286 1286  /*
1287 1287   * Calculate and return the number of loadable segments in the ELF Phdr
1288 1288   * represented by phdrbase as well as the len of the total mapping and
1289 1289   * the max alignment that is needed for a given segment.  On success,
1290 1290   * 0 is returned, and *len, *loadable and *align have been filled out.
1291 1291   * On failure, errno will be returned, which in this case is ENOTSUP
1292 1292   * if we were passed an ELF file with overlapping segments.
1293 1293   */
1294 1294  static int
1295 1295  calc_loadable(Ehdr *ehdrp, caddr_t phdrbase, int nphdrs, size_t *len,
1296 1296      int *loadable, size_t *align)
1297 1297  {
1298 1298          int i;
1299 1299          int hsize;
1300 1300          model_t model;
1301 1301          ushort_t e_type = ehdrp->e_type;        /* same offset 32 and 64 bit */
1302 1302          uint_t p_type;
1303 1303          offset_t p_offset;
1304 1304          size_t p_memsz;
1305 1305          size_t p_align;
1306 1306          caddr_t vaddr;
1307 1307          int num_segs = 0;
1308 1308          caddr_t start_addr = NULL;
1309 1309          caddr_t p_end = NULL;
1310 1310          size_t max_align = 0;
1311 1311          size_t min_align = PAGESIZE;    /* needed for vmem_xalloc */
1312 1312          STRUCT_HANDLE(myphdr, mph);
1313 1313  #if defined(__sparc)
1314 1314          extern int vac_size;
1315 1315  
1316 1316          /*
1317 1317           * Want to prevent aliasing by making the start address aligned to at
1318 1318           * least vac_size.
1319 1319           */
1320 1320          min_align = MAX(PAGESIZE, vac_size);
1321 1321  #endif
1322 1322  
1323 1323          model = get_udatamodel();
1324 1324          STRUCT_SET_HANDLE(mph, model, (struct myphdr *)phdrbase);
1325 1325  
1326 1326          /* hsize alignment should have been checked before calling this func */
1327 1327          if (model == DATAMODEL_LP64) {
1328 1328                  hsize = ehdrp->e_phentsize;
1329 1329                  if (hsize & 7) {
1330 1330                          return (ENOTSUP);
1331 1331                  }
1332 1332          } else {
1333 1333                  ASSERT(model == DATAMODEL_ILP32);
1334 1334                  hsize = ((Elf32_Ehdr *)ehdrp)->e_phentsize;
1335 1335                  if (hsize & 3) {
1336 1336                          return (ENOTSUP);
1337 1337                  }
1338 1338          }
1339 1339  
1340 1340          /*
1341 1341           * Determine the span of all loadable segments and calculate the
1342 1342           * number of loadable segments.
1343 1343           */
1344 1344          for (i = 0; i < nphdrs; i++) {
1345 1345                  p_type = STRUCT_FGET(mph, x.p_type);
1346 1346                  if (p_type == PT_LOAD || p_type == PT_SUNWBSS) {
1347 1347                          vaddr = (caddr_t)(uintptr_t)STRUCT_FGET(mph, x.p_vaddr);
1348 1348                          p_memsz = STRUCT_FGET(mph, x.p_memsz);
1349 1349  
1350 1350                          /*
1351 1351                           * Skip this header if it requests no memory to be
1352 1352                           * mapped.
1353 1353                           */
1354 1354                          if (p_memsz == 0) {
1355 1355                                  STRUCT_SET_HANDLE(mph, model,
1356 1356                                      (struct myphdr *)((size_t)STRUCT_BUF(mph) +
1357 1357                                      hsize));
1358 1358                                  MOBJ_STAT_ADD(nomem_header);
1359 1359                                  continue;
1360 1360                          }
1361 1361                          if (num_segs++ == 0) {
1362 1362                                  /*
1363 1363                                   * While ELF doesn't specify the meaning of
1364 1364                                   * p_vaddr for PT_LOAD segments in ET_DYN
1365 1365                                   * objects, we mandate that it is either NULL or
1366 1366                                   * (to accommodate some historical binaries)
1367 1367                                   * within the first page.  (Note that there
1368 1368                                   * exist non-native ET_DYN objects that violate
1369 1369                                   * this constraint that we nonetheless must be
1370 1370                                   * able to execute; see the ET_DYN handling in
1371 1371                                   * mapelfexec() for details.)
1372 1372                                   */
1373 1373                                  if (e_type == ET_DYN &&
1374 1374                                      ((caddr_t)((uintptr_t)vaddr &
1375 1375                                      (uintptr_t)PAGEMASK) != NULL)) {
1376 1376                                          MOBJ_STAT_ADD(inval_header);
1377 1377                                          return (ENOTSUP);
1378 1378                                  }
1379 1379                                  start_addr = vaddr;
1380 1380                                  /*
1381 1381                                   * For the first segment, we need to map from
1382 1382                                   * the beginning of the file, so we will
1383 1383                                   * adjust the size of the mapping to include
1384 1384                                   * this memory.
1385 1385                                   */
1386 1386                                  p_offset = STRUCT_FGET(mph, x.p_offset);
1387 1387                          } else {
1388 1388                                  p_offset = 0;
1389 1389                          }
1390 1390                          /*
1391 1391                           * Check to make sure that this mapping wouldn't
1392 1392                           * overlap a previous mapping.
1393 1393                           */
1394 1394                          if (vaddr < p_end) {
1395 1395                                  MOBJ_STAT_ADD(overlap_header);
1396 1396                                  return (ENOTSUP);
1397 1397                          }
1398 1398  
1399 1399                          p_end = vaddr + p_memsz + p_offset;
1400 1400                          p_end = (caddr_t)P2ROUNDUP((size_t)p_end, PAGESIZE);
1401 1401  
1402 1402                          p_align = STRUCT_FGET(mph, x.p_align);
1403 1403                          if (p_align > 1 && p_align > max_align) {
1404 1404                                  max_align = p_align;
1405 1405                                  if (max_align < min_align) {
1406 1406                                          max_align = min_align;
1407 1407                                          MOBJ_STAT_ADD(min_align);
1408 1408                                  }
1409 1409                          }
1410 1410                  }
1411 1411                  STRUCT_SET_HANDLE(mph, model,
1412 1412                      (struct myphdr *)((size_t)STRUCT_BUF(mph) + hsize));
1413 1413          }
1414 1414  
1415 1415          /*
1416 1416           * The alignment should be a power of 2; if it isn't, we forgive it
1417 1417           * and round up.  On overflow, we'll set the alignment to max_align
1418 1418           * rounded down to the nearest power of 2.
1419 1419           */
1420 1420          if (max_align > 0 && !ISP2(max_align)) {
1421 1421                  MOBJ_STAT_ADD(np2_align);
1422 1422                  *align = 2 * (1L << (highbit(max_align) - 1));
1423 1423                  if (*align < max_align ||
1424 1424                      (*align > UINT_MAX && model == DATAMODEL_ILP32)) {
1425 1425                          MOBJ_STAT_ADD(np2_align_overflow);
1426 1426                          *align = 1L << (highbit(max_align) - 1);
1427 1427                  }
1428 1428          } else {
1429 1429                  *align = max_align;
1430 1430          }
1431 1431  
1432 1432          ASSERT(*align >= PAGESIZE || *align == 0);
1433 1433  
1434 1434          *loadable = num_segs;
1435 1435          *len = p_end - start_addr;
1436 1436          return (0);
1437 1437  }
1438 1438  
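/*
 * Illustrative sketch (hypothetical helper, for exposition only): the
 * power-of-two rounding applied to max_align in calc_loadable() above.
 * highbit() returns the 1-based index of the highest set bit, so
 * 1L << (highbit(x) - 1) is x rounded down to a power of two and doubling
 * that rounds x up.  For example, 0x3000 rounds up to 0x4000; on overflow
 * the caller above falls back to the rounded-down value instead.
 */
static size_t
align_roundup_p2(size_t max_align)
{
        if (ISP2(max_align))
                return (max_align);
        return (2 * (1L << (highbit(max_align) - 1)));
}
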
1439 1439  /*
1440 1440   * Check the address space to see if the virtual addresses to be used are
1441 1441   * available.  If they are not, return errno for failure.  On success, 0
1442 1442   * will be returned, and the virtual addresses for each mmapobj_result_t
1443 1443   * will be reserved.  Note that a reservation could have earlier been made
1444 1444   * for a given segment via a /dev/null mapping.  If that is the case, then
1445 1445   * we can use that VA space for our mappings.
1446 1446   * Note: this function will only be used for ET_EXEC binaries.
1447 1447   */
1448 1448  int
1449 1449  check_exec_addrs(int loadable, mmapobj_result_t *mrp, caddr_t start_addr)
1450 1450  {
1451 1451          int i;
1452 1452          struct as *as = curproc->p_as;
1453 1453          struct segvn_crargs crargs = SEGVN_ZFOD_ARGS(PROT_ZFOD, PROT_ALL);
1454 1454          int ret;
1455 1455          caddr_t myaddr;
1456 1456          size_t mylen;
1457 1457          struct seg *seg;
1458 1458  
1459 1459          /* No need to reserve swap space now since it will be reserved later */
1460 1460          crargs.flags |= MAP_NORESERVE;
1461 1461          as_rangelock(as);
1462 1462          for (i = 0; i < loadable; i++) {
1463 1463  
1464 1464                  myaddr = start_addr + (size_t)mrp[i].mr_addr;
1465 1465                  mylen = mrp[i].mr_msize;
1466 1466  
1467 1467                  /* See if there is a hole in the as for this range */
1468 1468                  if (as_gap(as, mylen, &myaddr, &mylen, 0, NULL) == 0) {
1469 1469                          ASSERT(myaddr == start_addr + (size_t)mrp[i].mr_addr);
1470 1470                          ASSERT(mylen == mrp[i].mr_msize);
1471 1471  
1472 1472  #ifdef DEBUG
1473 1473                          if (MR_GET_TYPE(mrp[i].mr_flags) == MR_PADDING) {
1474 1474                                  MOBJ_STAT_ADD(exec_padding);
1475 1475                          }
1476 1476  #endif
1477 1477                          ret = as_map(as, myaddr, mylen, segvn_create, &crargs);
1478 1478                          if (ret) {
1479 1479                                  as_rangeunlock(as);
1480 1480                                  mmapobj_unmap_exec(mrp, i, start_addr);
1481 1481                                  return (ret);
1482 1482                          }
1483 1483                  } else {
1484 1484                          /*
1485 1485                           * There is a mapping that exists in the range
1486 1486                           * so check to see if it was a "reservation"
1487 1487                           * from /dev/null.  The mapping is from
1488 1488                           * /dev/null if the mapping comes from
1489 1489                           * segdev and the type is neither MAP_SHARED
1490 1490                           * nor MAP_PRIVATE.
1491 1491                           */
1492 1492                          AS_LOCK_ENTER(as, RW_READER);
1493 1493                          seg = as_findseg(as, myaddr, 0);
1494 1494                          MOBJ_STAT_ADD(exec_addr_mapped);
1495 1495                          if (seg && seg->s_ops == &segdev_ops &&
1496 1496                              ((SEGOP_GETTYPE(seg, myaddr) &
1497 1497                              (MAP_SHARED | MAP_PRIVATE)) == 0) &&
1498 1498                              myaddr >= seg->s_base &&
1499 1499                              myaddr + mylen <=
1500 1500                              seg->s_base + seg->s_size) {
1501 1501                                  MOBJ_STAT_ADD(exec_addr_devnull);
1502 1502                                  AS_LOCK_EXIT(as);
1503 1503                                  (void) as_unmap(as, myaddr, mylen);
1504 1504                                  ret = as_map(as, myaddr, mylen, segvn_create,
1505 1505                                      &crargs);
1506 1506                                  mrp[i].mr_flags |= MR_RESV;
1507 1507                                  if (ret) {
1508 1508                                          as_rangeunlock(as);
1509 1509                                          /* Need to remap what we unmapped */
1510 1510                                          mmapobj_unmap_exec(mrp, i + 1,
1511 1511                                              start_addr);
1512 1512                                          return (ret);
1513 1513                                  }
1514 1514                          } else {
1515 1515                                  AS_LOCK_EXIT(as);
1516 1516                                  as_rangeunlock(as);
1517 1517                                  mmapobj_unmap_exec(mrp, i, start_addr);
1518 1518                                  MOBJ_STAT_ADD(exec_addr_in_use);
1519 1519                                  return (EADDRINUSE);
1520 1520                          }
1521 1521                  }
1522 1522          }
1523 1523          as_rangeunlock(as);
1524 1524          return (0);
1525 1525  }
1526 1526  
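/*
 * Illustrative sketch (hypothetical helper, for exposition only): the test
 * used by check_exec_addrs() above to decide whether an existing mapping
 * is a /dev/null "reservation" that may be replaced.  Such a mapping is
 * backed by segdev, reports neither MAP_SHARED nor MAP_PRIVATE, and must
 * fully cover the range being claimed.  The caller holds the address
 * space lock as reader while making this check.
 */
static int
is_devnull_resv(struct seg *seg, caddr_t addr, size_t len)
{
        return (seg != NULL && seg->s_ops == &segdev_ops &&
            (SEGOP_GETTYPE(seg, addr) & (MAP_SHARED | MAP_PRIVATE)) == 0 &&
            addr >= seg->s_base && addr + len <= seg->s_base + seg->s_size);
}
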
1527 1527  /*
1528 1528   * Walk through the ELF program headers and extract all useful information
1529 1529   * for PT_LOAD and PT_SUNWBSS segments into mrp.
1530 1530   * Return 0 on success or error on failure.
1531 1531   */
1532 1532  static int
1533 1533  process_phdr(Ehdr *ehdrp, caddr_t phdrbase, int nphdrs, mmapobj_result_t *mrp,
1534 1534      vnode_t *vp, uint_t *num_mapped, size_t padding, cred_t *fcred)
1535 1535  {
1536 1536          int i;
1537 1537          caddr_t start_addr = NULL;
1538 1538          caddr_t vaddr;
1539 1539          size_t len = 0;
1540 1540          size_t lib_len = 0;
1541 1541          int ret;
1542 1542          int prot;
1543 1543          struct lib_va *lvp = NULL;
1544 1544          vattr_t vattr;
1545 1545          struct as *as = curproc->p_as;
1546 1546          int error;
1547 1547          int loadable = 0;
1548 1548          int current = 0;
1549 1549          int use_lib_va = 1;
1550 1550          size_t align = 0;
1551 1551          size_t add_pad = 0;
1552 1552          int hdr_seen = 0;
1553 1553          ushort_t e_type = ehdrp->e_type;        /* same offset 32 and 64 bit */
1554 1554          uint_t p_type;
1555 1555          offset_t p_offset;
1556 1556          size_t p_memsz;
1557 1557          size_t p_filesz;
1558 1558          uint_t p_flags;
1559 1559          int hsize;
1560 1560          model_t model;
1561 1561          STRUCT_HANDLE(myphdr, mph);
1562 1562  
1563 1563          model = get_udatamodel();
1564 1564          STRUCT_SET_HANDLE(mph, model, (struct myphdr *)phdrbase);
1565 1565  
1566 1566          /*
1567 1567           * Need to make sure that hsize is aligned properly.
1568 1568           * For 32bit processes, 4 byte alignment is required.
1569 1569           * For 64bit processes, 8 byte alignment is required.
1570 1570           * If the alignment isn't correct, we need to return failure
1571 1571           * since it could cause an alignment error panic while walking
1572 1572           * the phdr array.
1573 1573           */
1574 1574          if (model == DATAMODEL_LP64) {
1575 1575                  hsize = ehdrp->e_phentsize;
1576 1576                  if (hsize & 7) {
1577 1577                          MOBJ_STAT_ADD(phent_align64);
1578 1578                          return (ENOTSUP);
1579 1579                  }
1580 1580          } else {
1581 1581                  ASSERT(model == DATAMODEL_ILP32);
1582 1582                  hsize = ((Elf32_Ehdr *)ehdrp)->e_phentsize;
1583 1583                  if (hsize & 3) {
1584 1584                          MOBJ_STAT_ADD(phent_align32);
1585 1585                          return (ENOTSUP);
1586 1586                  }
1587 1587          }
1588 1588  
1589 1589          if (padding != 0) {
1590 1590                  use_lib_va = 0;
1591 1591          }
1592 1592          if (e_type == ET_DYN) {
1593 1593                  vattr.va_mask = AT_FSID | AT_NODEID | AT_CTIME | AT_MTIME;
1594 1594                  error = VOP_GETATTR(vp, &vattr, 0, fcred, NULL);
1595 1595                  if (error) {
1596 1596                          return (error);
1597 1597                  }
1598 1598                  /* Check to see if we already have a description for this lib */
1599 1599                  lvp = lib_va_find(&vattr);
1600 1600  
1601 1601                  if (lvp != NULL) {
1602 1602                          MOBJ_STAT_ADD(lvp_found);
1603 1603                          if (use_lib_va) {
1604 1604                                  start_addr = mmapobj_lookup_start_addr(lvp);
1605 1605                                  if (start_addr == NULL) {
1606 1606                                          lib_va_release(lvp);
1607 1607                                          return (ENOMEM);
1608 1608                                  }
1609 1609                          }
1610 1610  
1611 1611                          /*
1612 1612                           * loadable may be zero if the original allocator
1613 1613                           * of lvp hasn't finished setting it up, but the rest
1614 1614                           * of the fields will be accurate.
1615 1615                           */
1616 1616                          loadable = lvp->lv_num_segs;
1617 1617                          len = lvp->lv_len;
1618 1618                          align = lvp->lv_align;
1619 1619                  }
1620 1620          }
1621 1621  
1622 1622          /*
1623 1623           * Determine the span of all loadable segments and calculate the
1624 1624           * number of loadable segments, the total len spanned by the mappings
1625 1625           * and the max alignment, if we didn't get them above.
1626 1626           */
1627 1627          if (loadable == 0) {
1628 1628                  MOBJ_STAT_ADD(no_loadable_yet);
1629 1629                  ret = calc_loadable(ehdrp, phdrbase, nphdrs, &len,
1630 1630                      &loadable, &align);
1631 1631                  if (ret != 0) {
1632 1632                          /*
1633 1633                           * Since it'd be an invalid file, we shouldn't have
1634 1634                           * cached it previously.
1635 1635                           */
1636 1636                          ASSERT(lvp == NULL);
1637 1637                          return (ret);
1638 1638                  }
1639 1639  #ifdef DEBUG
1640 1640                  if (lvp) {
1641 1641                          ASSERT(len == lvp->lv_len);
1642 1642                          ASSERT(align == lvp->lv_align);
1643 1643                  }
1644 1644  #endif
1645 1645          }
1646 1646  
1647 1647          /* Make sure there's something to map. */
1648 1648          if (len == 0 || loadable == 0) {
1649 1649                  /*
1650 1650                   * Since it'd be an invalid file, we shouldn't have
1651 1651                   * cached it previously.
1652 1652                   */
1653 1653                  ASSERT(lvp == NULL);
1654 1654                  MOBJ_STAT_ADD(nothing_to_map);
1655 1655                  return (ENOTSUP);
1656 1656          }
1657 1657  
1658 1658          lib_len = len;
1659 1659          if (padding != 0) {
1660 1660                  loadable += 2;
1661 1661          }
1662 1662          if (loadable > *num_mapped) {
1663 1663                  *num_mapped = loadable;
1664 1664                  /* cleanup previous reservation */
1665 1665                  if (start_addr) {
1666 1666                          (void) as_unmap(as, start_addr, lib_len);
1667 1667                  }
1668 1668                  MOBJ_STAT_ADD(e2big);
1669 1669                  if (lvp) {
1670 1670                          lib_va_release(lvp);
1671 1671                  }
1672 1672                  return (E2BIG);
1673 1673          }
1674 1674  
1675 1675          /*
1676 1676           * We now know the size of the object to map and now we need to
1677 1677           * get the start address to map it at.  It's possible we already
1678 1678           * have it if we found all the info we need in the lib_va cache.
1679 1679           */
1680 1680          if (e_type == ET_DYN && start_addr == NULL) {
1681 1681                  /*
1682 1682                   * Need to make sure padding does not throw off
1683 1683                   * required alignment.  We can only specify an
1684 1684                   * alignment for the starting address to be mapped,
1685 1685                   * so we round padding up to the alignment and map
1686 1686                   * from there and then throw out the extra later.
1687 1687                   */
1688 1688                  if (padding != 0) {
1689 1689                          if (align > 1) {
1690 1690                                  add_pad = P2ROUNDUP(padding, align);
1691 1691                                  len += add_pad;
1692 1692                                  MOBJ_STAT_ADD(dyn_pad_align);
1693 1693                          } else {
1694 1694                                  MOBJ_STAT_ADD(dyn_pad_noalign);
1695 1695                                  len += padding; /* at beginning */
1696 1696                          }
1697 1697                          len += padding; /* at end of mapping */
1698 1698                  }
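                /*
                 * Worked example (hypothetical values): with padding ==
                 * 0x1000 and align == 0x10000, add_pad is
                 * P2ROUNDUP(0x1000, 0x10000) == 0x10000, so len grows by
                 * 0x10000 at the front and another 0x1000 at the end.
                 * Once an aligned start address is obtained below, the
                 * leading add_pad - padding == 0xf000 bytes are unmapped
                 * and start_addr advances past them, leaving exactly
                 * padding bytes in front of the first segment.
                 */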
1699 1699                  /*
1700 1700                   * At this point, if lvp is non-NULL, then above we
1701 1701                   * already found it in the cache but did not get
1702 1702                   * the start address since we were not going to use lib_va.
1703 1703                   * Since we know that lib_va will not be used, it's safe
1704 1704                   * to call mmapobj_alloc_start_addr and know that lvp
1705 1705                   * will not be modified.
1706 1706                   */
1707 1707                  ASSERT(lvp ? use_lib_va == 0 : 1);
1708 1708                  start_addr = mmapobj_alloc_start_addr(&lvp, len,
1709 1709                      use_lib_va, align, &vattr);
1710 1710                  if (start_addr == NULL) {
1711 1711                          if (lvp) {
1712 1712                                  lib_va_release(lvp);
1713 1713                          }
1714 1714                          MOBJ_STAT_ADD(alloc_start_fail);
1715 1715                          return (ENOMEM);
1716 1716                  }
1717 1717                  /*
1718 1718                   * If we can't cache it, no need to hang on to it.
1719 1719                   * Setting lv_num_segs to non-zero will make that
1720 1720                   * field active and since there are too many segments
1721 1721                   * to cache, all future users will not try to use lv_mps.
1722 1722                   */
1723 1723                  if (lvp != NULL && loadable > LIBVA_CACHED_SEGS && use_lib_va) {
1724 1724                          lvp->lv_num_segs = loadable;
1725 1725                          lib_va_release(lvp);
1726 1726                          lvp = NULL;
1727 1727                          MOBJ_STAT_ADD(lvp_nocache);
1728 1728                  }
1729 1729                  /*
1730 1730                   * Free the beginning of the mapping if the padding
1731 1731                   * was not aligned correctly.
1732 1732                   */
1733 1733                  if (padding != 0 && add_pad != padding) {
1734 1734                          (void) as_unmap(as, start_addr,
1735 1735                              add_pad - padding);
1736 1736                          start_addr += (add_pad - padding);
1737 1737                          MOBJ_STAT_ADD(extra_padding);
1738 1738                  }
1739 1739          }
1740 1740  
1741 1741          /*
1742 1742           * At this point, we have reserved the virtual address space
1743 1743           * for our mappings.  Now we need to start filling out the mrp
1744 1744           * array to describe all of the individual mappings we are going
1745 1745           * to return.
1746 1746           * For ET_EXEC there has been no memory reservation since we are
1747 1747           * using fixed addresses.  While filling in the mrp array below,
1748 1748           * we will have the first segment biased to start at addr 0
1749 1749           * and the rest will be biased by this same amount.  Thus if there
1750 1750           * is padding, the first padding will start at addr 0, and the next
1751 1751           * segment will start at the value of padding.
1752 1752           */
1753 1753  
1754 1754          /* We'll fill out padding later, so start filling in mrp at index 1 */
1755 1755          if (padding != 0) {
1756 1756                  current = 1;
1757 1757          }
1758 1758  
1759 1759          /* If we have no more need for lvp let it go now */
1760 1760          if (lvp != NULL && use_lib_va == 0) {
1761 1761                  lib_va_release(lvp);
1762 1762                  MOBJ_STAT_ADD(lvp_not_needed);
1763 1763                  lvp = NULL;
1764 1764          }
1765 1765  
1766 1766          /* Now fill out the mrp structs from the program headers */
1767 1767          STRUCT_SET_HANDLE(mph, model, (struct myphdr *)phdrbase);
1768 1768          for (i = 0; i < nphdrs; i++) {
1769 1769                  p_type = STRUCT_FGET(mph, x.p_type);
1770 1770                  if (p_type == PT_LOAD || p_type == PT_SUNWBSS) {
1771 1771                          vaddr = (caddr_t)(uintptr_t)STRUCT_FGET(mph, x.p_vaddr);
1772 1772                          p_memsz = STRUCT_FGET(mph, x.p_memsz);
1773 1773                          p_filesz = STRUCT_FGET(mph, x.p_filesz);
1774 1774                          p_offset = STRUCT_FGET(mph, x.p_offset);
1775 1775                          p_flags = STRUCT_FGET(mph, x.p_flags);
1776 1776  
1777 1777                          /*
1778 1778                           * Skip this header if it requests no memory to be
1779 1779                           * mapped.
1780 1780                           */
1781 1781                          if (p_memsz == 0) {
1782 1782                                  STRUCT_SET_HANDLE(mph, model,
1783 1783                                      (struct myphdr *)((size_t)STRUCT_BUF(mph) +
1784 1784                                      hsize));
1785 1785                                  MOBJ_STAT_ADD(no_mem_map_sz);
1786 1786                                  continue;
1787 1787                          }
1788 1788  
1789 1789                          prot = 0;
1790 1790                          if (p_flags & PF_R)
1791 1791                                  prot |= PROT_READ;
1792 1792                          if (p_flags & PF_W)
1793 1793                                  prot |= PROT_WRITE;
1794 1794                          if (p_flags & PF_X)
1795 1795                                  prot |= PROT_EXEC;
1796 1796  
1797 1797                          ASSERT(current < loadable);
1798 1798                          mrp[current].mr_msize = p_memsz;
1799 1799                          mrp[current].mr_fsize = p_filesz;
1800 1800                          mrp[current].mr_offset = p_offset;
1801 1801                          mrp[current].mr_prot = prot;
1802 1802  
1803 1803                          if (hdr_seen == 0 && p_filesz != 0) {
1804 1804                                  mrp[current].mr_flags = MR_HDR_ELF;
1805 1805                                  /*
1806 1806                                   * We modify mr_offset because we
1807 1807                                   * need to map the ELF header as well, and if
1808 1808                                   * we didn't then the header could be left out
1809 1809                                   * of the mapping that we will create later.
1810 1810                                   * Since we're removing the offset, we need to
1811 1811                                   * account for that in the other fields as well
1812 1812                                   * since we will be mapping the memory from 0
1813 1813                                   * to p_offset.
1814 1814                                   */
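                                /*
                                 * Worked example (hypothetical values):
                                 * an ET_DYN object whose first PT_LOAD
                                 * has p_offset == p_vaddr == 0x1c0 gets
                                 * mr_offset set to 0 and mr_msize and
                                 * mr_fsize each grown by 0x1c0, so the
                                 * mapping created later starts at file
                                 * offset 0 and covers the ELF header
                                 * (and typically the program headers)
                                 * as well as the segment itself.
                                 */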
1815 1815                                  if (e_type == ET_DYN) {
1816 1816                                          mrp[current].mr_offset = 0;
1817 1817                                          mrp[current].mr_msize += p_offset;
1818 1818                                          mrp[current].mr_fsize += p_offset;
1819 1819                                  } else {
1820 1820                                          ASSERT(e_type == ET_EXEC);
1821 1821                                          /*
1822 1822                                           * Save off the start addr which will be
1823 1823                                           * our bias for the rest of the
1824 1824                                           * ET_EXEC mappings.
1825 1825                                           */
1826 1826                                          start_addr = vaddr - padding;
1827 1827                                  }
1828 1828                                  mrp[current].mr_addr = (caddr_t)padding;
1829 1829                                  hdr_seen = 1;
1830 1830                          } else {
1831 1831                                  if (e_type == ET_EXEC) {
1832 1832                                          /* bias mr_addr */
1833 1833                                          mrp[current].mr_addr =
1834 1834                                              vaddr - (size_t)start_addr;
1835 1835                                  } else {
1836 1836                                          mrp[current].mr_addr = vaddr + padding;
1837 1837                                  }
1838 1838                                  mrp[current].mr_flags = 0;
1839 1839                          }
1840 1840                          current++;
1841 1841                  }
1842 1842  
1843 1843                  /* Move to next phdr */
1844 1844                  STRUCT_SET_HANDLE(mph, model,
1845 1845                      (struct myphdr *)((size_t)STRUCT_BUF(mph) +
1846 1846                      hsize));
1847 1847          }
1848 1848  
1849 1849          /* Now fill out the padding segments */
1850 1850          if (padding != 0) {
1851 1851                  mrp[0].mr_addr = NULL;
1852 1852                  mrp[0].mr_msize = padding;
1853 1853                  mrp[0].mr_fsize = 0;
1854 1854                  mrp[0].mr_offset = 0;
1855 1855                  mrp[0].mr_prot = 0;
1856 1856                  mrp[0].mr_flags = MR_PADDING;
1857 1857  
1858 1858                  /* Setup padding for the last segment */
1859 1859                  ASSERT(current == loadable - 1);
1860 1860                  mrp[current].mr_addr = (caddr_t)lib_len + padding;
1861 1861                  mrp[current].mr_msize = padding;
1862 1862                  mrp[current].mr_fsize = 0;
1863 1863                  mrp[current].mr_offset = 0;
1864 1864                  mrp[current].mr_prot = 0;
1865 1865                  mrp[current].mr_flags = MR_PADDING;
1866 1866          }
1867 1867  
1868 1868          /*
1869 1869           * Need to make sure address ranges desired are not in use or
1870 1870           * are previously allocated reservations from /dev/null.  For
1871 1871           * ET_DYN, we already made sure our address range was free.
1872 1872           */
1873 1873          if (e_type == ET_EXEC) {
1874 1874                  ret = check_exec_addrs(loadable, mrp, start_addr);
1875 1875                  if (ret != 0) {
1876 1876                          ASSERT(lvp == NULL);
1877 1877                          MOBJ_STAT_ADD(check_exec_failed);
1878 1878                          return (ret);
1879 1879                  }
1880 1880          }
1881 1881  
1882 1882          /* Finish up our business with lvp. */
1883 1883          if (lvp) {
1884 1884                  ASSERT(e_type == ET_DYN);
1885 1885                  if (lvp->lv_num_segs == 0 && loadable <= LIBVA_CACHED_SEGS) {
1886 1886                          bcopy(mrp, lvp->lv_mps,
1887 1887                              loadable * sizeof (mmapobj_result_t));
1888 1888                          membar_producer();
1889 1889                  }
1890 1890                  /*
1891 1891                   * Setting lv_num_segs to a non-zero value indicates that
1892 1892                   * lv_mps is now valid and can be used by other threads.
1893 1893                   * So, the above stores need to finish before lv_num_segs
1894 1894                   * is updated.  lv_mps is only valid if lv_num_segs is greater
1895 1895                   * than zero and no greater than LIBVA_CACHED_SEGS.
1896 1896                   */
1897 1897                  lvp->lv_num_segs = loadable;
1898 1898                  lib_va_release(lvp);
1899 1899                  MOBJ_STAT_ADD(lvp_used);
1900 1900          }
1901 1901  
1902 1902          /* Now that we have mrp completely filled out go map it */
1903 1903          ret = mmapobj_map_elf(vp, start_addr, mrp, loadable, fcred, e_type);
1904 1904          if (ret == 0) {
1905 1905                  *num_mapped = loadable;
1906 1906          }
1907 1907  
1908 1908          return (ret);
1909 1909  }
1910 1910  
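/*
 * Illustrative sketch (hypothetical helper, for exposition only): the
 * PF_* to PROT_* conversion performed by process_phdr() above while
 * filling in each mmapobj_result_t.  mmapobj_map_elf() later ORs in
 * PROT_USER before creating the mappings.
 */
static int
phdr_flags_to_prot(uint_t p_flags)
{
        int prot = 0;

        if (p_flags & PF_R)
                prot |= PROT_READ;
        if (p_flags & PF_W)
                prot |= PROT_WRITE;
        if (p_flags & PF_X)
                prot |= PROT_EXEC;
        return (prot);
}
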
1911 1911  /*
1912 1912   * Take the ELF file passed in, and do the work of mapping it.
1913 1913   * num_mapped in - # elements in user buffer
1914 1914   * num_mapped out - # sections mapped and length of mrp array if
1915 1915   *                      no errors.
1916 1916   */
1917 1917  static int
1918 1918  doelfwork(Ehdr *ehdrp, vnode_t *vp, mmapobj_result_t *mrp,
1919 1919      uint_t *num_mapped, size_t padding, cred_t *fcred)
1920 1920  {
1921 1921          int error;
1922 1922          offset_t phoff;
1923 1923          int nphdrs;
1924 1924          unsigned char ei_class;
1925 1925          unsigned short phentsize;
1926 1926          ssize_t phsizep;
1927 1927          caddr_t phbasep;
1928 1928          int to_map;
1929 1929          model_t model;
1930 1930  
1931 1931          ei_class = ehdrp->e_ident[EI_CLASS];
1932 1932          model = get_udatamodel();
1933 1933          if ((model == DATAMODEL_ILP32 && ei_class == ELFCLASS64) ||
1934 1934              (model == DATAMODEL_LP64 && ei_class == ELFCLASS32)) {
1935 1935                  MOBJ_STAT_ADD(wrong_model);
1936 1936                  return (ENOTSUP);
1937 1937          }
1938 1938  
1939 1939          /* Can't execute code from "noexec" mounted filesystem. */
1940 1940          if (ehdrp->e_type == ET_EXEC &&
1941 1941              (vp->v_vfsp->vfs_flag & VFS_NOEXEC) != 0) {
1942 1942                  MOBJ_STAT_ADD(noexec_fs);
1943 1943                  return (EACCES);
1944 1944          }
1945 1945  
1946 1946          /*
1947 1947           * Relocatable and core files are mapped as a single flat file
1948 1948           * since no interpretation is done on them by mmapobj.
1949 1949           */
1950 1950          if (ehdrp->e_type == ET_REL || ehdrp->e_type == ET_CORE) {
1951 1951                  to_map = padding ? 3 : 1;
1952 1952                  if (*num_mapped < to_map) {
1953 1953                          *num_mapped = to_map;
1954 1954                          MOBJ_STAT_ADD(e2big_et_rel);
1955 1955                          return (E2BIG);
1956 1956                  }
1957 1957                  error = mmapobj_map_flat(vp, mrp, padding, fcred);
1958 1958                  if (error == 0) {
1959 1959                          *num_mapped = to_map;
1960 1960                          mrp[padding ? 1 : 0].mr_flags = MR_HDR_ELF;
1961 1961                          MOBJ_STAT_ADD(et_rel_mapped);
1962 1962                  }
1963 1963                  return (error);
1964 1964          }
1965 1965  
1966 1966          /* Check for an unknown ELF type */
1967 1967          if (ehdrp->e_type != ET_EXEC && ehdrp->e_type != ET_DYN) {
1968 1968                  MOBJ_STAT_ADD(unknown_elf_type);
1969 1969                  return (ENOTSUP);
1970 1970          }
1971 1971  
1972 1972          if (ei_class == ELFCLASS32) {
1973 1973                  Elf32_Ehdr *e32hdr = (Elf32_Ehdr *)ehdrp;
1974 1974                  ASSERT(model == DATAMODEL_ILP32);
1975 1975                  nphdrs = e32hdr->e_phnum;
1976 1976                  phentsize = e32hdr->e_phentsize;
1977 1977                  if (phentsize < sizeof (Elf32_Phdr)) {
1978 1978                          MOBJ_STAT_ADD(phent32_too_small);
1979 1979                          return (ENOTSUP);
1980 1980                  }
1981 1981                  phoff = e32hdr->e_phoff;
1982 1982          } else if (ei_class == ELFCLASS64) {
1983 1983                  Elf64_Ehdr *e64hdr = (Elf64_Ehdr *)ehdrp;
1984 1984                  ASSERT(model == DATAMODEL_LP64);
1985 1985                  nphdrs = e64hdr->e_phnum;
1986 1986                  phentsize = e64hdr->e_phentsize;
1987 1987                  if (phentsize < sizeof (Elf64_Phdr)) {
1988 1988                          MOBJ_STAT_ADD(phent64_too_small);
1989 1989                          return (ENOTSUP);
1990 1990                  }
1991 1991                  phoff = e64hdr->e_phoff;
1992 1992          } else {
1993 1993                  /* fallthrough case for an invalid ELF class */
1994 1994                  MOBJ_STAT_ADD(inval_elf_class);
1995 1995                  return (ENOTSUP);
1996 1996          }
1997 1997  
1998 1998          /*
1999 1999           * nphdrs should only have this value for core files which are handled
2000 2000           * above as a single mapping.  If other file types ever use this
2001 2001           * sentinel, then we'll add the support needed to handle this here.
2002 2002           */
2003 2003          if (nphdrs == PN_XNUM) {
2004 2004                  MOBJ_STAT_ADD(too_many_phdrs);
2005 2005                  return (ENOTSUP);
2006 2006          }
2007 2007  
2008 2008          phsizep = nphdrs * phentsize;
2009 2009  
2010 2010          if (phsizep == 0) {
2011 2011                  MOBJ_STAT_ADD(no_phsize);
2012 2012                  return (ENOTSUP);
2013 2013          }
2014 2014  
2015 2015          /* Make sure we only wait for memory if it's a reasonable request */
2016 2016          if (phsizep > mmapobj_alloc_threshold) {
2017 2017                  MOBJ_STAT_ADD(phsize_large);
2018 2018                  if ((phbasep = kmem_alloc(phsizep, KM_NOSLEEP)) == NULL) {
2019 2019                          MOBJ_STAT_ADD(phsize_xtralarge);
2020 2020                          return (ENOMEM);
2021 2021                  }
2022 2022          } else {
2023 2023                  phbasep = kmem_alloc(phsizep, KM_SLEEP);
2024 2024          }
2025 2025  
2026 2026          if ((error = vn_rdwr(UIO_READ, vp, phbasep, phsizep,
2027 2027              (offset_t)phoff, UIO_SYSSPACE, 0, (rlim64_t)0,
2028 2028              fcred, NULL)) != 0) {
2029 2029                  kmem_free(phbasep, phsizep);
2030 2030                  return (error);
2031 2031          }
2032 2032  
2033 2033          /* Now process the phdr's */
2034 2034          error = process_phdr(ehdrp, phbasep, nphdrs, mrp, vp, num_mapped,
2035 2035              padding, fcred);
2036 2036          kmem_free(phbasep, phsizep);
2037 2037          return (error);
2038 2038  }
2039 2039  
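/*
 * Illustrative sketch (hypothetical helper, for exposition only): the
 * allocation policy doelfwork() applies to the program header buffer
 * above.  A request at or below mmapobj_alloc_threshold may block for
 * memory (KM_SLEEP); anything larger uses KM_NOSLEEP so an unreasonably
 * large header table fails with ENOMEM instead of hanging the process.
 */
static caddr_t
phdr_buf_alloc(size_t phsizep)
{
        if (phsizep > mmapobj_alloc_threshold)
                return (kmem_alloc(phsizep, KM_NOSLEEP));
        return (kmem_alloc(phsizep, KM_SLEEP));
}
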
2040 2040  #if defined(__sparc)
2041 2041  /*
2042 2042   * Hack to support 64 bit kernels running AOUT 4.x programs.
2043 2043   * This is the sizeof (struct nlist) for a 32 bit kernel.
2044 2044   * Since AOUT programs are 32 bit only, they will never use the 64 bit
2045 2045   * sizeof (struct nlist); a #define is the simplest way around this,
2046 2046   * since this is a format which is not being updated.
2047 2047   * This will be used in the place of sizeof (struct nlist) below.
2048 2048   */
2049 2049  #define NLIST_SIZE      (0xC)
2050 2050  
2051 2051  static int
2052 2052  doaoutwork(vnode_t *vp, mmapobj_result_t *mrp,
2053 2053      uint_t *num_mapped, struct exec *hdr, cred_t *fcred)
2054 2054  {
2055 2055          int error;
2056 2056          size_t size;
2057 2057          size_t osize;
2058 2058          size_t nsize;   /* nlist size */
2059 2059          size_t msize;
2060 2060          size_t zfoddiff;
2061 2061          caddr_t addr;
2062 2062          caddr_t start_addr;
2063 2063          struct as *as = curproc->p_as;
2064 2064          int prot = PROT_USER | PROT_READ | PROT_EXEC;
2065 2065          uint_t mflag = MAP_PRIVATE | _MAP_LOW32;
2066 2066          offset_t off = 0;
2067 2067          int segnum = 0;
2068 2068          uint_t to_map;
2069 2069          int is_library = 0;
2070 2070          struct segvn_crargs crargs = SEGVN_ZFOD_ARGS(PROT_ZFOD, PROT_ALL);
2071 2071  
2072 2072          /* Only 32bit apps supported by this file format */
2073 2073          if (get_udatamodel() != DATAMODEL_ILP32) {
2074 2074                  MOBJ_STAT_ADD(aout_64bit_try);
2075 2075                  return (ENOTSUP);
2076 2076          }
2077 2077  
2078 2078          /* Check to see if this is a library */
2079 2079          if (hdr->a_magic == ZMAGIC && hdr->a_entry < PAGESIZE) {
2080 2080                  is_library = 1;
2081 2081          }
2082 2082  
2083 2083          /* Can't execute code from "noexec" mounted filesystem. */
2084 2084          if (((vp->v_vfsp->vfs_flag & VFS_NOEXEC) != 0) && (is_library == 0)) {
2085 2085                  MOBJ_STAT_ADD(aout_noexec);
2086 2086                  return (EACCES);
2087 2087          }
2088 2088  
2089 2089          /*
2090 2090           * There are 2 ways to calculate the mapped size of the executable:
2091 2091           * 1) rounded text size + data size + bss size.
2092 2092           * 2) starting offset for text + text size + data size + text relocation
2093 2093           *    size + data relocation size + room for nlist data structure.
2094 2094           *
2095 2095           * The larger of the two sizes will be used to map this binary.
2096 2096           */
2097 2097          osize = P2ROUNDUP(hdr->a_text, PAGESIZE) + hdr->a_data + hdr->a_bss;
2098 2098  
2099 2099          off = hdr->a_magic == ZMAGIC ? 0 : sizeof (struct exec);
2100 2100  
2101 2101          nsize = off + hdr->a_text + hdr->a_data + hdr->a_trsize +
2102 2102              hdr->a_drsize + NLIST_SIZE;
2103 2103  
2104 2104          size = MAX(osize, nsize);
2105 2105          if (size != nsize) {
2106 2106                  nsize = 0;
2107 2107          }
2108 2108  
2109 2109          /*
2110 2110           * 1 seg for text and 1 seg for initialized data.
2111 2111           * 1 seg for bss (if can't fit in leftover space of init data)
2112 2112           * 1 seg for nlist if needed.
2113 2113           */
2114 2114          to_map = 2 + (nsize ? 1 : 0) +
2115 2115              (hdr->a_bss > PAGESIZE - P2PHASE(hdr->a_data, PAGESIZE) ? 1 : 0);
2116 2116          if (*num_mapped < to_map) {
2117 2117                  *num_mapped = to_map;
2118 2118                  MOBJ_STAT_ADD(aout_e2big);
2119 2119                  return (E2BIG);
2120 2120          }
2121 2121  
2122 2122          /* Reserve address space for the whole mapping */
2123 2123          if (is_library) {
2124 2124                  /* We'll let VOP_MAP below pick our address for us */
2125 2125                  addr = NULL;
2126 2126                  MOBJ_STAT_ADD(aout_lib);
2127 2127          } else {
2128 2128                  /*
2129 2129                   * default start address for fixed binaries from AOUT 4.x
2130 2130                   * standard.
2131 2131                   */
2132 2132                  MOBJ_STAT_ADD(aout_fixed);
2133 2133                  mflag |= MAP_FIXED;
2134 2134                  addr = (caddr_t)0x2000;
2135 2135                  as_rangelock(as);
2136 2136                  if (as_gap(as, size, &addr, &size, 0, NULL) != 0) {
2137 2137                          as_rangeunlock(as);
2138 2138                          MOBJ_STAT_ADD(aout_addr_in_use);
2139 2139                          return (EADDRINUSE);
2140 2140                  }
2141 2141                  crargs.flags |= MAP_NORESERVE;
2142 2142                  error = as_map(as, addr, size, segvn_create, &crargs);
2143 2143                  ASSERT(addr == (caddr_t)0x2000);
2144 2144                  as_rangeunlock(as);
2145 2145          }
2146 2146  
2147 2147          start_addr = addr;
2148 2148          osize = size;
2149 2149  
2150 2150          /*
2151 2151           * Map as large as we need, backed by the file; this will be text and
2152 2152           * possibly the nlist segment.  We map over this mapping for bss and
2153 2153           * initialized data segments.
2154 2154           */
2155 2155          error = VOP_MAP(vp, off, as, &addr, size, prot, PROT_ALL,
2156 2156              mflag, fcred, NULL);
2157 2157          if (error) {
2158 2158                  if (!is_library) {
2159 2159                          (void) as_unmap(as, start_addr, osize);
2160 2160                  }
2161 2161                  return (error);
2162 2162          }
2163 2163  
2164 2164          /* pick up the value of start_addr and osize for libraries */
2165 2165          start_addr = addr;
2166 2166          osize = size;
2167 2167  
2168 2168          /*
2169 2169           * We have our initial reservation/allocation so we need to use fixed
2170 2170           * addresses from now on.
2171 2171           */
2172 2172          mflag |= MAP_FIXED;
2173 2173  
2174 2174          mrp[0].mr_addr = addr;
2175 2175          mrp[0].mr_msize = hdr->a_text;
2176 2176          mrp[0].mr_fsize = hdr->a_text;
2177 2177          mrp[0].mr_offset = 0;
2178 2178          mrp[0].mr_prot = PROT_READ | PROT_EXEC;
2179 2179          mrp[0].mr_flags = MR_HDR_AOUT;
2180 2180  
2181 2181  
2182 2182          /*
2183 2183           * Map initialized data. We are mapping over a portion of the
2184 2184           * previous mapping which will be unmapped in VOP_MAP below.
2185 2185           */
2186 2186          off = P2ROUNDUP((offset_t)(hdr->a_text), PAGESIZE);
2187 2187          msize = off;
2188 2188          addr += off;
2189 2189          size = hdr->a_data;
2190 2190          error = VOP_MAP(vp, off, as, &addr, size, PROT_ALL, PROT_ALL,
2191 2191              mflag, fcred, NULL);
2192 2192          if (error) {
2193 2193                  (void) as_unmap(as, start_addr, osize);
2194 2194                  return (error);
2195 2195          }
2196 2196          msize += size;
2197 2197          mrp[1].mr_addr = addr;
2198 2198          mrp[1].mr_msize = size;
2199 2199          mrp[1].mr_fsize = size;
2200 2200          mrp[1].mr_offset = 0;
2201 2201          mrp[1].mr_prot = PROT_READ | PROT_WRITE | PROT_EXEC;
2202 2202          mrp[1].mr_flags = 0;
2203 2203  
2204 2204          /* Need to zero out remainder of page */
2205 2205          addr += hdr->a_data;
2206 2206          zfoddiff = P2PHASE((size_t)addr, PAGESIZE);
2207 2207          if (zfoddiff) {
2208 2208                  label_t ljb;
2209 2209  
2210 2210                  MOBJ_STAT_ADD(aout_zfoddiff);
2211 2211                  zfoddiff = PAGESIZE - zfoddiff;
2212 2212                  if (on_fault(&ljb)) {
2213 2213                          no_fault();
2214 2214                          MOBJ_STAT_ADD(aout_uzero_fault);
2215 2215                          (void) as_unmap(as, start_addr, osize);
2216 2216                          return (EFAULT);
2217 2217                  }
2218 2218                  uzero(addr, zfoddiff);
2219 2219                  no_fault();
2220 2220          }
2221 2221          msize += zfoddiff;
2222 2222          segnum = 2;
2223 2223  
2224 2224          /* Map bss */
2225 2225          if (hdr->a_bss > zfoddiff) {
2226 2226                  struct segvn_crargs crargs =
2227 2227                      SEGVN_ZFOD_ARGS(PROT_ZFOD, PROT_ALL);
2228 2228                  MOBJ_STAT_ADD(aout_map_bss);
2229 2229                  addr += zfoddiff;
2230 2230                  size = hdr->a_bss - zfoddiff;
2231 2231                  as_rangelock(as);
2232 2232                  (void) as_unmap(as, addr, size);
2233 2233                  error = as_map(as, addr, size, segvn_create, &crargs);
2234 2234                  as_rangeunlock(as);
2235 2235                  msize += size;
2236 2236  
2237 2237                  if (error) {
2238 2238                          MOBJ_STAT_ADD(aout_bss_fail);
2239 2239                          (void) as_unmap(as, start_addr, osize);
2240 2240                          return (error);
2241 2241                  }
2242 2242                  mrp[2].mr_addr = addr;
2243 2243                  mrp[2].mr_msize = size;
2244 2244                  mrp[2].mr_fsize = 0;
2245 2245                  mrp[2].mr_offset = 0;
2246 2246                  mrp[2].mr_prot = PROT_READ | PROT_WRITE | PROT_EXEC;
2247 2247                  mrp[2].mr_flags = 0;
2248 2248  
2249 2249                  addr += size;
2250 2250                  segnum = 3;
2251 2251          }
2252 2252  
2266› 2253 2253          /*
2267› 2254 2254           * If we have extra bits left over, we need to include them in how
2268› 2255 2255           * much we mapped to make sure the nlist logic is correct.
2269› 2256 2256           */
2257 2257          msize = P2ROUNDUP(msize, PAGESIZE);
2258 2258  
2259 2259          if (nsize && msize < nsize) {
2260 2260                  MOBJ_STAT_ADD(aout_nlist);
2261 2261                  mrp[segnum].mr_addr = addr;
2262 2262                  mrp[segnum].mr_msize = nsize - msize;
2263 2263                  mrp[segnum].mr_fsize = 0;
2264 2264                  mrp[segnum].mr_offset = 0;
2265 2265                  mrp[segnum].mr_prot = PROT_READ | PROT_EXEC;
2266 2266                  mrp[segnum].mr_flags = 0;
2267 2267          }
2268 2268  
2269 2269          *num_mapped = to_map;
2270 2270          return (0);
2271 2271  }
2272 2272  #endif
2273 2273  
2274 2274  /*
2275 2275   * These are the two types of files that we can interpret and we want to read
2276 2276   * in enough info to cover both types when looking at the initial header.
2277 2277   */
2278 2278  #define MAX_HEADER_SIZE (MAX(sizeof (Ehdr), sizeof (struct exec)))
2279 2279  
2280 2280  /*
2281 2281   * Map the vnode passed in (vp) in an interpreted manner.  ELF and AOUT
2282 2282   * files will be interpreted and mapped appropriately for execution.
2283 2283   * num_mapped in - # elements in mrp
2284 2284   * num_mapped out - # sections mapped on success, or the number of
2285 2285   *                  entries required when E2BIG is returned.
2286 2286   *
2287 2287   * Returns 0 on success, errno value on failure.
2288 2288   */
2289 2289  static int
2290 2290  mmapobj_map_interpret(vnode_t *vp, mmapobj_result_t *mrp,
2291 2291      uint_t *num_mapped, size_t padding, cred_t *fcred)
2292 2292  {
2293 2293          int error = 0;
2294 2294          vattr_t vattr;
2295 2295          struct lib_va *lvp;
2296 2296          caddr_t start_addr;
2297 2297          model_t model;
2298 2298  
2299 2299          /*
2300 2300           * header has to be aligned to the native size of ulong_t in order
2301 2301           * to avoid an unaligned access when dereferencing the header as
2302 2302           * a ulong_t.  Thus we allocate our array on the stack of type
2303 2303           * ulong_t and then point header, which we later dereference as
2304 2304           * a char array, at lheader.
2305 2305           */
2306 2306          ulong_t lheader[(MAX_HEADER_SIZE / (sizeof (ulong_t))) + 1];
2307 2307          caddr_t header = (caddr_t)&lheader;
2308 2308  
2309 2309          vattr.va_mask = AT_FSID | AT_NODEID | AT_CTIME | AT_MTIME | AT_SIZE;
2310 2310          error = VOP_GETATTR(vp, &vattr, 0, fcred, NULL);
2311 2311          if (error) {
2312 2312                  return (error);
2313 2313          }
2314 2314  
2315 2315          /*
2316 2316           * Check lib_va to see if we already have a full description
2317 2317           * for this library.  This is the fast path and only used for
2318 2318           * ET_DYN ELF files (dynamic libraries).
2319 2319           */
2320 2320          if (padding == 0 && (lvp = lib_va_find(&vattr)) != NULL) {
2321 2321                  int num_segs;
2322 2322  
2323 2323                  model = get_udatamodel();
2324 2324                  if ((model == DATAMODEL_ILP32 &&
2325 2325                      lvp->lv_flags & LV_ELF64) ||
2326 2326                      (model == DATAMODEL_LP64 &&
2327 2327                      lvp->lv_flags & LV_ELF32)) {
2328 2328                          lib_va_release(lvp);
2329 2329                          MOBJ_STAT_ADD(fast_wrong_model);
2330 2330                          return (ENOTSUP);
2331 2331                  }
2332 2332                  num_segs = lvp->lv_num_segs;
2333 2333                  if (*num_mapped < num_segs) {
2334 2334                          *num_mapped = num_segs;
2335 2335                          lib_va_release(lvp);
2336 2336                          MOBJ_STAT_ADD(fast_e2big);
2337 2337                          return (E2BIG);
2338 2338                  }
2339 2339  
2340 2340                  /*
2341 2341                   * Check to see if we have all the mappable program headers
2342 2342                   * cached.
2343 2343                   */
2344 2344                  if (num_segs <= LIBVA_CACHED_SEGS && num_segs != 0) {
2345 2345                          MOBJ_STAT_ADD(fast);
2346 2346                          start_addr = mmapobj_lookup_start_addr(lvp);
2347 2347                          if (start_addr == NULL) {
2348 2348                                  lib_va_release(lvp);
2349 2349                                  return (ENOMEM);
2350 2350                          }
2351 2351  
2352 2352                          bcopy(lvp->lv_mps, mrp,
2353 2353                              num_segs * sizeof (mmapobj_result_t));
2354 2354  
2355 2355                          error = mmapobj_map_elf(vp, start_addr, mrp,
2356 2356                              num_segs, fcred, ET_DYN);
2357 2357  
2358 2358                          lib_va_release(lvp);
2359 2359                          if (error == 0) {
2360 2360                                  *num_mapped = num_segs;
2361 2361                                  MOBJ_STAT_ADD(fast_success);
2362 2362                          }
2363 2363                          return (error);
2364 2364                  }
2365 2365                  MOBJ_STAT_ADD(fast_not_now);
2366 2366  
2367 2367                  /* Release it for now since we'll look it up below */
2368 2368                  lib_va_release(lvp);
2369 2369          }
2370 2370  
2371 2371          /*
2372 2372           * Time to see if this is a file we can interpret.  If it's smaller
2373 2373           * than MAX_HEADER_SIZE, then we can't interpret it.
2374 2374           */
2375 2375          if (vattr.va_size < MAX_HEADER_SIZE) {
2376 2376                  MOBJ_STAT_ADD(small_file);
2377 2377                  return (ENOTSUP);
2378 2378          }
2379 2379  
2380 2380          if ((error = vn_rdwr(UIO_READ, vp, header, MAX_HEADER_SIZE, 0,
2381 2381              UIO_SYSSPACE, 0, (rlim64_t)0, fcred, NULL)) != 0) {
2382 2382                  MOBJ_STAT_ADD(read_error);
2383 2383                  return (error);
2384 2384          }
2385 2385  
2386 2386          /* Verify file type */
2387 2387          if (header[EI_MAG0] == ELFMAG0 && header[EI_MAG1] == ELFMAG1 &&
2388 2388              header[EI_MAG2] == ELFMAG2 && header[EI_MAG3] == ELFMAG3) {
2389 2389                  return (doelfwork((Ehdr *)lheader, vp, mrp, num_mapped,
2390 2390                      padding, fcred));
2391 2391          }
2392 2392  
2393 2393  #if defined(__sparc)
2394 2394          /* On sparc, check for 4.X AOUT format */
2395 2395          switch (((struct exec *)header)->a_magic) {
2396 2396          case OMAGIC:
2397 2397          case ZMAGIC:
2398 2398          case NMAGIC:
2399 2399                  return (doaoutwork(vp, mrp, num_mapped,
2400 2400                      (struct exec *)lheader, fcred));
2401 2401          }
2402 2402  #endif
2403 2403  
2404 2404          /* Unsupported type */
2405 2405          MOBJ_STAT_ADD(unsupported);
2406 2406          return (ENOTSUP);
2407 2407  }
2408 2408  
2409 2409  /*
2410 2410   * Given a vnode, map it as either a flat file or interpret it and map
2411 2411   * it according to the rules of the file type.
2412 2412   * On entry, *num_mapped contains the number of elements in the mrp array.
2413 2413   * If padding is non-zero, the mappings will be padded by that amount
2414 2414   * rounded up to the nearest pagesize.
2415 2415   * If the mapping is successful, *num_mapped will contain the number of
2416 2416   * distinct mappings created, and mrp will point to the array of
2417 2417   * mmapobj_result_t's which describe these mappings.
2418 2418   *
2419 2419   * On error, an errno value is returned.  As a special case, E2BIG is
2420 2420   * returned when more than *num_mapped mappings would be created; in that
2421 2421   * case *num_mapped is set to the number of mappings needed so the caller
2422 2422   * can retry with a suitably sized array (see the illustrative sketch below).
2423 2423   */
2424 2424  int
2425 2425  mmapobj(vnode_t *vp, uint_t flags, mmapobj_result_t *mrp,
2426 2426      uint_t *num_mapped, size_t padding, cred_t *fcred)
2427 2427  {
2428 2428          int to_map;
2429 2429          int error = 0;
2430 2430  
2431 2431          ASSERT((padding & PAGEOFFSET) == 0);
2432 2432          ASSERT((flags & ~MMOBJ_ALL_FLAGS) == 0);
2433 2433          ASSERT(num_mapped != NULL);
2434 2434          ASSERT((flags & MMOBJ_PADDING) ? padding != 0 : padding == 0);
2435 2435  
2436 2436          if ((flags & MMOBJ_INTERPRET) == 0) {
2437 2437                  to_map = padding ? 3 : 1;
2438 2438                  if (*num_mapped < to_map) {
2439 2439                          *num_mapped = to_map;
2440 2440                          MOBJ_STAT_ADD(flat_e2big);
2441 2441                          return (E2BIG);
2442 2442                  }
2443 2443                  error = mmapobj_map_flat(vp, mrp, padding, fcred);
2444 2444  
2445 2445                  if (error) {
2446 2446                          return (error);
2447 2447                  }
2448 2448                  *num_mapped = to_map;
2449 2449                  return (0);
2450 2450          }
2451 2451  
2452 2452          error = mmapobj_map_interpret(vp, mrp, num_mapped, padding, fcred);
2453 2453          return (error);
2454 2454  }
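
/*
 * Illustrative sketch only (not part of the original file): one way a
 * caller could drive the E2BIG protocol documented above -- try a small
 * on-stack array first and, if mmapobj() reports that more entries are
 * needed, retry with a kmem_alloc()'d array of the size it asked for.
 * The function name, the initial array size of 16 and the cleanup policy
 * are assumptions made for this example only.
 */
static int
example_mmapobj_caller(vnode_t *vp, cred_t *cr)
{
        mmapobj_result_t stack_mrp[16];
        mmapobj_result_t *mrp = stack_mrp;
        uint_t num_mapped = 16;
        size_t mrp_size = 0;
        int error;

        error = mmapobj(vp, MMOBJ_INTERPRET, mrp, &num_mapped, 0, cr);
        if (error == E2BIG) {
                /*
                 * mmapobj() set num_mapped to the number of mappings it
                 * needs; allocate an array that large and try again.
                 */
                mrp_size = num_mapped * sizeof (mmapobj_result_t);
                mrp = kmem_alloc(mrp_size, KM_SLEEP);
                error = mmapobj(vp, MMOBJ_INTERPRET, mrp, &num_mapped, 0, cr);
        }

        if (error == 0) {
                /*
                 * On success, mrp[0 .. num_mapped - 1] describe the
                 * mappings that now exist in the current address space;
                 * a real caller would record or act on them here.
                 */
        }

        if (mrp != stack_mrp)
                kmem_free(mrp, mrp_size);
        return (error);
}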
  