NEX-18463 Parallel dump produces corrupted dump file
Reviewed by: Yuri Pankov <yuri.pankov@nexenta.com>
Reviewed by: Rick McNeal <rick.mcneal@nexenta.com>
NEX-5177 backport illumos 6345 remove xhat support
Reviewed by: Kevin Crowe <kevin.crowe@nexenta.com>
6345 remove xhat support
Reviewed by: Dan McDonald <danmcd@omniti.com>
Reviewed by: Garrett D'Amore <garrett@damore.org>
Approved by: Hans Rosenfeld <rosenfeld@grumpf.hope-2000.org>
NEX-5164 backport illumos 6514 AS_* lock macros simplification
Reviewed by: Kevin Crowe <kevin.crowe@nexenta.com>
6514 AS_* lock macros simplification
Reviewed by: Piotr Jasiukajtis <estibi@me.com>
Reviewed by: Yuri Pankov <yuri.pankov@nexenta.com>
Reviewed by: Albert Lee <trisk@omniti.com>
Approved by: Dan McDonald <danmcd@omniti.com>
re #13613 rb4516 Tunables needs volatile keyword
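
The final entry above (re #13613) is the change that adds the volatile qualifier to the TSB tunables (enable_tsb_rss_sizing, tsb_rss_factor, default_tsb_size, tsb_alloc_hiwater_factor), visible in the hunks around old lines 663-670 below. As a rough illustration of the rationale, here is a minimal standalone C sketch, not code from this file; the names tunable_plain, tunable_safe and poll_tunable are invented for the example. A tunable that can be patched at run time (for example via /etc/system processing or with mdb -kw) wants volatile so the compiler cannot cache its value:

    /* Sketch only: a flag that an external agent may change at any time. */
    int             tunable_plain = 1;  /* compiler may keep this in a register */
    volatile int    tunable_safe = 1;   /* every access performs a fresh load */

    static void
    poll_tunable(void)
    {
            /*
             * Without volatile the optimizer may read tunable_plain once and
             * never observe a later external store; with volatile each
             * iteration re-reads memory, so the loop exits as soon as the
             * tunable is patched to 0 from outside.
             */
            while (tunable_safe)
                    ;
    }

The diff itself only adds the qualifier to the existing declarations, so the way the tunables are set does not change.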
    
      
    
    
          --- old/usr/src/uts/sfmmu/vm/hat_sfmmu.c
          +++ new/usr/src/uts/sfmmu/vm/hat_sfmmu.c
   1    1  /*
   2    2   * CDDL HEADER START
   3    3   *
   4    4   * The contents of this file are subject to the terms of the
   5    5   * Common Development and Distribution License (the "License").
   6    6   * You may not use this file except in compliance with the License.
   7    7   *
   8    8   * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9    9   * or http://www.opensolaris.org/os/licensing.
  10   10   * See the License for the specific language governing permissions
  11   11   * and limitations under the License.
  12   12   *
  13   13   * When distributing Covered Code, include this CDDL HEADER in each
  14   14   * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15   15   * If applicable, add the following below this CDDL HEADER, with the
  16   16   * fields enclosed by brackets "[]" replaced with your own identifying
  17   17   * information: Portions Copyright [yyyy] [name of copyright owner]
  18   18   *
  19   19   * CDDL HEADER END
  20   20   */
  21   21  /*
  22   22   * Copyright (c) 1993, 2010, Oracle and/or its affiliates. All rights reserved.
  23   23   */
  24   24  /*
  25      - * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
       25 + * Copyright 2018 Nexenta Systems, Inc.  All rights reserved.
  26   26   * Copyright 2016 Gary Mills
  27   27   */
  28   28  
  29   29  /*
  30   30   * VM - Hardware Address Translation management for Spitfire MMU.
  31   31   *
  32   32   * This file implements the machine specific hardware translation
  33   33   * needed by the VM system.  The machine independent interface is
  34   34   * described in <vm/hat.h> while the machine dependent interface
  35   35   * and data structures are described in <vm/hat_sfmmu.h>.
  36   36   *
  37   37   * The hat layer manages the address translation hardware as a cache
  38   38   * driven by calls from the higher levels in the VM system.
  39   39   */
  40   40  
  41   41  #include <sys/types.h>
  42   42  #include <sys/kstat.h>
  43   43  #include <vm/hat.h>
  44   44  #include <vm/hat_sfmmu.h>
  45   45  #include <vm/page.h>
  46   46  #include <sys/pte.h>
  47   47  #include <sys/systm.h>
  48   48  #include <sys/mman.h>
  49   49  #include <sys/sysmacros.h>
  50   50  #include <sys/machparam.h>
  51   51  #include <sys/vtrace.h>
  52   52  #include <sys/kmem.h>
  53   53  #include <sys/mmu.h>
  54   54  #include <sys/cmn_err.h>
  55   55  #include <sys/cpu.h>
  56   56  #include <sys/cpuvar.h>
  57   57  #include <sys/debug.h>
  58   58  #include <sys/lgrp.h>
  59   59  #include <sys/archsystm.h>
  60   60  #include <sys/machsystm.h>
  61   61  #include <sys/vmsystm.h>
  62   62  #include <vm/as.h>
  63   63  #include <vm/seg.h>
  64   64  #include <vm/seg_kp.h>
  65   65  #include <vm/seg_kmem.h>
  66   66  #include <vm/seg_kpm.h>
  67   67  #include <vm/rm.h>
  68   68  #include <sys/t_lock.h>
  69   69  #include <sys/obpdefs.h>
  70   70  #include <sys/vm_machparam.h>
  71   71  #include <sys/var.h>
  72   72  #include <sys/trap.h>
  73   73  #include <sys/machtrap.h>
  74   74  #include <sys/scb.h>
  75   75  #include <sys/bitmap.h>
  76   76  #include <sys/machlock.h>
  77   77  #include <sys/membar.h>
  78   78  #include <sys/atomic.h>
  79   79  #include <sys/cpu_module.h>
  80   80  #include <sys/prom_debug.h>
  81   81  #include <sys/ksynch.h>
  82   82  #include <sys/mem_config.h>
  83   83  #include <sys/mem_cage.h>
  84   84  #include <vm/vm_dep.h>
  85   85  #include <sys/fpu/fpusystm.h>
  86   86  #include <vm/mach_kpm.h>
  87   87  #include <sys/callb.h>
  88   88  
  89   89  #ifdef  DEBUG
  90   90  #define SFMMU_VALIDATE_HMERID(hat, rid, saddr, len)                     \
  91   91          if (SFMMU_IS_SHMERID_VALID(rid)) {                              \
  92   92                  caddr_t _eaddr = (saddr) + (len);                       \
  93   93                  sf_srd_t *_srdp;                                        \
  94   94                  sf_region_t *_rgnp;                                     \
  95   95                  ASSERT((rid) < SFMMU_MAX_HME_REGIONS);                  \
  96   96                  ASSERT(SF_RGNMAP_TEST(hat->sfmmu_hmeregion_map, rid));  \
  97   97                  ASSERT((hat) != ksfmmup);                               \
  98   98                  _srdp = (hat)->sfmmu_srdp;                              \
  99   99                  ASSERT(_srdp != NULL);                                  \
 100  100                  ASSERT(_srdp->srd_refcnt != 0);                         \
 101  101                  _rgnp = _srdp->srd_hmergnp[(rid)];                      \
 102  102                  ASSERT(_rgnp != NULL && _rgnp->rgn_id == rid);          \
 103  103                  ASSERT(_rgnp->rgn_refcnt != 0);                         \
 104  104                  ASSERT(!(_rgnp->rgn_flags & SFMMU_REGION_FREE));        \
 105  105                  ASSERT((_rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) ==   \
 106  106                      SFMMU_REGION_HME);                                  \
 107  107                  ASSERT((saddr) >= _rgnp->rgn_saddr);                    \
 108  108                  ASSERT((saddr) < _rgnp->rgn_saddr + _rgnp->rgn_size);   \
 109  109                  ASSERT(_eaddr > _rgnp->rgn_saddr);                      \
 110  110                  ASSERT(_eaddr <= _rgnp->rgn_saddr + _rgnp->rgn_size);   \
 111  111          }
 112  112  
 113  113  #define SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid)              \
 114  114  {                                                                        \
 115  115                  caddr_t _hsva;                                           \
 116  116                  caddr_t _heva;                                           \
 117  117                  caddr_t _rsva;                                           \
 118  118                  caddr_t _reva;                                           \
 119  119                  int     _ttesz = get_hblk_ttesz(hmeblkp);                \
 120  120                  int     _flagtte;                                        \
 121  121                  ASSERT((srdp)->srd_refcnt != 0);                         \
 122  122                  ASSERT((rid) < SFMMU_MAX_HME_REGIONS);                   \
 123  123                  ASSERT((rgnp)->rgn_id == rid);                           \
 124  124                  ASSERT(!((rgnp)->rgn_flags & SFMMU_REGION_FREE));        \
 125  125                  ASSERT(((rgnp)->rgn_flags & SFMMU_REGION_TYPE_MASK) ==   \
 126  126                      SFMMU_REGION_HME);                                   \
 127  127                  ASSERT(_ttesz <= (rgnp)->rgn_pgszc);                     \
 128  128                  _hsva = (caddr_t)get_hblk_base(hmeblkp);                 \
 129  129                  _heva = get_hblk_endaddr(hmeblkp);                       \
 130  130                  _rsva = (caddr_t)P2ALIGN(                                \
 131  131                      (uintptr_t)(rgnp)->rgn_saddr, HBLK_MIN_BYTES);       \
 132  132                  _reva = (caddr_t)P2ROUNDUP(                              \
 133  133                      (uintptr_t)((rgnp)->rgn_saddr + (rgnp)->rgn_size),   \
 134  134                      HBLK_MIN_BYTES);                                     \
 135  135                  ASSERT(_hsva >= _rsva);                                  \
 136  136                  ASSERT(_hsva < _reva);                                   \
 137  137                  ASSERT(_heva > _rsva);                                   \
 138  138                  ASSERT(_heva <= _reva);                                  \
 139  139                  _flagtte = (_ttesz < HBLK_MIN_TTESZ) ? HBLK_MIN_TTESZ :  \
 140  140                          _ttesz;                                          \
 141  141                  ASSERT(rgnp->rgn_hmeflags & (0x1 << _flagtte));          \
 142  142  }
 143  143  
 144  144  #else /* DEBUG */
 145  145  #define SFMMU_VALIDATE_HMERID(hat, rid, addr, len)
 146  146  #define SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid)
 147  147  #endif /* DEBUG */
 148  148  
 149  149  #if defined(SF_ERRATA_57)
 150  150  extern caddr_t errata57_limit;
 151  151  #endif
 152  152  
 153  153  #define HME8BLK_SZ_RND          ((roundup(HME8BLK_SZ, sizeof (int64_t))) /  \
 154  154                                  (sizeof (int64_t)))
 155  155  #define HBLK_RESERVE            ((struct hme_blk *)hblk_reserve)
 156  156  
 157  157  #define HBLK_RESERVE_CNT        128
 158  158  #define HBLK_RESERVE_MIN        20
 159  159  
 160  160  static struct hme_blk           *freehblkp;
 161  161  static kmutex_t                 freehblkp_lock;
 162  162  static int                      freehblkcnt;
 163  163  
 164  164  static int64_t                  hblk_reserve[HME8BLK_SZ_RND];
 165  165  static kmutex_t                 hblk_reserve_lock;
 166  166  static kthread_t                *hblk_reserve_thread;
 167  167  
 168  168  static nucleus_hblk8_info_t     nucleus_hblk8;
 169  169  static nucleus_hblk1_info_t     nucleus_hblk1;
 170  170  
 171  171  /*
 172  172   * Data to manage per-cpu hmeblk pending queues, hmeblks are queued here
 173  173   * after the initial phase of removing an hmeblk from the hash chain, see
 174  174   * the detailed comment in sfmmu_hblk_hash_rm() for further details.
 175  175   */
 176  176  static cpu_hme_pend_t           *cpu_hme_pend;
 177  177  static uint_t                   cpu_hme_pend_thresh;
 178  178  /*
 179  179   * SFMMU specific hat functions
 180  180   */
 181  181  void    hat_pagecachectl(struct page *, int);
 182  182  
 183  183  /* flags for hat_pagecachectl */
 184  184  #define HAT_CACHE       0x1
 185  185  #define HAT_UNCACHE     0x2
 186  186  #define HAT_TMPNC       0x4
 187  187  
 188  188  /*
 189  189   * Flag to allow the creation of non-cacheable translations
 190  190   * to system memory. It is off by default. At the moment this
 191  191   * flag is used by the ecache error injector. The error injector
 192  192   * will turn it on when creating such a translation then shut it
 193  193   * off when it's finished.
 194  194   */
 195  195  
 196  196  int     sfmmu_allow_nc_trans = 0;
 197  197  
 198  198  /*
 199  199   * Flag to disable large page support.
 200  200   *      value of 1 => disable all large pages.
 201  201   *      bits 1, 2, and 3 are to disable 64K, 512K and 4M pages respectively.
 202  202   *
 203  203   * For example, use the value 0x4 to disable 512K pages.
 204  204   *
 205  205   */
 206  206  #define LARGE_PAGES_OFF         0x1
 207  207  
 208  208  /*
 209  209   * The disable_large_pages and disable_ism_large_pages variables control
 210  210   * hat_memload_array and the page sizes to be used by ISM and the kernel.
 211  211   *
 212  212   * The disable_auto_data_large_pages and disable_auto_text_large_pages variables
 213  213   * are only used to control which OOB pages to use at upper VM segment creation
 214  214   * time, and are set in hat_init_pagesizes and used in the map_pgsz* routines.
 215  215   * Their values may come from platform or CPU specific code to disable page
 216  216   * sizes that should not be used.
 217  217   *
 218  218   * WARNING: 512K pages are currently not supported for ISM/DISM.
 219  219   */
 220  220  uint_t  disable_large_pages = 0;
 221  221  uint_t  disable_ism_large_pages = (1 << TTE512K);
 222  222  uint_t  disable_auto_data_large_pages = 0;
 223  223  uint_t  disable_auto_text_large_pages = 0;
 224  224  
 225  225  /*
 226  226   * Private sfmmu data structures for hat management
 227  227   */
 228  228  static struct kmem_cache *sfmmuid_cache;
 229  229  static struct kmem_cache *mmuctxdom_cache;
 230  230  
 231  231  /*
 232  232   * Private sfmmu data structures for tsb management
 233  233   */
 234  234  static struct kmem_cache *sfmmu_tsbinfo_cache;
 235  235  static struct kmem_cache *sfmmu_tsb8k_cache;
 236  236  static struct kmem_cache *sfmmu_tsb_cache[NLGRPS_MAX];
 237  237  static vmem_t *kmem_bigtsb_arena;
 238  238  static vmem_t *kmem_tsb_arena;
 239  239  
 240  240  /*
 241  241   * sfmmu static variables for hmeblk resource management.
 242  242   */
 243  243  static vmem_t *hat_memload1_arena; /* HAT translation arena for sfmmu1_cache */
 244  244  static struct kmem_cache *sfmmu8_cache;
 245  245  static struct kmem_cache *sfmmu1_cache;
 246  246  static struct kmem_cache *pa_hment_cache;
 247  247  
 248  248  static kmutex_t         ism_mlist_lock; /* mutex for ism mapping list */
 249  249  /*
 250  250   * private data for ism
 251  251   */
 252  252  static struct kmem_cache *ism_blk_cache;
 253  253  static struct kmem_cache *ism_ment_cache;
 254  254  #define ISMID_STARTADDR NULL
 255  255  
 256  256  /*
 257  257   * Region management data structures and function declarations.
 258  258   */
 259  259  
 260  260  static void     sfmmu_leave_srd(sfmmu_t *);
 261  261  static int      sfmmu_srdcache_constructor(void *, void *, int);
 262  262  static void     sfmmu_srdcache_destructor(void *, void *);
 263  263  static int      sfmmu_rgncache_constructor(void *, void *, int);
 264  264  static void     sfmmu_rgncache_destructor(void *, void *);
 265  265  static int      sfrgnmap_isnull(sf_region_map_t *);
 266  266  static int      sfhmergnmap_isnull(sf_hmeregion_map_t *);
 267  267  static int      sfmmu_scdcache_constructor(void *, void *, int);
 268  268  static void     sfmmu_scdcache_destructor(void *, void *);
 269  269  static void     sfmmu_rgn_cb_noop(caddr_t, caddr_t, caddr_t,
 270  270      size_t, void *, u_offset_t);
 271  271  
 272  272  static uint_t srd_hashmask = SFMMU_MAX_SRD_BUCKETS - 1;
 273  273  static sf_srd_bucket_t *srd_buckets;
 274  274  static struct kmem_cache *srd_cache;
 275  275  static uint_t srd_rgn_hashmask = SFMMU_MAX_REGION_BUCKETS - 1;
 276  276  static struct kmem_cache *region_cache;
 277  277  static struct kmem_cache *scd_cache;
 278  278  
 279  279  #ifdef sun4v
 280  280  int use_bigtsb_arena = 1;
 281  281  #else
 282  282  int use_bigtsb_arena = 0;
 283  283  #endif
 284  284  
 285  285  /* External /etc/system tunable, for turning on&off the shctx support */
 286  286  int disable_shctx = 0;
 287  287  /* Internal variable, set by MD if the HW supports shctx feature */
 288  288  int shctx_on = 0;
 289  289  
 290  290  #ifdef DEBUG
 291  291  static void check_scd_sfmmu_list(sfmmu_t **, sfmmu_t *, int);
 292  292  #endif
 293  293  static void sfmmu_to_scd_list(sfmmu_t **, sfmmu_t *);
 294  294  static void sfmmu_from_scd_list(sfmmu_t **, sfmmu_t *);
 295  295  
 296  296  static sf_scd_t *sfmmu_alloc_scd(sf_srd_t *, sf_region_map_t *);
 297  297  static void sfmmu_find_scd(sfmmu_t *);
 298  298  static void sfmmu_join_scd(sf_scd_t *, sfmmu_t *);
 299  299  static void sfmmu_finish_join_scd(sfmmu_t *);
 300  300  static void sfmmu_leave_scd(sfmmu_t *, uchar_t);
 301  301  static void sfmmu_destroy_scd(sf_srd_t *, sf_scd_t *, sf_region_map_t *);
 302  302  static int sfmmu_alloc_scd_tsbs(sf_srd_t *, sf_scd_t *);
 303  303  static void sfmmu_free_scd_tsbs(sfmmu_t *);
 304  304  static void sfmmu_tsb_inv_ctx(sfmmu_t *);
 305  305  static int find_ism_rid(sfmmu_t *, sfmmu_t *, caddr_t, uint_t *);
 306  306  static void sfmmu_ism_hatflags(sfmmu_t *, int);
 307  307  static int sfmmu_srd_lock_held(sf_srd_t *);
 308  308  static void sfmmu_remove_scd(sf_scd_t **, sf_scd_t *);
 309  309  static void sfmmu_add_scd(sf_scd_t **headp, sf_scd_t *);
 310  310  static void sfmmu_link_scd_to_regions(sf_srd_t *, sf_scd_t *);
 311  311  static void sfmmu_unlink_scd_from_regions(sf_srd_t *, sf_scd_t *);
 312  312  static void sfmmu_link_to_hmeregion(sfmmu_t *, sf_region_t *);
 313  313  static void sfmmu_unlink_from_hmeregion(sfmmu_t *, sf_region_t *);
 314  314  
 315  315  /*
 316  316   * ``hat_lock'' is a hashed mutex lock for protecting sfmmu TSB lists,
 317  317   * HAT flags, synchronizing TLB/TSB coherency, and context management.
 318  318   * The lock is hashed on the sfmmup since the case where we need to lock
 319  319   * all processes is rare but does occur (e.g. we need to unload a shared
 320  320   * mapping from all processes using the mapping).  We have a lot of buckets,
 321  321   * and each slab of sfmmu_t's can use about a quarter of them, giving us
 322  322   * a fairly good distribution without wasting too much space and overhead
 323  323   * when we have to grab them all.
 324  324   */
 325  325  #define SFMMU_NUM_LOCK  128             /* must be power of two */
 326  326  hatlock_t       hat_lock[SFMMU_NUM_LOCK];
 327  327  
 328  328  /*
 329  329   * Hash algorithm optimized for a small number of slabs.
 330  330   *  7 is (highbit((sizeof sfmmu_t)) - 1)
 331  331   * This hash algorithm is based upon the knowledge that sfmmu_t's come from a
 332  332   * kmem_cache, and thus they will be sequential within that cache.  In
 333  333   * addition, each new slab will have a different "color" up to cache_maxcolor
 334  334   * which will skew the hashing for each successive slab which is allocated.
 335  335   * If the size of sfmmu_t changed to a larger size, this algorithm may need
 336  336   * to be revisited.
 337  337   */
 338  338  #define TSB_HASH_SHIFT_BITS (7)
 339  339  #define PTR_HASH(x) ((uintptr_t)x >> TSB_HASH_SHIFT_BITS)
 340  340  
 341  341  #ifdef DEBUG
 342  342  int tsb_hash_debug = 0;
 343  343  #define TSB_HASH(sfmmup)        \
 344  344          (tsb_hash_debug ? &hat_lock[0] : \
 345  345          &hat_lock[PTR_HASH(sfmmup) & (SFMMU_NUM_LOCK-1)])
 346  346  #else   /* DEBUG */
 347  347  #define TSB_HASH(sfmmup)        &hat_lock[PTR_HASH(sfmmup) & (SFMMU_NUM_LOCK-1)]
 348  348  #endif  /* DEBUG */
 349  349  
 350  350  
 351  351  /* sfmmu_replace_tsb() return codes. */
 352  352  typedef enum tsb_replace_rc {
 353  353          TSB_SUCCESS,
 354  354          TSB_ALLOCFAIL,
 355  355          TSB_LOSTRACE,
 356  356          TSB_ALREADY_SWAPPED,
 357  357          TSB_CANTGROW
 358  358  } tsb_replace_rc_t;
 359  359  
 360  360  /*
 361  361   * Flags for TSB allocation routines.
 362  362   */
 363  363  #define TSB_ALLOC       0x01
 364  364  #define TSB_FORCEALLOC  0x02
 365  365  #define TSB_GROW        0x04
 366  366  #define TSB_SHRINK      0x08
 367  367  #define TSB_SWAPIN      0x10
 368  368  
 369  369  /*
 370  370   * Support for HAT callbacks.
 371  371   */
 372  372  #define SFMMU_MAX_RELOC_CALLBACKS       10
 373  373  int sfmmu_max_cb_id = SFMMU_MAX_RELOC_CALLBACKS;
 374  374  static id_t sfmmu_cb_nextid = 0;
 375  375  static id_t sfmmu_tsb_cb_id;
 376  376  struct sfmmu_callback *sfmmu_cb_table;
 377  377  
 378  378  kmutex_t        kpr_mutex;
 379  379  kmutex_t        kpr_suspendlock;
 380  380  kthread_t       *kreloc_thread;
 381  381  
 382  382  /*
 383  383   * Enable VA->PA translation sanity checking on DEBUG kernels.
 384  384   * Disabled by default.  This is incompatible with some
 385  385   * drivers (error injector, RSM) so if it breaks you get
 386  386   * to keep both pieces.
 387  387   */
 388  388  int hat_check_vtop = 0;
 389  389  
 390  390  /*
 391  391   * Private sfmmu routines (prototypes)
 392  392   */
 393  393  static struct hme_blk *sfmmu_shadow_hcreate(sfmmu_t *, caddr_t, int, uint_t);
 394  394  static struct   hme_blk *sfmmu_hblk_alloc(sfmmu_t *, caddr_t,
 395  395                          struct hmehash_bucket *, uint_t, hmeblk_tag, uint_t,
 396  396                          uint_t);
 397  397  static caddr_t  sfmmu_hblk_unload(struct hat *, struct hme_blk *, caddr_t,
 398  398                          caddr_t, demap_range_t *, uint_t);
 399  399  static caddr_t  sfmmu_hblk_sync(struct hat *, struct hme_blk *, caddr_t,
 400  400                          caddr_t, int);
 401  401  static void     sfmmu_hblk_free(struct hme_blk **);
 402  402  static void     sfmmu_hblks_list_purge(struct hme_blk **, int);
 403  403  static uint_t   sfmmu_get_free_hblk(struct hme_blk **, uint_t);
 404  404  static uint_t   sfmmu_put_free_hblk(struct hme_blk *, uint_t);
 405  405  static struct hme_blk *sfmmu_hblk_steal(int);
 406  406  static int      sfmmu_steal_this_hblk(struct hmehash_bucket *,
 407  407                          struct hme_blk *, uint64_t, struct hme_blk *);
 408  408  static caddr_t  sfmmu_hblk_unlock(struct hme_blk *, caddr_t, caddr_t);
 409  409  
 410  410  static void     hat_do_memload_array(struct hat *, caddr_t, size_t,
 411  411                      struct page **, uint_t, uint_t, uint_t);
 412  412  static void     hat_do_memload(struct hat *, caddr_t, struct page *,
 413  413                      uint_t, uint_t, uint_t);
 414  414  static void     sfmmu_memload_batchsmall(struct hat *, caddr_t, page_t **,
 415  415                      uint_t, uint_t, pgcnt_t, uint_t);
 416  416  void            sfmmu_tteload(struct hat *, tte_t *, caddr_t, page_t *,
 417  417                          uint_t);
 418  418  static int      sfmmu_tteload_array(sfmmu_t *, tte_t *, caddr_t, page_t **,
 419  419                          uint_t, uint_t);
 420  420  static struct hmehash_bucket *sfmmu_tteload_acquire_hashbucket(sfmmu_t *,
 421  421                                          caddr_t, int, uint_t);
 422  422  static struct hme_blk *sfmmu_tteload_find_hmeblk(sfmmu_t *,
 423  423                          struct hmehash_bucket *, caddr_t, uint_t, uint_t,
 424  424                          uint_t);
 425  425  static int      sfmmu_tteload_addentry(sfmmu_t *, struct hme_blk *, tte_t *,
 426  426                          caddr_t, page_t **, uint_t, uint_t);
 427  427  static void     sfmmu_tteload_release_hashbucket(struct hmehash_bucket *);
 428  428  
 429  429  static int      sfmmu_pagearray_setup(caddr_t, page_t **, tte_t *, int);
 430  430  static pfn_t    sfmmu_uvatopfn(caddr_t, sfmmu_t *, tte_t *);
 431  431  void            sfmmu_memtte(tte_t *, pfn_t, uint_t, int);
 432  432  #ifdef VAC
 433  433  static void     sfmmu_vac_conflict(struct hat *, caddr_t, page_t *);
 434  434  static int      sfmmu_vacconflict_array(caddr_t, page_t *, int *);
 435  435  int     tst_tnc(page_t *pp, pgcnt_t);
 436  436  void    conv_tnc(page_t *pp, int);
 437  437  #endif
 438  438  
 439  439  static void     sfmmu_get_ctx(sfmmu_t *);
 440  440  static void     sfmmu_free_sfmmu(sfmmu_t *);
 441  441  
 442  442  static void     sfmmu_ttesync(struct hat *, caddr_t, tte_t *, page_t *);
 443  443  static void     sfmmu_chgattr(struct hat *, caddr_t, size_t, uint_t, int);
 444  444  
 445  445  cpuset_t        sfmmu_pageunload(page_t *, struct sf_hment *, int);
 446  446  static void     hat_pagereload(struct page *, struct page *);
 447  447  static cpuset_t sfmmu_pagesync(page_t *, struct sf_hment *, uint_t);
 448  448  #ifdef VAC
 449  449  void    sfmmu_page_cache_array(page_t *, int, int, pgcnt_t);
 450  450  static void     sfmmu_page_cache(page_t *, int, int, int);
 451  451  #endif
 452  452  
 453  453  cpuset_t        sfmmu_rgntlb_demap(caddr_t, sf_region_t *,
 454  454      struct hme_blk *, int);
 455  455  static void     sfmmu_tlbcache_demap(caddr_t, sfmmu_t *, struct hme_blk *,
 456  456                          pfn_t, int, int, int, int);
 457  457  static void     sfmmu_ismtlbcache_demap(caddr_t, sfmmu_t *, struct hme_blk *,
 458  458                          pfn_t, int);
 459  459  static void     sfmmu_tlb_demap(caddr_t, sfmmu_t *, struct hme_blk *, int, int);
 460  460  static void     sfmmu_tlb_range_demap(demap_range_t *);
 461  461  static void     sfmmu_invalidate_ctx(sfmmu_t *);
 462  462  static void     sfmmu_sync_mmustate(sfmmu_t *);
 463  463  
 464  464  static void     sfmmu_tsbinfo_setup_phys(struct tsb_info *, pfn_t);
 465  465  static int      sfmmu_tsbinfo_alloc(struct tsb_info **, int, int, uint_t,
 466  466                          sfmmu_t *);
 467  467  static void     sfmmu_tsb_free(struct tsb_info *);
 468  468  static void     sfmmu_tsbinfo_free(struct tsb_info *);
 469  469  static int      sfmmu_init_tsbinfo(struct tsb_info *, int, int, uint_t,
 470  470                          sfmmu_t *);
 471  471  static void     sfmmu_tsb_chk_reloc(sfmmu_t *, hatlock_t *);
 472  472  static void     sfmmu_tsb_swapin(sfmmu_t *, hatlock_t *);
 473  473  static int      sfmmu_select_tsb_szc(pgcnt_t);
 474  474  static void     sfmmu_mod_tsb(sfmmu_t *, caddr_t, tte_t *, int);
 475  475  #define         sfmmu_load_tsb(sfmmup, vaddr, tte, szc) \
 476  476          sfmmu_mod_tsb(sfmmup, vaddr, tte, szc)
 477  477  #define         sfmmu_unload_tsb(sfmmup, vaddr, szc)    \
 478  478          sfmmu_mod_tsb(sfmmup, vaddr, NULL, szc)
 479  479  static void     sfmmu_copy_tsb(struct tsb_info *, struct tsb_info *);
 480  480  static tsb_replace_rc_t sfmmu_replace_tsb(sfmmu_t *, struct tsb_info *, uint_t,
 481  481      hatlock_t *, uint_t);
 482  482  static void     sfmmu_size_tsb(sfmmu_t *, int, uint64_t, uint64_t, int);
 483  483  
 484  484  #ifdef VAC
 485  485  void    sfmmu_cache_flush(pfn_t, int);
 486  486  void    sfmmu_cache_flushcolor(int, pfn_t);
 487  487  #endif
 488  488  static caddr_t  sfmmu_hblk_chgattr(sfmmu_t *, struct hme_blk *, caddr_t,
 489  489                          caddr_t, demap_range_t *, uint_t, int);
 490  490  
 491  491  static uint64_t sfmmu_vtop_attr(uint_t, int mode, tte_t *);
 492  492  static uint_t   sfmmu_ptov_attr(tte_t *);
 493  493  static caddr_t  sfmmu_hblk_chgprot(sfmmu_t *, struct hme_blk *, caddr_t,
 494  494                          caddr_t, demap_range_t *, uint_t);
 495  495  static uint_t   sfmmu_vtop_prot(uint_t, uint_t *);
 496  496  static int      sfmmu_idcache_constructor(void *, void *, int);
 497  497  static void     sfmmu_idcache_destructor(void *, void *);
 498  498  static int      sfmmu_hblkcache_constructor(void *, void *, int);
 499  499  static void     sfmmu_hblkcache_destructor(void *, void *);
 500  500  static void     sfmmu_hblkcache_reclaim(void *);
 501  501  static void     sfmmu_shadow_hcleanup(sfmmu_t *, struct hme_blk *,
 502  502                          struct hmehash_bucket *);
 503  503  static void     sfmmu_hblk_hash_rm(struct hmehash_bucket *, struct hme_blk *,
 504  504                          struct hme_blk *, struct hme_blk **, int);
 505  505  static void     sfmmu_hblk_hash_add(struct hmehash_bucket *, struct hme_blk *,
 506  506                          uint64_t);
 507  507  static struct hme_blk *sfmmu_check_pending_hblks(int);
 508  508  static void     sfmmu_free_hblks(sfmmu_t *, caddr_t, caddr_t, int);
 509  509  static void     sfmmu_cleanup_rhblk(sf_srd_t *, caddr_t, uint_t, int);
 510  510  static void     sfmmu_unload_hmeregion_va(sf_srd_t *, uint_t, caddr_t, caddr_t,
 511  511                          int, caddr_t *);
 512  512  static void     sfmmu_unload_hmeregion(sf_srd_t *, sf_region_t *);
 513  513  
 514  514  static void     sfmmu_rm_large_mappings(page_t *, int);
 515  515  
 516  516  static void     hat_lock_init(void);
 517  517  static void     hat_kstat_init(void);
 518  518  static int      sfmmu_kstat_percpu_update(kstat_t *ksp, int rw);
 519  519  static void     sfmmu_set_scd_rttecnt(sf_srd_t *, sf_scd_t *);
 520  520  static  int     sfmmu_is_rgnva(sf_srd_t *, caddr_t, ulong_t, ulong_t);
 521  521  static void     sfmmu_check_page_sizes(sfmmu_t *, int);
 522  522  int     fnd_mapping_sz(page_t *);
 523  523  static void     iment_add(struct ism_ment *,  struct hat *);
 524  524  static void     iment_sub(struct ism_ment *, struct hat *);
 525  525  static pgcnt_t  ism_tsb_entries(sfmmu_t *, int szc);
 526  526  extern void     sfmmu_setup_tsbinfo(sfmmu_t *);
 527  527  extern void     sfmmu_clear_utsbinfo(void);
 528  528  
 529  529  static void             sfmmu_ctx_wrap_around(mmu_ctx_t *, boolean_t);
 530  530  
 531  531  extern int vpm_enable;
 532  532  
 533  533  /* kpm globals */
 534  534  #ifdef  DEBUG
 535  535  /*
 536  536   * Enable trap level tsbmiss handling
 537  537   */
 538  538  int     kpm_tsbmtl = 1;
 539  539  
 540  540  /*
 541  541   * Flush the TLB on kpm mapout. Note: Xcalls are used (again) for the
 542  542   * required TLB shootdowns in this case, so handle w/ care. Off by default.
 543  543   */
 544  544  int     kpm_tlb_flush;
 545  545  #endif  /* DEBUG */
 546  546  
 547  547  static void     *sfmmu_vmem_xalloc_aligned_wrapper(vmem_t *, size_t, int);
 548  548  
 549  549  #ifdef DEBUG
 550  550  static void     sfmmu_check_hblk_flist();
 551  551  #endif
 552  552  
 553  553  /*
 554  554   * Semi-private sfmmu data structures.  Some of them are initialized in
 555  555   * startup or in hat_init. Some of them are private but accessed by
 556  556   * assembly code or mach_sfmmu.c
 557  557   */
 558  558  struct hmehash_bucket *uhme_hash;       /* user hmeblk hash table */
 559  559  struct hmehash_bucket *khme_hash;       /* kernel hmeblk hash table */
 560  560  uint64_t        uhme_hash_pa;           /* PA of uhme_hash */
 561  561  uint64_t        khme_hash_pa;           /* PA of khme_hash */
 562  562  int             uhmehash_num;           /* # of buckets in user hash table */
 563  563  int             khmehash_num;           /* # of buckets in kernel hash table */
 564  564  
 565  565  uint_t          max_mmu_ctxdoms = 0;    /* max context domains in the system */
 566  566  mmu_ctx_t       **mmu_ctxs_tbl;         /* global array of context domains */
 567  567  uint64_t        mmu_saved_gnum = 0;     /* to init incoming MMUs' gnums */
 568  568  
 569  569  #define DEFAULT_NUM_CTXS_PER_MMU 8192
 570  570  static uint_t   nctxs = DEFAULT_NUM_CTXS_PER_MMU;
 571  571  
 572  572  int             cache;                  /* describes system cache */
 573  573  
 574  574  caddr_t         ktsb_base;              /* kernel 8k-indexed tsb base address */
 575  575  uint64_t        ktsb_pbase;             /* kernel 8k-indexed tsb phys address */
 576  576  int             ktsb_szcode;            /* kernel 8k-indexed tsb size code */
 577  577  int             ktsb_sz;                /* kernel 8k-indexed tsb size */
 578  578  
 579  579  caddr_t         ktsb4m_base;            /* kernel 4m-indexed tsb base address */
 580  580  uint64_t        ktsb4m_pbase;           /* kernel 4m-indexed tsb phys address */
 581  581  int             ktsb4m_szcode;          /* kernel 4m-indexed tsb size code */
 582  582  int             ktsb4m_sz;              /* kernel 4m-indexed tsb size */
 583  583  
 584  584  uint64_t        kpm_tsbbase;            /* kernel seg_kpm 4M TSB base address */
 585  585  int             kpm_tsbsz;              /* kernel seg_kpm 4M TSB size code */
 586  586  uint64_t        kpmsm_tsbbase;          /* kernel seg_kpm 8K TSB base address */
 587  587  int             kpmsm_tsbsz;            /* kernel seg_kpm 8K TSB size code */
 588  588  
 589  589  #ifndef sun4v
 590  590  int             utsb_dtlb_ttenum = -1;  /* index in TLB for utsb locked TTE */
 591  591  int             utsb4m_dtlb_ttenum = -1; /* index in TLB for 4M TSB TTE */
 592  592  int             dtlb_resv_ttenum;       /* index in TLB of first reserved TTE */
 593  593  caddr_t         utsb_vabase;            /* reserved kernel virtual memory */
 594  594  caddr_t         utsb4m_vabase;          /* for trap handler TSB accesses */
 595  595  #endif /* sun4v */
 596  596  uint64_t        tsb_alloc_bytes = 0;    /* bytes allocated to TSBs */
 597  597  vmem_t          *kmem_tsb_default_arena[NLGRPS_MAX];    /* For dynamic TSBs */
 598  598  vmem_t          *kmem_bigtsb_default_arena[NLGRPS_MAX]; /* dynamic 256M TSBs */
 599  599  
 600  600  /*
 601  601   * Size to use for TSB slabs.  Future platforms that support page sizes
 602  602   * larger than 4M may wish to change these values, and provide their own
 603  603   * assembly macros for building and decoding the TSB base register contents.
 604  604   * Note disable_large_pages will override the value set here.
 605  605   */
 606  606  static  uint_t tsb_slab_ttesz = TTE4M;
 607  607  size_t  tsb_slab_size = MMU_PAGESIZE4M;
 608  608  uint_t  tsb_slab_shift = MMU_PAGESHIFT4M;
 609  609  /* PFN mask for TTE */
 610  610  size_t  tsb_slab_mask = MMU_PAGEOFFSET4M >> MMU_PAGESHIFT;
 611  611  
 612  612  /*
 613  613   * Size to use for TSB slabs.  These are used only when 256M tsb arenas
 614  614   * exist.
 615  615   */
 616  616  static uint_t   bigtsb_slab_ttesz = TTE256M;
 617  617  static size_t   bigtsb_slab_size = MMU_PAGESIZE256M;
 618  618  static uint_t   bigtsb_slab_shift = MMU_PAGESHIFT256M;
 619  619  /* 256M page alignment for 8K pfn */
 620  620  static size_t   bigtsb_slab_mask = MMU_PAGEOFFSET256M >> MMU_PAGESHIFT;
 621  621  
 622  622  /* largest TSB size to grow to, will be smaller on smaller memory systems */
 623  623  static int      tsb_max_growsize = 0;
 624  624  
 625  625  /*
 626  626   * Tunable parameters dealing with TSB policies.
 627  627   */
 628  628  
 629  629  /*
 630  630   * This undocumented tunable forces all 8K TSBs to be allocated from
 631  631   * the kernel heap rather than from the kmem_tsb_default_arena arenas.
 632  632   */
 633  633  #ifdef  DEBUG
 634  634  int     tsb_forceheap = 0;
 635  635  #endif  /* DEBUG */
 636  636  
 637  637  /*
 638  638   * Decide whether to use per-lgroup arenas, or one global set of
 639  639   * TSB arenas.  The default is not to break up per-lgroup, since
 640  640   * most platforms don't recognize any tangible benefit from it.
 641  641   */
 642  642  int     tsb_lgrp_affinity = 0;
 643  643  
 644  644  /*
 645  645   * Used for growing the TSB based on the process RSS.
 646  646   * tsb_rss_factor is based on the smallest TSB, and is
 647  647   * shifted by the TSB size to determine if we need to grow.
 648  648   * The default will grow the TSB if the number of TTEs for
 649  649   * this page size exceeds 75% of the number of TSB entries,
 650  650   * which should _almost_ eliminate all conflict misses
 651  651   * (at the expense of using up lots and lots of memory).
 652  652   */
 653  653  #define TSB_RSS_FACTOR          (TSB_ENTRIES(TSB_MIN_SZCODE) * 0.75)
 654  654  #define SFMMU_RSS_TSBSIZE(tsbszc)       (tsb_rss_factor << tsbszc)
 655  655  #define SELECT_TSB_SIZECODE(pgcnt) ( \
 656  656          (enable_tsb_rss_sizing)? sfmmu_select_tsb_szc(pgcnt) : \
 657  657          default_tsb_size)
 658  658  #define TSB_OK_SHRINK() \
 659  659          (tsb_alloc_bytes > tsb_alloc_hiwater || freemem < desfree)
 660  660  #define TSB_OK_GROW()   \
 661  661          (tsb_alloc_bytes < tsb_alloc_hiwater && freemem > desfree)
 662  662  
 663      -int     enable_tsb_rss_sizing = 1;
 664      -int     tsb_rss_factor  = (int)TSB_RSS_FACTOR;
      663 +volatile int    enable_tsb_rss_sizing = 1;
      664 +volatile int    tsb_rss_factor = (int)TSB_RSS_FACTOR;
 665  665  
 666  666  /* which TSB size code to use for new address spaces or if rss sizing off */
 667      -int default_tsb_size = TSB_8K_SZCODE;
      667 +volatile int default_tsb_size = TSB_8K_SZCODE;
 668  668  
 669  669  static uint64_t tsb_alloc_hiwater; /* limit TSB reserved memory */
 670      -uint64_t tsb_alloc_hiwater_factor; /* tsb_alloc_hiwater = physmem / this */
      670 +volatile uint64_t tsb_alloc_hiwater_factor;     /* tsb_alloc_hiwater =  */
      671 +                                                /*      physmem / this  */
 671  672  #define TSB_ALLOC_HIWATER_FACTOR_DEFAULT        32
 672  673  
 673  674  #ifdef DEBUG
 674  675  static int tsb_random_size = 0; /* set to 1 to test random tsb sizes on alloc */
 675  676  static int tsb_grow_stress = 0; /* if set to 1, keep replacing TSB w/ random */
 676  677  static int tsb_alloc_mtbf = 0;  /* fail allocation every n attempts */
 677  678  static int tsb_alloc_fail_mtbf = 0;
 678  679  static int tsb_alloc_count = 0;
 679  680  #endif /* DEBUG */
 680  681  
 681  682  /* if set to 1, will remap valid TTEs when growing TSB. */
 682  683  int tsb_remap_ttes = 1;
 683  684  
 684  685  /*
 685  686   * If we have more than this many mappings, allocate a second TSB.
 686  687   * This default is chosen because the I/D fully associative TLBs are
 687  688   * assumed to have at least 8 available entries. Platforms with a
 688  689   * larger fully-associative TLB could probably override the default.
 689  690   */
 690  691  
 691  692  #ifdef sun4v
 692  693  int tsb_sectsb_threshold = 0;
 693  694  #else
 694  695  int tsb_sectsb_threshold = 8;
 695  696  #endif
 696  697  
 697  698  /*
 698  699   * kstat data
 699  700   */
 700  701  struct sfmmu_global_stat sfmmu_global_stat;
 701  702  struct sfmmu_tsbsize_stat sfmmu_tsbsize_stat;
 702  703  
 703  704  /*
 704  705   * Global data
 705  706   */
 706  707  sfmmu_t         *ksfmmup;               /* kernel's hat id */
 707  708  
 708  709  #ifdef DEBUG
 709  710  static void     chk_tte(tte_t *, tte_t *, tte_t *, struct hme_blk *);
 710  711  #endif
 711  712  
 712  713  /* sfmmu locking operations */
 713  714  static kmutex_t *sfmmu_mlspl_enter(struct page *, int);
 714  715  static int      sfmmu_mlspl_held(struct page *, int);
 715  716  
 716  717  kmutex_t *sfmmu_page_enter(page_t *);
 717  718  void    sfmmu_page_exit(kmutex_t *);
 718  719  int     sfmmu_page_spl_held(struct page *);
 719  720  
 720  721  /* sfmmu internal locking operations - accessed directly */
 721  722  static void     sfmmu_mlist_reloc_enter(page_t *, page_t *,
 722  723                                  kmutex_t **, kmutex_t **);
 723  724  static void     sfmmu_mlist_reloc_exit(kmutex_t *, kmutex_t *);
 724  725  static hatlock_t *
 725  726                  sfmmu_hat_enter(sfmmu_t *);
 726  727  static hatlock_t *
 727  728                  sfmmu_hat_tryenter(sfmmu_t *);
 728  729  static void     sfmmu_hat_exit(hatlock_t *);
 729  730  static void     sfmmu_hat_lock_all(void);
 730  731  static void     sfmmu_hat_unlock_all(void);
 731  732  static void     sfmmu_ismhat_enter(sfmmu_t *, int);
 732  733  static void     sfmmu_ismhat_exit(sfmmu_t *, int);
 733  734  
 734  735  kpm_hlk_t       *kpmp_table;
 735  736  uint_t          kpmp_table_sz;  /* must be a power of 2 */
 736  737  uchar_t         kpmp_shift;
 737  738  
 738  739  kpm_shlk_t      *kpmp_stable;
 739  740  uint_t          kpmp_stable_sz; /* must be a power of 2 */
 740  741  
 741  742  /*
 742  743   * SPL_TABLE_SIZE is 2 * NCPU, but no smaller than 128.
 743  744   * SPL_SHIFT is log2(SPL_TABLE_SIZE).
 744  745   */
 745  746  #if ((2*NCPU_P2) > 128)
 746  747  #define SPL_SHIFT       ((unsigned)(NCPU_LOG2 + 1))
 747  748  #else
 748  749  #define SPL_SHIFT       7U
 749  750  #endif
 750  751  #define SPL_TABLE_SIZE  (1U << SPL_SHIFT)
 751  752  #define SPL_MASK        (SPL_TABLE_SIZE - 1)
 752  753  
 753  754  /*
 754  755   * We shift by PP_SHIFT to take care of the low-order 0 bits of a page_t
 755  756   * and by multiples of SPL_SHIFT to get as many varied bits as we can.
 756  757   */
 757  758  #define SPL_INDEX(pp) \
 758  759          ((((uintptr_t)(pp) >> PP_SHIFT) ^ \
 759  760          ((uintptr_t)(pp) >> (PP_SHIFT + SPL_SHIFT)) ^ \
 760  761          ((uintptr_t)(pp) >> (PP_SHIFT + SPL_SHIFT * 2)) ^ \
 761  762          ((uintptr_t)(pp) >> (PP_SHIFT + SPL_SHIFT * 3))) & \
 762  763          SPL_MASK)
 763  764  
 764  765  #define SPL_HASH(pp)    \
 765  766          (&sfmmu_page_lock[SPL_INDEX(pp)].pad_mutex)
 766  767  
 767  768  static  pad_mutex_t     sfmmu_page_lock[SPL_TABLE_SIZE];
 768  769  
 769  770  /* Array of mutexes protecting a page's mapping list and p_nrm field. */
 770  771  
 771  772  #define MML_TABLE_SIZE  SPL_TABLE_SIZE
 772  773  #define MLIST_HASH(pp)  (&mml_table[SPL_INDEX(pp)].pad_mutex)
 773  774  
 774  775  static pad_mutex_t      mml_table[MML_TABLE_SIZE];
 775  776  
 776  777  /*
 777  778   * hat_unload_callback() will group together callbacks in order
 778  779   * to avoid xt_sync() calls.  This is the maximum size of the group.
 779  780   */
 780  781  #define MAX_CB_ADDR     32
 781  782  
 782  783  tte_t   hw_tte;
 783  784  static ulong_t sfmmu_dmr_maxbit = DMR_MAXBIT;
 784  785  
 785  786  static char     *mmu_ctx_kstat_names[] = {
 786  787          "mmu_ctx_tsb_exceptions",
 787  788          "mmu_ctx_tsb_raise_exception",
 788  789          "mmu_ctx_wrap_around",
 789  790  };
 790  791  
 791  792  /*
 792  793   * Wrapper for vmem_xalloc since vmem_create only allows limited
 793  794   * parameters for vm_source_alloc functions.  This function allows us
 794  795   * to specify alignment consistent with the size of the object being
 795  796   * allocated.
 796  797   */
 797  798  static void *
 798  799  sfmmu_vmem_xalloc_aligned_wrapper(vmem_t *vmp, size_t size, int vmflag)
 799  800  {
 800  801          return (vmem_xalloc(vmp, size, size, 0, 0, NULL, NULL, vmflag));
 801  802  }
 802  803  
 803  804  /* Common code for setting tsb_alloc_hiwater. */
 804  805  #define SFMMU_SET_TSB_ALLOC_HIWATER(pages)      tsb_alloc_hiwater = \
 805  806                  ptob(pages) / tsb_alloc_hiwater_factor
 806  807  
 807  808  /*
 808  809   * Set tsb_max_growsize to allow at most all of physical memory to be mapped by
 809  810   * a single TSB.  physmem is the number of physical pages so we need physmem 8K
 810  811   * TTEs to represent all those physical pages.  We round this up by using
 811  812   * 1<<highbit().  To figure out which size code to use, remember that the size
 812  813   * code is just an amount to shift the smallest TSB size to get the size of
 813  814   * this TSB.  So we subtract that size, TSB_START_SIZE, from highbit() (or
 814  815   * highbit() - 1) to get the size code for the smallest TSB that can represent
 815  816   * all of physical memory, while erring on the side of too much.
 816  817   *
 817  818   * Restrict tsb_max_growsize to make sure that:
 818  819   *      1) TSBs can't grow larger than the TSB slab size
 819  820   *      2) TSBs can't grow larger than UTSB_MAX_SZCODE.
 820  821   */
 821  822  #define SFMMU_SET_TSB_MAX_GROWSIZE(pages) {                             \
 822  823          int     _i, _szc, _slabszc, _tsbszc;                            \
 823  824                                                                          \
 824  825          _i = highbit(pages);                                            \
 825  826          if ((1 << (_i - 1)) == (pages))                                 \
 826  827                  _i--;           /* 2^n case, round down */              \
 827  828          _szc = _i - TSB_START_SIZE;                                     \
 828  829          _slabszc = bigtsb_slab_shift - (TSB_START_SIZE + TSB_ENTRY_SHIFT); \
 829  830          _tsbszc = MIN(_szc, _slabszc);                                  \
 830  831          tsb_max_growsize = MIN(_tsbszc, UTSB_MAX_SZCODE);               \
 831  832  }
 832  833  
 833  834  /*
 834  835   * Given a pointer to an sfmmu and a TTE size code, return a pointer to the
 835  836   * tsb_info which handles that TTE size.
 836  837   */
 837  838  #define SFMMU_GET_TSBINFO(tsbinfop, sfmmup, tte_szc) {                  \
 838  839          (tsbinfop) = (sfmmup)->sfmmu_tsb;                               \
 839  840          ASSERT(((tsbinfop)->tsb_flags & TSB_SHAREDCTX) ||               \
 840  841              sfmmu_hat_lock_held(sfmmup));                               \
 841  842          if ((tte_szc) >= TTE4M) {                                       \
 842  843                  ASSERT((tsbinfop) != NULL);                             \
 843  844                  (tsbinfop) = (tsbinfop)->tsb_next;                      \
 844  845          }                                                               \
 845  846  }
 846  847  
 847  848  /*
 848  849   * Macro to use to unload entries from the TSB.
 849  850   * It has knowledge of which page sizes get replicated in the TSB
 850  851   * and will call the appropriate unload routine for the appropriate size.
 851  852   */
 852  853  #define SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, ismhat)         \
 853  854  {                                                                       \
 854  855          int ttesz = get_hblk_ttesz(hmeblkp);                            \
 855  856          if (ttesz == TTE8K || ttesz == TTE4M) {                         \
 856  857                  sfmmu_unload_tsb(sfmmup, addr, ttesz);                  \
 857  858          } else {                                                        \
 858  859                  caddr_t sva = ismhat ? addr :                           \
 859  860                      (caddr_t)get_hblk_base(hmeblkp);                    \
 860  861                  caddr_t eva = sva + get_hblk_span(hmeblkp);             \
 861  862                  ASSERT(addr >= sva && addr < eva);                      \
 862  863                  sfmmu_unload_tsb_range(sfmmup, sva, eva, ttesz);        \
 863  864          }                                                               \
 864  865  }
 865  866  
 866  867  
 867  868  /* Update tsb_alloc_hiwater after memory is configured. */
 868  869  /*ARGSUSED*/
 869  870  static void
 870  871  sfmmu_update_post_add(void *arg, pgcnt_t delta_pages)
 871  872  {
 872  873          /* Assumes physmem has already been updated. */
 873  874          SFMMU_SET_TSB_ALLOC_HIWATER(physmem);
 874  875          SFMMU_SET_TSB_MAX_GROWSIZE(physmem);
 875  876  }
 876  877  
 877  878  /*
 878  879   * Update tsb_alloc_hiwater before memory is deleted.  We'll do nothing here
 879  880   * and update tsb_alloc_hiwater and tsb_max_growsize after the memory is
 880  881   * deleted.
 881  882   */
 882  883  /*ARGSUSED*/
 883  884  static int
 884  885  sfmmu_update_pre_del(void *arg, pgcnt_t delta_pages)
 885  886  {
 886  887          return (0);
 887  888  }
 888  889  
 889  890  /* Update tsb_alloc_hiwater after memory fails to be unconfigured. */
 890  891  /*ARGSUSED*/
 891  892  static void
 892  893  sfmmu_update_post_del(void *arg, pgcnt_t delta_pages, int cancelled)
 893  894  {
 894  895          /*
 895  896           * Whether the delete was cancelled or not, just go ahead and update
 896  897           * tsb_alloc_hiwater and tsb_max_growsize.
 897  898           */
 898  899          SFMMU_SET_TSB_ALLOC_HIWATER(physmem);
 899  900          SFMMU_SET_TSB_MAX_GROWSIZE(physmem);
 900  901  }
 901  902  
 902  903  static kphysm_setup_vector_t sfmmu_update_vec = {
 903  904          KPHYSM_SETUP_VECTOR_VERSION,    /* version */
 904  905          sfmmu_update_post_add,          /* post_add */
 905  906          sfmmu_update_pre_del,           /* pre_del */
 906  907          sfmmu_update_post_del           /* post_del */
 907  908  };
 908  909  
 909  910  
 910  911  /*
 911  912   * HME_BLK HASH PRIMITIVES
 912  913   */
 913  914  
 914  915  /*
 915  916   * Enter a hme on the mapping list for page pp.
 916  917   * When large pages are more prevalent in the system we might want to
 917  918   * keep the mapping list in ascending order by the hment size. For now,
 918  919   * small pages are more frequent, so don't slow it down.
 919  920   */
 920  921  #define HME_ADD(hme, pp)                                        \
 921  922  {                                                               \
 922  923          ASSERT(sfmmu_mlist_held(pp));                           \
 923  924                                                                  \
 924  925          hme->hme_prev = NULL;                                   \
 925  926          hme->hme_next = pp->p_mapping;                          \
 926  927          hme->hme_page = pp;                                     \
 927  928          if (pp->p_mapping) {                                    \
 928  929                  ((struct sf_hment *)(pp->p_mapping))->hme_prev = hme;\
 929  930                  ASSERT(pp->p_share > 0);                        \
 930  931          } else  {                                               \
 931  932                  /* EMPTY */                                     \
 932  933                  ASSERT(pp->p_share == 0);                       \
 933  934          }                                                       \
 934  935          pp->p_mapping = hme;                                    \
 935  936          pp->p_share++;                                          \
 936  937  }
 937  938  
 938  939  /*
 939  940   * Remove a hme from the mapping list for page pp.
 940  941   * If we are unmapping a large translation, we need to make sure that the
 941  942   * change is reflected in the corresponding bit of the p_index field.
 942  943   */
 943  944  #define HME_SUB(hme, pp)                                        \
 944  945  {                                                               \
 945  946          ASSERT(sfmmu_mlist_held(pp));                           \
 946  947          ASSERT(hme->hme_page == pp || IS_PAHME(hme));           \
 947  948                                                                  \
 948  949          if (pp->p_mapping == NULL) {                            \
 949  950                  panic("hme_remove - no mappings");              \
 950  951          }                                                       \
 951  952                                                                  \
 952  953          membar_stst();  /* ensure previous stores finish */     \
 953  954                                                                  \
 954  955          ASSERT(pp->p_share > 0);                                \
 955  956          pp->p_share--;                                          \
 956  957                                                                  \
 957  958          if (hme->hme_prev) {                                    \
 958  959                  ASSERT(pp->p_mapping != hme);                   \
 959  960                  ASSERT(hme->hme_prev->hme_page == pp ||         \
 960  961                          IS_PAHME(hme->hme_prev));               \
 961  962                  hme->hme_prev->hme_next = hme->hme_next;        \
 962  963          } else {                                                \
 963  964                  ASSERT(pp->p_mapping == hme);                   \
 964  965                  pp->p_mapping = hme->hme_next;                  \
 965  966                  ASSERT((pp->p_mapping == NULL) ?                \
 966  967                          (pp->p_share == 0) : 1);                \
 967  968          }                                                       \
 968  969                                                                  \
 969  970          if (hme->hme_next) {                                    \
 970  971                  ASSERT(hme->hme_next->hme_page == pp ||         \
 971  972                          IS_PAHME(hme->hme_next));               \
 972  973                  hme->hme_next->hme_prev = hme->hme_prev;        \
 973  974          }                                                       \
 974  975                                                                  \
 975  976          /* zero out the entry */                                \
 976  977          hme->hme_next = NULL;                                   \
 977  978          hme->hme_prev = NULL;                                   \
 978  979          hme->hme_page = NULL;                                   \
 979  980                                                                  \
 980  981          if (hme_size(hme) > TTE8K) {                            \
 981  982                  /* remove mappings for remainder of large pg */ \
 982  983                  sfmmu_rm_large_mappings(pp, hme_size(hme));     \
 983  984          }                                                       \
 984  985  }
 985  986  
 986  987  /*
 987  988   * This function returns the hment given the hme_blk and a vaddr.
 988  989   * It assumes addr has already been checked to belong to hme_blk's
 989  990   * range.
 990  991   */
 991  992  #define HBLKTOHME(hment, hmeblkp, addr)                                 \
 992  993  {                                                                       \
 993  994          int index;                                                      \
 994  995          HBLKTOHME_IDX(hment, hmeblkp, addr, index)                      \
 995  996  }
 996  997  
 997  998  /*
 998  999   * Version of HBLKTOHME that also returns the index in hmeblkp
 999 1000   * of the hment.
1000 1001   */
1001 1002  #define HBLKTOHME_IDX(hment, hmeblkp, addr, idx)                        \
1002 1003  {                                                                       \
1003 1004          ASSERT(in_hblk_range((hmeblkp), (addr)));                       \
1004 1005                                                                          \
1005 1006          if (get_hblk_ttesz(hmeblkp) == TTE8K) {                         \
1006 1007                  idx = (((uintptr_t)(addr) >> MMU_PAGESHIFT) & (NHMENTS-1)); \
1007 1008          } else                                                          \
1008 1009                  idx = 0;                                                \
1009 1010                                                                          \
1010 1011          (hment) = &(hmeblkp)->hblk_hme[idx];                            \
1011 1012  }
1012 1013  
1013 1014  /*
1014 1015   * Disable any page sizes not supported by the CPU
1015 1016   */
1016 1017  void
1017 1018  hat_init_pagesizes()
1018 1019  {
1019 1020          int             i;
1020 1021  
1021 1022          mmu_exported_page_sizes = 0;
1022 1023          for (i = TTE8K; i < max_mmu_page_sizes; i++) {
1023 1024  
1024 1025                  szc_2_userszc[i] = (uint_t)-1;
1025 1026                  userszc_2_szc[i] = (uint_t)-1;
1026 1027  
1027 1028                  if ((mmu_exported_pagesize_mask & (1 << i)) == 0) {
1028 1029                          disable_large_pages |= (1 << i);
1029 1030                  } else {
1030 1031                          szc_2_userszc[i] = mmu_exported_page_sizes;
1031 1032                          userszc_2_szc[mmu_exported_page_sizes] = i;
1032 1033                          mmu_exported_page_sizes++;
1033 1034                  }
1034 1035          }
1035 1036  
1036 1037          disable_ism_large_pages |= disable_large_pages;
1037 1038          disable_auto_data_large_pages = disable_large_pages;
1038 1039          disable_auto_text_large_pages = disable_large_pages;
1039 1040  
1040 1041          /*
1041 1042           * Initialize mmu-specific large page sizes.
1042 1043           */
1043 1044          if (&mmu_large_pages_disabled) {
1044 1045                  disable_large_pages |= mmu_large_pages_disabled(HAT_LOAD);
1045 1046                  disable_ism_large_pages |=
1046 1047                      mmu_large_pages_disabled(HAT_LOAD_SHARE);
1047 1048                  disable_auto_data_large_pages |=
1048 1049                      mmu_large_pages_disabled(HAT_AUTO_DATA);
1049 1050                  disable_auto_text_large_pages |=
1050 1051                      mmu_large_pages_disabled(HAT_AUTO_TEXT);
1051 1052          }
1052 1053  }
1053 1054  
1054 1055  /*
1055 1056   * Initialize the hardware address translation structures.
1056 1057   */
1057 1058  void
1058 1059  hat_init(void)
1059 1060  {
1060 1061          int             i;
1061 1062          uint_t          sz;
1062 1063          size_t          size;
1063 1064  
1064 1065          hat_lock_init();
1065 1066          hat_kstat_init();
1066 1067  
1067 1068          /*
1068 1069           * Hardware-only bits in a TTE
1069 1070           */
1070 1071          MAKE_TTE_MASK(&hw_tte);
1071 1072  
1072 1073          hat_init_pagesizes();
1073 1074  
1074 1075          /* Initialize the hash locks */
1075 1076          for (i = 0; i < khmehash_num; i++) {
1076 1077                  mutex_init(&khme_hash[i].hmehash_mutex, NULL,
1077 1078                      MUTEX_DEFAULT, NULL);
1078 1079                  khme_hash[i].hmeh_nextpa = HMEBLK_ENDPA;
1079 1080          }
1080 1081          for (i = 0; i < uhmehash_num; i++) {
1081 1082                  mutex_init(&uhme_hash[i].hmehash_mutex, NULL,
1082 1083                      MUTEX_DEFAULT, NULL);
1083 1084                  uhme_hash[i].hmeh_nextpa = HMEBLK_ENDPA;
1084 1085          }
1085 1086          khmehash_num--;         /* make sure counter starts from 0 */
1086 1087          uhmehash_num--;         /* make sure counter starts from 0 */
1087 1088  
1088 1089          /*
1089 1090           * Allocate context domain structures.
1090 1091           *
1091 1092           * A platform may choose to modify max_mmu_ctxdoms in
1092 1093           * set_platform_defaults(). If a platform does not define
1093 1094           * a set_platform_defaults() or does not choose to modify
1094 1095           * max_mmu_ctxdoms, it gets one MMU context domain for every CPU.
1095 1096           *
1096 1097           * For all platforms that have CPUs sharing MMUs, this
1097 1098           * value must be defined.
1098 1099           */
1099 1100          if (max_mmu_ctxdoms == 0)
1100 1101                  max_mmu_ctxdoms = max_ncpus;
1101 1102  
1102 1103          size = max_mmu_ctxdoms * sizeof (mmu_ctx_t *);
1103 1104          mmu_ctxs_tbl = kmem_zalloc(size, KM_SLEEP);
1104 1105  
1105 1106          /* mmu_ctx_t is 64-byte aligned */
1106 1107          mmuctxdom_cache = kmem_cache_create("mmuctxdom_cache",
1107 1108              sizeof (mmu_ctx_t), 64, NULL, NULL, NULL, NULL, NULL, 0);
1108 1109          /*
1109 1110           * MMU context domain initialization for the Boot CPU.
1110 1111           * This needs the context domains array allocated above.
1111 1112           */
1112 1113          mutex_enter(&cpu_lock);
1113 1114          sfmmu_cpu_init(CPU);
1114 1115          mutex_exit(&cpu_lock);
1115 1116  
1116 1117          /*
1117 1118           * Initialize the ism mapping list lock.
1118 1119           */
1119 1120  
1120 1121          mutex_init(&ism_mlist_lock, NULL, MUTEX_DEFAULT, NULL);
1121 1122  
1122 1123          /*
1123 1124           * Each sfmmu structure carries an array of MMU context info
1124 1125           * structures, one per context domain. The size of this array depends
1125 1126           * on the maximum number of context domains. So, the size of the
1126 1127           * sfmmu structure varies per platform.
1127 1128           *
1128 1129           * sfmmu is allocated from static arena, because trap
1129 1130           * handler at TL > 0 is not allowed to touch kernel relocatable
1130 1131           * memory. sfmmu's alignment is changed to 64 bytes from
1131 1132           * default 8 bytes, as the lower 6 bits will be used to pass
1132 1133           * pgcnt to vtag_flush_pgcnt_tl1.
1133 1134           */
1134 1135          size = sizeof (sfmmu_t) + sizeof (sfmmu_ctx_t) * (max_mmu_ctxdoms - 1);
1135 1136  
1136 1137          sfmmuid_cache = kmem_cache_create("sfmmuid_cache", size,
1137 1138              64, sfmmu_idcache_constructor, sfmmu_idcache_destructor,
1138 1139              NULL, NULL, static_arena, 0);
1139 1140  
1140 1141          sfmmu_tsbinfo_cache = kmem_cache_create("sfmmu_tsbinfo_cache",
1141 1142              sizeof (struct tsb_info), 0, NULL, NULL, NULL, NULL, NULL, 0);
1142 1143  
1143 1144          /*
1144 1145           * Since we only use the tsb8k cache to "borrow" pages for TSBs
1145 1146           * from the heap when low on memory or when TSB_FORCEALLOC is
1146 1147           * specified, don't use magazines to cache them--we want to return
1147 1148           * them to the system as quickly as possible.
1148 1149           */
1149 1150          sfmmu_tsb8k_cache = kmem_cache_create("sfmmu_tsb8k_cache",
1150 1151              MMU_PAGESIZE, MMU_PAGESIZE, NULL, NULL, NULL, NULL,
1151 1152              static_arena, KMC_NOMAGAZINE);
1152 1153  
1153 1154          /*
1154 1155           * Set tsb_alloc_hiwater to 1/tsb_alloc_hiwater_factor of physical
1155 1156           * memory, which corresponds to the old static reserve for TSBs.
1156 1157           * tsb_alloc_hiwater_factor defaults to 32.  This caps the amount of
1157 1158           * memory we'll allocate for TSB slabs; beyond this point TSB
1158 1159           * allocations will be taken from the kernel heap (via
1159 1160           * sfmmu_tsb8k_cache) and will be throttled as would any other kmem
1160 1161           * consumer.
1161 1162           */
1162 1163          if (tsb_alloc_hiwater_factor == 0) {
1163 1164                  tsb_alloc_hiwater_factor = TSB_ALLOC_HIWATER_FACTOR_DEFAULT;
1164 1165          }
1165 1166          SFMMU_SET_TSB_ALLOC_HIWATER(physmem);
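        /*
         * For example, with 32 GB of physical memory and the default factor
         * of 32, the high water mark above works out to roughly 1 GB worth
         * of pages as the ceiling for TSB slab allocations.
         */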
1166 1167  
1167 1168          for (sz = tsb_slab_ttesz; sz > 0; sz--) {
1168 1169                  if (!(disable_large_pages & (1 << sz)))
1169 1170                          break;
1170 1171          }
1171 1172  
1172 1173          if (sz < tsb_slab_ttesz) {
1173 1174                  tsb_slab_ttesz = sz;
1174 1175                  tsb_slab_shift = MMU_PAGESHIFT + (sz << 1) + sz;
1175 1176                  tsb_slab_size = 1 << tsb_slab_shift;
1176 1177                  tsb_slab_mask = (1 << (tsb_slab_shift - MMU_PAGESHIFT)) - 1;
1177 1178                  use_bigtsb_arena = 0;
1178 1179          } else if (use_bigtsb_arena &&
1179 1180              (disable_large_pages & (1 << bigtsb_slab_ttesz))) {
1180 1181                  use_bigtsb_arena = 0;
1181 1182          }
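        /*
         * Note that MMU_PAGESHIFT + (sz << 1) + sz is simply
         * MMU_PAGESHIFT + 3 * sz, reflecting the 8x step between successive
         * page sizes.  For example, if the default 4M slab size is disabled
         * but 512K pages are not, sz is left at TTE512K (2) and the slab
         * shift becomes 13 + 6 = 19 (with MMU_PAGESHIFT == 13), i.e.
         * 512K slabs.
         */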
1182 1183  
1183 1184          if (!use_bigtsb_arena) {
1184 1185                  bigtsb_slab_shift = tsb_slab_shift;
1185 1186          }
1186 1187          SFMMU_SET_TSB_MAX_GROWSIZE(physmem);
1187 1188  
1188 1189          /*
1189 1190           * On smaller memory systems, allocate TSB memory in smaller chunks
1190 1191           * than the default 4M slab size. We also honor disable_large_pages
1191 1192           * here.
1192 1193           *
1193 1194           * The trap handlers need to be patched with the final slab shift,
1194 1195           * since they need to be able to construct the TSB pointer at runtime.
1195 1196           */
1196 1197          if ((tsb_max_growsize <= TSB_512K_SZCODE) &&
1197 1198              !(disable_large_pages & (1 << TTE512K))) {
1198 1199                  tsb_slab_ttesz = TTE512K;
1199 1200                  tsb_slab_shift = MMU_PAGESHIFT512K;
1200 1201                  tsb_slab_size = MMU_PAGESIZE512K;
1201 1202                  tsb_slab_mask = MMU_PAGEOFFSET512K >> MMU_PAGESHIFT;
1202 1203                  use_bigtsb_arena = 0;
1203 1204          }
1204 1205  
1205 1206          if (!use_bigtsb_arena) {
1206 1207                  bigtsb_slab_ttesz = tsb_slab_ttesz;
1207 1208                  bigtsb_slab_shift = tsb_slab_shift;
1208 1209                  bigtsb_slab_size = tsb_slab_size;
1209 1210                  bigtsb_slab_mask = tsb_slab_mask;
1210 1211          }
1211 1212  
1212 1213  
1213 1214          /*
1214 1215           * Set up memory callback to update tsb_alloc_hiwater and
1215 1216           * tsb_max_growsize.
1216 1217           */
1217 1218          i = kphysm_setup_func_register(&sfmmu_update_vec, (void *) 0);
1218 1219          ASSERT(i == 0);
1219 1220  
1220 1221          /*
1221 1222           * kmem_tsb_arena is the source from which large TSB slabs are
1222 1223           * drawn.  The quantum of this arena corresponds to the largest
1223 1224           * TSB size we can dynamically allocate for user processes.
1224 1225           * Currently it must also be a supported page size since we
1225 1226           * use exactly one translation entry to map each slab page.
1226 1227           *
1227 1228           * The per-lgroup kmem_tsb_default_arena arenas are the arenas from
1228 1229           * which most TSBs are allocated.  Since most TSB allocations are
1229 1230           * 8K, we have a kmem cache stacked on top of each
1230 1231           * kmem_tsb_default_arena to speed up those allocations.
1231 1232           *
1232 1233           * Note the two-level scheme of arenas is required only
1233 1234           * because vmem_create doesn't allow us to specify alignment
1234 1235           * requirements.  If this ever changes the code could be
1235 1236           * simplified to use only one level of arenas.
1236 1237           *
1237 1238           * If 256M page support exists on sun4v, 256MB kmem_bigtsb_arena
1238 1239           * will be provided in addition to the 4M kmem_tsb_arena.
1239 1240           */
1240 1241          if (use_bigtsb_arena) {
1241 1242                  kmem_bigtsb_arena = vmem_create("kmem_bigtsb", NULL, 0,
1242 1243                      bigtsb_slab_size, sfmmu_vmem_xalloc_aligned_wrapper,
1243 1244                      vmem_xfree, heap_arena, 0, VM_SLEEP);
1244 1245          }
1245 1246  
1246 1247          kmem_tsb_arena = vmem_create("kmem_tsb", NULL, 0, tsb_slab_size,
1247 1248              sfmmu_vmem_xalloc_aligned_wrapper,
1248 1249              vmem_xfree, heap_arena, 0, VM_SLEEP);
1249 1250  
1250 1251          if (tsb_lgrp_affinity) {
1251 1252                  char s[50];
1252 1253                  for (i = 0; i < NLGRPS_MAX; i++) {
1253 1254                          if (use_bigtsb_arena) {
1254 1255                                  (void) sprintf(s, "kmem_bigtsb_lgrp%d", i);
1255 1256                                  kmem_bigtsb_default_arena[i] = vmem_create(s,
1256 1257                                      NULL, 0, 2 * tsb_slab_size,
1257 1258                                      sfmmu_tsb_segkmem_alloc,
1258 1259                                      sfmmu_tsb_segkmem_free, kmem_bigtsb_arena,
1259 1260                                      0, VM_SLEEP | VM_BESTFIT);
1260 1261                          }
1261 1262  
1262 1263                          (void) sprintf(s, "kmem_tsb_lgrp%d", i);
1263 1264                          kmem_tsb_default_arena[i] = vmem_create(s,
1264 1265                              NULL, 0, PAGESIZE, sfmmu_tsb_segkmem_alloc,
1265 1266                              sfmmu_tsb_segkmem_free, kmem_tsb_arena, 0,
1266 1267                              VM_SLEEP | VM_BESTFIT);
1267 1268  
1268 1269                          (void) sprintf(s, "sfmmu_tsb_lgrp%d_cache", i);
1269 1270                          sfmmu_tsb_cache[i] = kmem_cache_create(s,
1270 1271                              PAGESIZE, PAGESIZE, NULL, NULL, NULL, NULL,
1271 1272                              kmem_tsb_default_arena[i], 0);
1272 1273                  }
1273 1274          } else {
1274 1275                  if (use_bigtsb_arena) {
1275 1276                          kmem_bigtsb_default_arena[0] =
1276 1277                              vmem_create("kmem_bigtsb_default", NULL, 0,
1277 1278                              2 * tsb_slab_size, sfmmu_tsb_segkmem_alloc,
1278 1279                              sfmmu_tsb_segkmem_free, kmem_bigtsb_arena, 0,
1279 1280                              VM_SLEEP | VM_BESTFIT);
1280 1281                  }
1281 1282  
1282 1283                  kmem_tsb_default_arena[0] = vmem_create("kmem_tsb_default",
1283 1284                      NULL, 0, PAGESIZE, sfmmu_tsb_segkmem_alloc,
1284 1285                      sfmmu_tsb_segkmem_free, kmem_tsb_arena, 0,
1285 1286                      VM_SLEEP | VM_BESTFIT);
1286 1287                  sfmmu_tsb_cache[0] = kmem_cache_create("sfmmu_tsb_cache",
1287 1288                      PAGESIZE, PAGESIZE, NULL, NULL, NULL, NULL,
1288 1289                      kmem_tsb_default_arena[0], 0);
1289 1290          }
1290 1291  
1291 1292          sfmmu8_cache = kmem_cache_create("sfmmu8_cache", HME8BLK_SZ,
1292 1293              HMEBLK_ALIGN, sfmmu_hblkcache_constructor,
1293 1294              sfmmu_hblkcache_destructor,
1294 1295              sfmmu_hblkcache_reclaim, (void *)HME8BLK_SZ,
1295 1296              hat_memload_arena, KMC_NOHASH);
1296 1297  
1297 1298          hat_memload1_arena = vmem_create("hat_memload1", NULL, 0, PAGESIZE,
1298 1299              segkmem_alloc_permanent, segkmem_free, heap_arena, 0,
1299 1300              VMC_DUMPSAFE | VM_SLEEP);
1300 1301  
1301 1302          sfmmu1_cache = kmem_cache_create("sfmmu1_cache", HME1BLK_SZ,
1302 1303              HMEBLK_ALIGN, sfmmu_hblkcache_constructor,
1303 1304              sfmmu_hblkcache_destructor,
1304 1305              NULL, (void *)HME1BLK_SZ,
1305 1306              hat_memload1_arena, KMC_NOHASH);
1306 1307  
1307 1308          pa_hment_cache = kmem_cache_create("pa_hment_cache", PAHME_SZ,
1308 1309              0, NULL, NULL, NULL, NULL, static_arena, KMC_NOHASH);
1309 1310  
1310 1311          ism_blk_cache = kmem_cache_create("ism_blk_cache",
1311 1312              sizeof (ism_blk_t), ecache_alignsize, NULL, NULL,
1312 1313              NULL, NULL, static_arena, KMC_NOHASH);
1313 1314  
1314 1315          ism_ment_cache = kmem_cache_create("ism_ment_cache",
1315 1316              sizeof (ism_ment_t), 0, NULL, NULL,
1316 1317              NULL, NULL, NULL, 0);
1317 1318  
1318 1319          /*
1319 1320           * We grab the first hat for the kernel.
1320 1321           */
1321 1322          AS_LOCK_ENTER(&kas, RW_WRITER);
1322 1323          kas.a_hat = hat_alloc(&kas);
1323 1324          AS_LOCK_EXIT(&kas);
1324 1325  
1325 1326          /*
1326 1327           * Initialize hblk_reserve.
1327 1328           */
1328 1329          ((struct hme_blk *)hblk_reserve)->hblk_nextpa =
1329 1330              va_to_pa((caddr_t)hblk_reserve);
1330 1331  
1331 1332  #ifndef UTSB_PHYS
1332 1333          /*
1333 1334           * Reserve some kernel virtual address space for the locked TTEs
1334 1335           * that allow us to probe the TSB from TL>0.
1335 1336           */
1336 1337          utsb_vabase = vmem_xalloc(heap_arena, tsb_slab_size, tsb_slab_size,
1337 1338              0, 0, NULL, NULL, VM_SLEEP);
1338 1339          utsb4m_vabase = vmem_xalloc(heap_arena, tsb_slab_size, tsb_slab_size,
1339 1340              0, 0, NULL, NULL, VM_SLEEP);
1340 1341  #endif
1341 1342  
1342 1343  #ifdef VAC
1343 1344          /*
1344 1345           * The big page VAC handling code assumes VAC
1345 1346           * will not be bigger than the smallest big
1346 1347           * page, which is 64K.
1347 1348           */
1348 1349          if (TTEPAGES(TTE64K) < CACHE_NUM_COLOR) {
1349 1350                  cmn_err(CE_PANIC, "VAC too big!");
1350 1351          }
1351 1352  #endif
1352 1353  
1353 1354          uhme_hash_pa = va_to_pa(uhme_hash);
1354 1355          khme_hash_pa = va_to_pa(khme_hash);
1355 1356  
1356 1357          /*
1357 1358           * Initialize relocation locks. kpr_suspendlock is held
1358 1359           * at PIL_MAX to prevent interrupts from pinning the holder
1359 1360           * of a suspended TTE which may access it, leading to a
1360 1361           * deadlock condition.
1361 1362           */
1362 1363          mutex_init(&kpr_mutex, NULL, MUTEX_DEFAULT, NULL);
1363 1364          mutex_init(&kpr_suspendlock, NULL, MUTEX_SPIN, (void *)PIL_MAX);
1364 1365  
1365 1366          /*
1366 1367           * If shared context support is disabled via /etc/system,
1367 1368           * set shctx_on to 0 here in case it was set to 1 earlier in the
1368 1369           * boot sequence by the cpu module initialization code.
1369 1370           */
1370 1371          if (shctx_on && disable_shctx) {
1371 1372                  shctx_on = 0;
1372 1373          }
1373 1374  
1374 1375          if (shctx_on) {
1375 1376                  srd_buckets = kmem_zalloc(SFMMU_MAX_SRD_BUCKETS *
1376 1377                      sizeof (srd_buckets[0]), KM_SLEEP);
1377 1378                  for (i = 0; i < SFMMU_MAX_SRD_BUCKETS; i++) {
1378 1379                          mutex_init(&srd_buckets[i].srdb_lock, NULL,
1379 1380                              MUTEX_DEFAULT, NULL);
1380 1381                  }
1381 1382  
1382 1383                  srd_cache = kmem_cache_create("srd_cache", sizeof (sf_srd_t),
1383 1384                      0, sfmmu_srdcache_constructor, sfmmu_srdcache_destructor,
1384 1385                      NULL, NULL, NULL, 0);
1385 1386                  region_cache = kmem_cache_create("region_cache",
1386 1387                      sizeof (sf_region_t), 0, sfmmu_rgncache_constructor,
1387 1388                      sfmmu_rgncache_destructor, NULL, NULL, NULL, 0);
1388 1389                  scd_cache = kmem_cache_create("scd_cache", sizeof (sf_scd_t),
1389 1390                      0, sfmmu_scdcache_constructor,  sfmmu_scdcache_destructor,
1390 1391                      NULL, NULL, NULL, 0);
1391 1392          }
1392 1393  
1393 1394          /*
1394 1395           * Pre-allocate hrm_hashtab before enabling the collection of
1395 1396           * refmod statistics.  Allocating it on the fly would mean
1396 1397           * running the risk of recursive mutex enters or
1397 1398           * deadlocks.
1398 1399           */
1399 1400          hrm_hashtab = kmem_zalloc(HRM_HASHSIZE * sizeof (struct hrmstat *),
1400 1401              KM_SLEEP);
1401 1402  
1402 1403          /* Allocate per-cpu pending freelist of hmeblks */
1403 1404          cpu_hme_pend = kmem_zalloc((NCPU * sizeof (cpu_hme_pend_t)) + 64,
1404 1405              KM_SLEEP);
1405 1406          cpu_hme_pend = (cpu_hme_pend_t *)P2ROUNDUP(
1406 1407              (uintptr_t)cpu_hme_pend, 64);
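        /*
         * The extra 64 bytes plus the P2ROUNDUP above align the start of the
         * per-CPU array on a 64-byte boundary (the same cache-line alignment
         * used for mmu_ctx_t earlier in this function).
         */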
1407 1408  
1408 1409          for (i = 0; i < NCPU; i++) {
1409 1410                  mutex_init(&cpu_hme_pend[i].chp_mutex, NULL, MUTEX_DEFAULT,
1410 1411                      NULL);
1411 1412          }
1412 1413  
1413 1414          if (cpu_hme_pend_thresh == 0) {
1414 1415                  cpu_hme_pend_thresh = CPU_HME_PEND_THRESH;
1415 1416          }
1416 1417  }
1417 1418  
1418 1419  /*
1419 1420   * Initialize locking for the hat layer, called early during boot.
1420 1421   */
1421 1422  static void
1422 1423  hat_lock_init()
1423 1424  {
1424 1425          int i;
1425 1426  
1426 1427          /*
1427 1428           * initialize the array of mutexes protecting a page's mapping
1428 1429           * list and p_nrm field.
1429 1430           */
1430 1431          for (i = 0; i < MML_TABLE_SIZE; i++)
1431 1432                  mutex_init(&mml_table[i].pad_mutex, NULL, MUTEX_DEFAULT, NULL);
1432 1433  
1433 1434          if (kpm_enable) {
1434 1435                  for (i = 0; i < kpmp_table_sz; i++) {
1435 1436                          mutex_init(&kpmp_table[i].khl_mutex, NULL,
1436 1437                              MUTEX_DEFAULT, NULL);
1437 1438                  }
1438 1439          }
1439 1440  
1440 1441          /*
1441 1442           * Initialize the array of mutex locks that protect sfmmu fields and
1442 1443           * TSB lists.
1443 1444           */
1444 1445          for (i = 0; i < SFMMU_NUM_LOCK; i++)
1445 1446                  mutex_init(HATLOCK_MUTEXP(&hat_lock[i]), NULL, MUTEX_DEFAULT,
1446 1447                      NULL);
1447 1448  }
1448 1449  
1449 1450  #define SFMMU_KERNEL_MAXVA \
1450 1451          (kmem64_base ? (uintptr_t)kmem64_end : (SYSLIMIT))
1451 1452  
1452 1453  /*
1453 1454   * Allocate a hat structure.
1454 1455   * Called when an address space first uses a hat.
1455 1456   */
1456 1457  struct hat *
1457 1458  hat_alloc(struct as *as)
1458 1459  {
1459 1460          sfmmu_t *sfmmup;
1460 1461          int i;
1461 1462          uint64_t cnum;
1462 1463          extern uint_t get_color_start(struct as *);
1463 1464  
1464 1465          ASSERT(AS_WRITE_HELD(as));
1465 1466          sfmmup = kmem_cache_alloc(sfmmuid_cache, KM_SLEEP);
1466 1467          sfmmup->sfmmu_as = as;
1467 1468          sfmmup->sfmmu_flags = 0;
1468 1469          sfmmup->sfmmu_tteflags = 0;
1469 1470          sfmmup->sfmmu_rtteflags = 0;
1470 1471          LOCK_INIT_CLEAR(&sfmmup->sfmmu_ctx_lock);
1471 1472  
1472 1473          if (as == &kas) {
1473 1474                  ksfmmup = sfmmup;
1474 1475                  sfmmup->sfmmu_cext = 0;
1475 1476                  cnum = KCONTEXT;
1476 1477  
1477 1478                  sfmmup->sfmmu_clrstart = 0;
1478 1479                  sfmmup->sfmmu_tsb = NULL;
1479 1480                  /*
1480 1481                   * hat_kern_setup() will call sfmmu_init_ktsbinfo()
1481 1482                   * to set up tsb_info for ksfmmup.
1482 1483                   */
1483 1484          } else {
1484 1485  
1485 1486                  /*
1486 1487                   * Just set to invalid ctx. When it faults, it will
1487 1488                   * get a valid ctx. This avoids the situation
1488 1489                   * where we get a ctx, it gets stolen, and then
1489 1490                   * we fault when we try to run and so have to get
1490 1491                   * another ctx.
1491 1492                   */
1492 1493                  sfmmup->sfmmu_cext = 0;
1493 1494                  cnum = INVALID_CONTEXT;
1494 1495  
1495 1496                  /* initialize original physical page coloring bin */
1496 1497                  sfmmup->sfmmu_clrstart = get_color_start(as);
1497 1498  #ifdef DEBUG
1498 1499                  if (tsb_random_size) {
1499 1500                          uint32_t randval = (uint32_t)gettick() >> 4;
1500 1501                          int size = randval % (tsb_max_growsize + 1);
1501 1502  
1502 1503                          /* choose a random tsb size for stress testing */
1503 1504                          (void) sfmmu_tsbinfo_alloc(&sfmmup->sfmmu_tsb, size,
1504 1505                              TSB8K|TSB64K|TSB512K, 0, sfmmup);
1505 1506                  } else
1506 1507  #endif /* DEBUG */
1507 1508                          (void) sfmmu_tsbinfo_alloc(&sfmmup->sfmmu_tsb,
1508 1509                              default_tsb_size,
1509 1510                              TSB8K|TSB64K|TSB512K, 0, sfmmup);
1510 1511                  sfmmup->sfmmu_flags = HAT_SWAPPED | HAT_ALLCTX_INVALID;
1511 1512                  ASSERT(sfmmup->sfmmu_tsb != NULL);
1512 1513          }
1513 1514  
1514 1515          ASSERT(max_mmu_ctxdoms > 0);
1515 1516          for (i = 0; i < max_mmu_ctxdoms; i++) {
1516 1517                  sfmmup->sfmmu_ctxs[i].cnum = cnum;
1517 1518                  sfmmup->sfmmu_ctxs[i].gnum = 0;
1518 1519          }
1519 1520  
1520 1521          for (i = 0; i < max_mmu_page_sizes; i++) {
1521 1522                  sfmmup->sfmmu_ttecnt[i] = 0;
1522 1523                  sfmmup->sfmmu_scdrttecnt[i] = 0;
1523 1524                  sfmmup->sfmmu_ismttecnt[i] = 0;
1524 1525                  sfmmup->sfmmu_scdismttecnt[i] = 0;
1525 1526                  sfmmup->sfmmu_pgsz[i] = TTE8K;
1526 1527          }
1527 1528          sfmmup->sfmmu_tsb0_4minflcnt = 0;
1528 1529          sfmmup->sfmmu_iblk = NULL;
1529 1530          sfmmup->sfmmu_ismhat = 0;
1530 1531          sfmmup->sfmmu_scdhat = 0;
1531 1532          sfmmup->sfmmu_ismblkpa = (uint64_t)-1;
1532 1533          if (sfmmup == ksfmmup) {
1533 1534                  CPUSET_ALL(sfmmup->sfmmu_cpusran);
1534 1535          } else {
1535 1536                  CPUSET_ZERO(sfmmup->sfmmu_cpusran);
1536 1537          }
1537 1538          sfmmup->sfmmu_free = 0;
1538 1539          sfmmup->sfmmu_rmstat = 0;
1539 1540          sfmmup->sfmmu_clrbin = sfmmup->sfmmu_clrstart;
1540 1541          cv_init(&sfmmup->sfmmu_tsb_cv, NULL, CV_DEFAULT, NULL);
1541 1542          sfmmup->sfmmu_srdp = NULL;
1542 1543          SF_RGNMAP_ZERO(sfmmup->sfmmu_region_map);
1543 1544          bzero(sfmmup->sfmmu_hmeregion_links, SFMMU_L1_HMERLINKS_SIZE);
1544 1545          sfmmup->sfmmu_scdp = NULL;
1545 1546          sfmmup->sfmmu_scd_link.next = NULL;
1546 1547          sfmmup->sfmmu_scd_link.prev = NULL;
1547 1548          return (sfmmup);
1548 1549  }
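/*
 * A minimal sketch of the expected calling pattern for hat_alloc() when a
 * new user address space is created; it mirrors what hat_init() does above
 * for &kas.  The helper name is hypothetical (never compiled).
 */
#if 0
static void
example_as_attach_hat(struct as *as)
{
        AS_LOCK_ENTER(as, RW_WRITER);
        as->a_hat = hat_alloc(as);      /* requires the as write lock */
        AS_LOCK_EXIT(as);
}
#endif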
1549 1550  
1550 1551  /*
1551 1552   * Create per-MMU context domain kstats for a given MMU ctx.
1552 1553   */
1553 1554  static void
1554 1555  sfmmu_mmu_kstat_create(mmu_ctx_t *mmu_ctxp)
1555 1556  {
1556 1557          mmu_ctx_stat_t  stat;
1557 1558          kstat_t         *mmu_kstat;
1558 1559  
1559 1560          ASSERT(MUTEX_HELD(&cpu_lock));
1560 1561          ASSERT(mmu_ctxp->mmu_kstat == NULL);
1561 1562  
1562 1563          mmu_kstat = kstat_create("unix", mmu_ctxp->mmu_idx, "mmu_ctx",
1563 1564              "hat", KSTAT_TYPE_NAMED, MMU_CTX_NUM_STATS, KSTAT_FLAG_VIRTUAL);
1564 1565  
1565 1566          if (mmu_kstat == NULL) {
1566 1567                  cmn_err(CE_WARN, "kstat_create for MMU %d failed",
1567 1568                      mmu_ctxp->mmu_idx);
1568 1569          } else {
1569 1570                  mmu_kstat->ks_data = mmu_ctxp->mmu_kstat_data;
1570 1571                  for (stat = 0; stat < MMU_CTX_NUM_STATS; stat++)
1571 1572                          kstat_named_init(&mmu_ctxp->mmu_kstat_data[stat],
1572 1573                              mmu_ctx_kstat_names[stat], KSTAT_DATA_INT64);
1573 1574                  mmu_ctxp->mmu_kstat = mmu_kstat;
1574 1575                  kstat_install(mmu_kstat);
1575 1576          }
1576 1577  }
1577 1578  
1578 1579  /*
1579 1580   * plat_cpuid_to_mmu_ctx_info() is a platform interface that returns MMU
1580 1581   * context domain information for a given CPU. If a platform does not
1581 1582   * specify that interface, then the function below is used instead to return
1582 1583   * default information. The defaults are as follows:
1583 1584   *
1584 1585   *      - The number of MMU context IDs supported on any CPU in the
1585 1586   *        system is 8K.
1586 1587   *      - There is one MMU context domain per CPU.
1587 1588   */
1588 1589  /*ARGSUSED*/
1589 1590  static void
1590 1591  sfmmu_cpuid_to_mmu_ctx_info(processorid_t cpuid, mmu_ctx_info_t *infop)
1591 1592  {
1592 1593          infop->mmu_nctxs = nctxs;
1593 1594          infop->mmu_idx = cpu[cpuid]->cpu_seqid;
1594 1595  }
1595 1596  
1596 1597  /*
1597 1598   * Called during CPU initialization to set the MMU context-related information
1598 1599   * for a CPU.
1599 1600   *
1600 1601   * cpu_lock serializes accesses to mmu_ctxs and mmu_saved_gnum.
1601 1602   */
1602 1603  void
1603 1604  sfmmu_cpu_init(cpu_t *cp)
1604 1605  {
1605 1606          mmu_ctx_info_t  info;
1606 1607          mmu_ctx_t       *mmu_ctxp;
1607 1608  
1608 1609          ASSERT(MUTEX_HELD(&cpu_lock));
1609 1610  
1610 1611          if (&plat_cpuid_to_mmu_ctx_info == NULL)
1611 1612                  sfmmu_cpuid_to_mmu_ctx_info(cp->cpu_id, &info);
1612 1613          else
1613 1614                  plat_cpuid_to_mmu_ctx_info(cp->cpu_id, &info);
1614 1615  
1615 1616          ASSERT(info.mmu_idx < max_mmu_ctxdoms);
1616 1617  
1617 1618          if ((mmu_ctxp = mmu_ctxs_tbl[info.mmu_idx]) == NULL) {
1618 1619                  /* Each mmu_ctx is cacheline aligned. */
1619 1620                  mmu_ctxp = kmem_cache_alloc(mmuctxdom_cache, KM_SLEEP);
1620 1621                  bzero(mmu_ctxp, sizeof (mmu_ctx_t));
1621 1622  
1622 1623                  mutex_init(&mmu_ctxp->mmu_lock, NULL, MUTEX_SPIN,
1623 1624                      (void *)ipltospl(DISP_LEVEL));
1624 1625                  mmu_ctxp->mmu_idx = info.mmu_idx;
1625 1626                  mmu_ctxp->mmu_nctxs = info.mmu_nctxs;
1626 1627                  /*
1627 1628                   * Globally, for the lifetime of a system,
1628 1629                   * gnum must always increase.
1629 1630                   * mmu_saved_gnum is protected by the cpu_lock.
1630 1631                   */
1631 1632                  mmu_ctxp->mmu_gnum = mmu_saved_gnum + 1;
1632 1633                  mmu_ctxp->mmu_cnum = NUM_LOCKED_CTXS;
1633 1634  
1634 1635                  sfmmu_mmu_kstat_create(mmu_ctxp);
1635 1636  
1636 1637                  mmu_ctxs_tbl[info.mmu_idx] = mmu_ctxp;
1637 1638          } else {
1638 1639                  ASSERT(mmu_ctxp->mmu_idx == info.mmu_idx);
1639 1640                  ASSERT(mmu_ctxp->mmu_nctxs <= info.mmu_nctxs);
1640 1641          }
1641 1642  
1642 1643          /*
1643 1644           * The mmu_lock is acquired here to prevent races with
1644 1645           * the wrap-around code.
1645 1646           */
1646 1647          mutex_enter(&mmu_ctxp->mmu_lock);
1647 1648  
1648 1649  
1649 1650          mmu_ctxp->mmu_ncpus++;
1650 1651          CPUSET_ADD(mmu_ctxp->mmu_cpuset, cp->cpu_id);
1651 1652          CPU_MMU_IDX(cp) = info.mmu_idx;
1652 1653          CPU_MMU_CTXP(cp) = mmu_ctxp;
1653 1654  
1654 1655          mutex_exit(&mmu_ctxp->mmu_lock);
1655 1656  }
1656 1657  
1657 1658  static void
1658 1659  sfmmu_ctxdom_free(mmu_ctx_t *mmu_ctxp)
1659 1660  {
1660 1661          ASSERT(MUTEX_HELD(&cpu_lock));
1661 1662          ASSERT(!MUTEX_HELD(&mmu_ctxp->mmu_lock));
1662 1663  
1663 1664          mutex_destroy(&mmu_ctxp->mmu_lock);
1664 1665  
1665 1666          if (mmu_ctxp->mmu_kstat)
1666 1667                  kstat_delete(mmu_ctxp->mmu_kstat);
1667 1668  
1668 1669          /* mmu_saved_gnum is protected by the cpu_lock. */
1669 1670          if (mmu_saved_gnum < mmu_ctxp->mmu_gnum)
1670 1671                  mmu_saved_gnum = mmu_ctxp->mmu_gnum;
1671 1672  
1672 1673          kmem_cache_free(mmuctxdom_cache, mmu_ctxp);
1673 1674  }
1674 1675  
1675 1676  /*
1676 1677   * Called to perform MMU context-related cleanup for a CPU.
1677 1678   */
1678 1679  void
1679 1680  sfmmu_cpu_cleanup(cpu_t *cp)
1680 1681  {
1681 1682          mmu_ctx_t       *mmu_ctxp;
1682 1683  
1683 1684          ASSERT(MUTEX_HELD(&cpu_lock));
1684 1685  
1685 1686          mmu_ctxp = CPU_MMU_CTXP(cp);
1686 1687          ASSERT(mmu_ctxp != NULL);
1687 1688  
1688 1689          /*
1689 1690           * The mmu_lock is acquired here to prevent races with
1690 1691           * the wrap-around code.
1691 1692           */
1692 1693          mutex_enter(&mmu_ctxp->mmu_lock);
1693 1694  
1694 1695          CPU_MMU_CTXP(cp) = NULL;
1695 1696  
1696 1697          CPUSET_DEL(mmu_ctxp->mmu_cpuset, cp->cpu_id);
1697 1698          if (--mmu_ctxp->mmu_ncpus == 0) {
1698 1699                  mmu_ctxs_tbl[mmu_ctxp->mmu_idx] = NULL;
1699 1700                  mutex_exit(&mmu_ctxp->mmu_lock);
1700 1701                  sfmmu_ctxdom_free(mmu_ctxp);
1701 1702                  return;
1702 1703          }
1703 1704  
1704 1705          mutex_exit(&mmu_ctxp->mmu_lock);
1705 1706  }
1706 1707  
1707 1708  uint_t
1708 1709  sfmmu_ctxdom_nctxs(int idx)
1709 1710  {
1710 1711          return (mmu_ctxs_tbl[idx]->mmu_nctxs);
1711 1712  }
1712 1713  
1713 1714  #ifdef sun4v
1714 1715  /*
1715 1716   * sfmmu_ctxdoms_* is an interface provided to help keep context domains
1716 1717   * consistent after suspend/resume on systems that can resume on different
1717 1718   * hardware than they were suspended on.
1718 1719   *
1719 1720   * sfmmu_ctxdoms_lock(void) locks all context domains and prevents new contexts
1720 1721   * from being allocated.  It acquires all hat_locks, which blocks most access to
1721 1722   * context data, except for a few cases that are handled separately or are
1722 1723   * harmless.  It wraps each domain to increment gnum and invalidate on-CPU
1723 1724   * contexts, and forces cnum to its max.  As a result of this call all user
1724 1725   * threads that are running on CPUs trap and try to perform wrap around but
1725 1726   * can't because hat_locks are taken.  Threads that were not on CPUs but started
1726 1727   * by the scheduler go to sfmmu_alloc_ctx() to acquire a context without checking
1727 1728   * hat_lock, but fail, because cnum == nctxs, and therefore also trap and block
1728 1729   * on hat_lock trying to wrap.  sfmmu_ctxdoms_lock() must be called before CPUs
1729 1730   * are paused, else it could deadlock acquiring locks held by paused CPUs.
1730 1731   *
1731 1732   * sfmmu_ctxdoms_remove() removes context domains from every CPU and records
1732 1733   * the CPUs that had them.  It must be called after CPUs have been paused. This
1733 1734   * ensures that no threads are in sfmmu_alloc_ctx() accessing domain data,
1734 1735   * because pause_cpus sends a mondo interrupt to every CPU, and sfmmu_alloc_ctx
1735 1736   * runs with interrupts disabled.  When CPUs are later resumed, they may enter
1736 1737   * sfmmu_alloc_ctx, but it will check for CPU_MMU_CTXP = NULL and immediately
1737 1738   * return failure.  Or, they will be blocked trying to acquire hat_lock. Thus
1738 1739   * after sfmmu_ctxdoms_remove returns, we are guaranteed that no one is
1739 1740   * accessing the old context domains.
1740 1741   *
1741 1742   * sfmmu_ctxdoms_update(void) frees space used by old context domains and
1742 1743   * allocates new context domains based on hardware layout.  It initializes
1743 1744   * every CPU that had a context domain before migration to have one again.
1744 1745   * sfmmu_ctxdoms_update must be called after CPUs are resumed, else it
1745 1746   * could deadlock acquiring locks held by paused CPUs.
1746 1747   *
1747 1748   * sfmmu_ctxdoms_unlock(void) releases all hat_locks after which user threads
1748 1749   * acquire new context ids and continue execution.
1749 1750   *
1750 1751   * Therefore, these functions should be called in the following order:
1751 1752   *       suspend_routine()
1752 1753   *              sfmmu_ctxdoms_lock()
1753 1754   *              pause_cpus()
1754 1755   *              suspend()
1755 1756   *                      if (suspend failed)
1756 1757   *                              sfmmu_ctxdoms_unlock()
1757 1758   *              ...
1758 1759   *              sfmmu_ctxdoms_remove()
1759 1760   *              resume_cpus()
1760 1761   *              sfmmu_ctxdoms_update()
1761 1762   *              sfmmu_ctxdoms_unlock()
1762 1763   */
1763 1764  static cpuset_t sfmmu_ctxdoms_pset;
1764 1765  
1765 1766  void
1766 1767  sfmmu_ctxdoms_remove()
1767 1768  {
1768 1769          processorid_t   id;
1769 1770          cpu_t           *cp;
1770 1771  
1771 1772          /*
1772 1773           * Record the CPUs that have domains in sfmmu_ctxdoms_pset, so they can
1773 1774           * be restored post-migration. A CPU may be powered off and not have a
1774 1775           * domain, for example.
1775 1776           */
1776 1777          CPUSET_ZERO(sfmmu_ctxdoms_pset);
1777 1778  
1778 1779          for (id = 0; id < NCPU; id++) {
1779 1780                  if ((cp = cpu[id]) != NULL && CPU_MMU_CTXP(cp) != NULL) {
1780 1781                          CPUSET_ADD(sfmmu_ctxdoms_pset, id);
1781 1782                          CPU_MMU_CTXP(cp) = NULL;
1782 1783                  }
1783 1784          }
1784 1785  }
1785 1786  
1786 1787  void
1787 1788  sfmmu_ctxdoms_lock(void)
1788 1789  {
1789 1790          int             idx;
1790 1791          mmu_ctx_t       *mmu_ctxp;
1791 1792  
1792 1793          sfmmu_hat_lock_all();
1793 1794  
1794 1795          /*
1795 1796           * At this point, no thread can be in sfmmu_ctx_wrap_around, because
1796 1797           * hat_lock is always taken before calling it.
1797 1798           *
1798 1799           * For each domain, set mmu_cnum to max so no more contexts can be
1799 1800           * allocated, and wrap to flush on-CPU contexts and force threads to
1800 1801           * acquire a new context when we later drop hat_lock after migration.
1801 1802           * Setting mmu_cnum may race with sfmmu_alloc_ctx which also sets cnum,
1802 1803           * but the latter uses CAS and will miscompare and not overwrite it.
1803 1804           */
1804 1805          kpreempt_disable(); /* required by sfmmu_ctx_wrap_around */
1805 1806          for (idx = 0; idx < max_mmu_ctxdoms; idx++) {
1806 1807                  if ((mmu_ctxp = mmu_ctxs_tbl[idx]) != NULL) {
1807 1808                          mutex_enter(&mmu_ctxp->mmu_lock);
1808 1809                          mmu_ctxp->mmu_cnum = mmu_ctxp->mmu_nctxs;
1809 1810                          /* make sure updated cnum visible */
1810 1811                          membar_enter();
1811 1812                          mutex_exit(&mmu_ctxp->mmu_lock);
1812 1813                          sfmmu_ctx_wrap_around(mmu_ctxp, B_FALSE);
1813 1814                  }
1814 1815          }
1815 1816          kpreempt_enable();
1816 1817  }
1817 1818  
1818 1819  void
1819 1820  sfmmu_ctxdoms_unlock(void)
1820 1821  {
1821 1822          sfmmu_hat_unlock_all();
1822 1823  }
1823 1824  
1824 1825  void
1825 1826  sfmmu_ctxdoms_update(void)
1826 1827  {
1827 1828          processorid_t   id;
1828 1829          cpu_t           *cp;
1829 1830          uint_t          idx;
1830 1831          mmu_ctx_t       *mmu_ctxp;
1831 1832  
1832 1833          /*
1833 1834           * Free all context domains.  As side effect, this increases
1834 1835           * mmu_saved_gnum to the maximum gnum over all domains, which is used to
1835 1836           * init gnum in the new domains, which therefore will be larger than the
1836 1837           * sfmmu gnum for any process, guaranteeing that every process will see
1837 1838           * a new generation and allocate a new context regardless of what new
1838 1839           * domain it runs in.
1839 1840           */
1840 1841          mutex_enter(&cpu_lock);
1841 1842  
1842 1843          for (idx = 0; idx < max_mmu_ctxdoms; idx++) {
1843 1844                  if (mmu_ctxs_tbl[idx] != NULL) {
1844 1845                          mmu_ctxp = mmu_ctxs_tbl[idx];
1845 1846                          mmu_ctxs_tbl[idx] = NULL;
1846 1847                          sfmmu_ctxdom_free(mmu_ctxp);
1847 1848                  }
1848 1849          }
1849 1850  
1850 1851          for (id = 0; id < NCPU; id++) {
1851 1852                  if (CPU_IN_SET(sfmmu_ctxdoms_pset, id) &&
1852 1853                      (cp = cpu[id]) != NULL)
1853 1854                          sfmmu_cpu_init(cp);
1854 1855          }
1855 1856          mutex_exit(&cpu_lock);
1856 1857  }
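/*
 * A minimal sketch of the suspend/resume ordering documented in the block
 * comment above.  do_suspend() is a hypothetical placeholder and the CPU
 * pause/resume calls are abbreviated as comments (never compiled).
 */
#if 0
static int
example_suspend_routine(void)
{
        sfmmu_ctxdoms_lock();
        /* pause_cpus(...) */
        if (do_suspend() != 0) {
                /* error path abbreviated; see the ordering comment above */
                sfmmu_ctxdoms_unlock();
                return (-1);
        }
        sfmmu_ctxdoms_remove();
        /* resume_cpus(...) */
        sfmmu_ctxdoms_update();
        sfmmu_ctxdoms_unlock();
        return (0);
}
#endif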
1857 1858  #endif
1858 1859  
1859 1860  /*
1860 1861   * hat_setup() makes an address space context the currently active one.
1861 1862   * In sfmmu this translates to setting the secondary context register
1862 1863   * to the corresponding context number.
1863 1864   */
1864 1865  void
1865 1866  hat_setup(struct hat *sfmmup, int allocflag)
1866 1867  {
1867 1868          hatlock_t *hatlockp;
1868 1869  
1869 1870          /* Init needs some special treatment. */
1870 1871          if (allocflag == HAT_INIT) {
1871 1872                  /*
1872 1873                   * Make sure that we have
1873 1874                   * 1. a TSB
1874 1875                   * 2. a valid ctx that doesn't get stolen after this point.
1875 1876                   */
1876 1877                  hatlockp = sfmmu_hat_enter(sfmmup);
1877 1878  
1878 1879                  /*
1879 1880                   * Swap in the TSB.  hat_init() allocates tsbinfos without
1880 1881                   * TSBs, but we need one for init, since the kernel does some
1881 1882                   * special things to set up its stack and needs the TSB to
1882 1883                   * resolve page faults.
1883 1884                   */
1884 1885                  sfmmu_tsb_swapin(sfmmup, hatlockp);
1885 1886  
1886 1887                  sfmmu_get_ctx(sfmmup);
1887 1888  
1888 1889                  sfmmu_hat_exit(hatlockp);
1889 1890          } else {
1890 1891                  ASSERT(allocflag == HAT_ALLOC);
1891 1892  
1892 1893                  hatlockp = sfmmu_hat_enter(sfmmup);
1893 1894                  kpreempt_disable();
1894 1895  
1895 1896                  CPUSET_ADD(sfmmup->sfmmu_cpusran, CPU->cpu_id);
1896 1897                  /*
1897 1898                   * sfmmu_setctx_sec takes <pgsz|cnum> as a parameter;
1898 1899                   * pagesize bits don't matter in this case since we are passing
1899 1900                   * INVALID_CONTEXT to it.
1900 1901                   * Compatibility Note: hw takes care of MMU_SCONTEXT1
1901 1902                   */
1902 1903                  sfmmu_setctx_sec(INVALID_CONTEXT);
1903 1904                  sfmmu_clear_utsbinfo();
1904 1905  
1905 1906                  kpreempt_enable();
1906 1907                  sfmmu_hat_exit(hatlockp);
1907 1908          }
1908 1909  }
1909 1910  
1910 1911  /*
1911 1912   * Free all the translation resources for the specified address space.
1912 1913   * Called from as_free when an address space is being destroyed.
1913 1914   */
1914 1915  void
1915 1916  hat_free_start(struct hat *sfmmup)
1916 1917  {
1917 1918          ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as));
1918 1919          ASSERT(sfmmup != ksfmmup);
1919 1920  
1920 1921          sfmmup->sfmmu_free = 1;
1921 1922          if (sfmmup->sfmmu_scdp != NULL) {
1922 1923                  sfmmu_leave_scd(sfmmup, 0);
1923 1924          }
1924 1925  
1925 1926          ASSERT(sfmmup->sfmmu_scdp == NULL);
1926 1927  }
1927 1928  
1928 1929  void
1929 1930  hat_free_end(struct hat *sfmmup)
1930 1931  {
1931 1932          int i;
1932 1933  
1933 1934          ASSERT(sfmmup->sfmmu_free == 1);
1934 1935          ASSERT(sfmmup->sfmmu_ttecnt[TTE8K] == 0);
1935 1936          ASSERT(sfmmup->sfmmu_ttecnt[TTE64K] == 0);
1936 1937          ASSERT(sfmmup->sfmmu_ttecnt[TTE512K] == 0);
1937 1938          ASSERT(sfmmup->sfmmu_ttecnt[TTE4M] == 0);
1938 1939          ASSERT(sfmmup->sfmmu_ttecnt[TTE32M] == 0);
1939 1940          ASSERT(sfmmup->sfmmu_ttecnt[TTE256M] == 0);
1940 1941  
1941 1942          if (sfmmup->sfmmu_rmstat) {
1942 1943                  hat_freestat(sfmmup->sfmmu_as, NULL);
1943 1944          }
1944 1945  
1945 1946          while (sfmmup->sfmmu_tsb != NULL) {
1946 1947                  struct tsb_info *next = sfmmup->sfmmu_tsb->tsb_next;
1947 1948                  sfmmu_tsbinfo_free(sfmmup->sfmmu_tsb);
1948 1949                  sfmmup->sfmmu_tsb = next;
1949 1950          }
1950 1951  
1951 1952          if (sfmmup->sfmmu_srdp != NULL) {
1952 1953                  sfmmu_leave_srd(sfmmup);
1953 1954                  ASSERT(sfmmup->sfmmu_srdp == NULL);
1954 1955                  for (i = 0; i < SFMMU_L1_HMERLINKS; i++) {
1955 1956                          if (sfmmup->sfmmu_hmeregion_links[i] != NULL) {
1956 1957                                  kmem_free(sfmmup->sfmmu_hmeregion_links[i],
1957 1958                                      SFMMU_L2_HMERLINKS_SIZE);
1958 1959                                  sfmmup->sfmmu_hmeregion_links[i] = NULL;
1959 1960                          }
1960 1961                  }
1961 1962          }
1962 1963          sfmmu_free_sfmmu(sfmmup);
1963 1964  
1964 1965  #ifdef DEBUG
1965 1966          for (i = 0; i < SFMMU_L1_HMERLINKS; i++) {
1966 1967                  ASSERT(sfmmup->sfmmu_hmeregion_links[i] == NULL);
1967 1968          }
1968 1969  #endif
1969 1970  
1970 1971          kmem_cache_free(sfmmuid_cache, sfmmup);
1971 1972  }
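/*
 * A minimal sketch of the teardown order implied by the two routines above,
 * as as_free() is expected to drive it; the helper name is hypothetical
 * (never compiled).
 */
#if 0
static void
example_as_free_teardown(struct as *as)
{
        hat_free_start(as->a_hat);
        /* ... every segment is unmapped; sfmmu_ttecnt[] drops to zero ... */
        hat_free_end(as->a_hat);
        as->a_hat = NULL;
}
#endif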
1972 1973  
1973 1974  /*
1974 1975   * Set up any translation structures, for the specified address space,
1975 1976   * that are needed or preferred when the process is being swapped in.
1976 1977   */
1977 1978  /* ARGSUSED */
1978 1979  void
1979 1980  hat_swapin(struct hat *hat)
1980 1981  {
1981 1982  }
1982 1983  
1983 1984  /*
1984 1985   * Free all of the translation resources, for the specified address space,
1985 1986   * that can be freed while the process is swapped out. Called from as_swapout.
1986 1987   * Also, free up the ctx that this process was using.
1987 1988   */
1988 1989  void
1989 1990  hat_swapout(struct hat *sfmmup)
1990 1991  {
1991 1992          struct hmehash_bucket *hmebp;
1992 1993          struct hme_blk *hmeblkp;
1993 1994          struct hme_blk *pr_hblk = NULL;
1994 1995          struct hme_blk *nx_hblk;
1995 1996          int i;
1996 1997          struct hme_blk *list = NULL;
1997 1998          hatlock_t *hatlockp;
1998 1999          struct tsb_info *tsbinfop;
1999 2000          struct free_tsb {
2000 2001                  struct free_tsb *next;
2001 2002                  struct tsb_info *tsbinfop;
2002 2003          };                      /* free list of TSBs */
2003 2004          struct free_tsb *freelist, *last, *next;
2004 2005  
2005 2006          SFMMU_STAT(sf_swapout);
2006 2007  
2007 2008          /*
2008 2009           * There is no way to go from an as to all its translations in sfmmu.
2009 2010           * Here is one of the times when we take the big hit and traverse
2010 2011           * the hash looking for hme_blks to free up.  Not only do we free up
2011 2012   * this as's hme_blks but also all those that are free.  We are obviously
2012 2013   * swapping because we need memory, so let's free up as much
2013 2014           * as we can.
2014 2015           *
2015 2016           * Note that we don't flush TLB/TSB here -- it's not necessary
2016 2017           * because:
2017 2018           *  1) we free the ctx we're using and throw away the TSB(s);
2018 2019           *  2) processes aren't runnable while being swapped out.
2019 2020           */
2020 2021          ASSERT(sfmmup != KHATID);
2021 2022          for (i = 0; i <= UHMEHASH_SZ; i++) {
2022 2023                  hmebp = &uhme_hash[i];
2023 2024                  SFMMU_HASH_LOCK(hmebp);
2024 2025                  hmeblkp = hmebp->hmeblkp;
2025 2026                  pr_hblk = NULL;
2026 2027                  while (hmeblkp) {
2027 2028  
2028 2029                          if ((hmeblkp->hblk_tag.htag_id == sfmmup) &&
2029 2030                              !hmeblkp->hblk_shw_bit && !hmeblkp->hblk_lckcnt) {
2030 2031                                  ASSERT(!hmeblkp->hblk_shared);
2031 2032                                  (void) sfmmu_hblk_unload(sfmmup, hmeblkp,
2032 2033                                      (caddr_t)get_hblk_base(hmeblkp),
2033 2034                                      get_hblk_endaddr(hmeblkp),
2034 2035                                      NULL, HAT_UNLOAD);
2035 2036                          }
2036 2037                          nx_hblk = hmeblkp->hblk_next;
2037 2038                          if (!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) {
2038 2039                                  ASSERT(!hmeblkp->hblk_lckcnt);
2039 2040                                  sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk,
2040 2041                                      &list, 0);
2041 2042                          } else {
2042 2043                                  pr_hblk = hmeblkp;
2043 2044                          }
2044 2045                          hmeblkp = nx_hblk;
2045 2046                  }
2046 2047                  SFMMU_HASH_UNLOCK(hmebp);
2047 2048          }
2048 2049  
2049 2050          sfmmu_hblks_list_purge(&list, 0);
2050 2051  
2051 2052          /*
2052 2053           * Now free up the ctx so that others can reuse it.
2053 2054           */
2054 2055          hatlockp = sfmmu_hat_enter(sfmmup);
2055 2056  
2056 2057          sfmmu_invalidate_ctx(sfmmup);
2057 2058  
2058 2059          /*
2059 2060           * Free TSBs, but not tsbinfos, and set SWAPPED flag.
2060 2061           * If TSBs were never swapped in, just return.
2061 2062           * This implies that we don't support partial swapping
2062 2063           * of TSBs -- either all are swapped out, or none are.
2063 2064           *
2064 2065           * We must hold the HAT lock here to prevent racing with another
2065 2066           * thread trying to unmap TTEs from the TSB or running the post-
2066 2067           * relocator after relocating the TSB's memory.  Unfortunately, we
2067 2068           * can't free memory while holding the HAT lock or we could
2068 2069           * deadlock, so we build a list of TSBs to be freed after marking
2069 2070           * the tsbinfos as swapped out and free them after dropping the
2070 2071           * lock.
2071 2072           */
2072 2073          if (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) {
2073 2074                  sfmmu_hat_exit(hatlockp);
2074 2075                  return;
2075 2076          }
2076 2077  
2077 2078          SFMMU_FLAGS_SET(sfmmup, HAT_SWAPPED);
2078 2079          last = freelist = NULL;
2079 2080          for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL;
2080 2081              tsbinfop = tsbinfop->tsb_next) {
2081 2082                  ASSERT((tsbinfop->tsb_flags & TSB_SWAPPED) == 0);
2082 2083  
2083 2084                  /*
2084 2085                   * Cast the TSB into a struct free_tsb and put it on the free
2085 2086                   * list.
2086 2087                   */
2087 2088                  if (freelist == NULL) {
2088 2089                          last = freelist = (struct free_tsb *)tsbinfop->tsb_va;
2089 2090                  } else {
2090 2091                          last->next = (struct free_tsb *)tsbinfop->tsb_va;
2091 2092                          last = last->next;
2092 2093                  }
2093 2094                  last->next = NULL;
2094 2095                  last->tsbinfop = tsbinfop;
2095 2096                  tsbinfop->tsb_flags |= TSB_SWAPPED;
2096 2097                  /*
2097 2098                   * Zero out the TTE to clear the valid bit.
2098 2099                   * Note we can't use a value like 0xbad because we want to
2099 2100                   * ensure diagnostic bits are NEVER set on TTEs that might
2100 2101                   * be loaded.  The intent is to catch any invalid access
2101 2102                   * to the swapped TSB, such as a thread running with a valid
2102 2103                   * context without first calling sfmmu_tsb_swapin() to
2103 2104                   * allocate TSB memory.
2104 2105                   */
2105 2106                  tsbinfop->tsb_tte.ll = 0;
2106 2107          }
2107 2108  
2108 2109          /* Now we can drop the lock and free the TSB memory. */
2109 2110          sfmmu_hat_exit(hatlockp);
2110 2111          for (; freelist != NULL; freelist = next) {
2111 2112                  next = freelist->next;
2112 2113                  sfmmu_tsb_free(freelist->tsbinfop);
2113 2114          }
2114 2115  }
2115 2116  
2116 2117  /*
2117 2118   * Duplicate the translations of an as into another newas
2118 2119   */
2119 2120  /* ARGSUSED */
2120 2121  int
2121 2122  hat_dup(struct hat *hat, struct hat *newhat, caddr_t addr, size_t len,
2122 2123          uint_t flag)
2123 2124  {
2124 2125          sf_srd_t *srdp;
2125 2126          sf_scd_t *scdp;
2126 2127          int i;
2127 2128          extern uint_t get_color_start(struct as *);
2128 2129  
2129 2130          ASSERT((flag == 0) || (flag == HAT_DUP_ALL) || (flag == HAT_DUP_COW) ||
2130 2131              (flag == HAT_DUP_SRD));
2131 2132          ASSERT(hat != ksfmmup);
2132 2133          ASSERT(newhat != ksfmmup);
2133 2134          ASSERT(flag != HAT_DUP_ALL || hat->sfmmu_srdp == newhat->sfmmu_srdp);
2134 2135  
2135 2136          if (flag == HAT_DUP_COW) {
2136 2137                  panic("hat_dup: HAT_DUP_COW not supported");
2137 2138          }
2138 2139  
2139 2140          if (flag == HAT_DUP_SRD && ((srdp = hat->sfmmu_srdp) != NULL)) {
2140 2141                  ASSERT(srdp->srd_evp != NULL);
2141 2142                  VN_HOLD(srdp->srd_evp);
2142 2143                  ASSERT(srdp->srd_refcnt > 0);
2143 2144                  newhat->sfmmu_srdp = srdp;
2144 2145                  atomic_inc_32((volatile uint_t *)&srdp->srd_refcnt);
2145 2146          }
2146 2147  
2147 2148          /*
2148 2149           * HAT_DUP_ALL flag is used after as duplication is done.
2149 2150           */
2150 2151          if (flag == HAT_DUP_ALL && ((srdp = newhat->sfmmu_srdp) != NULL)) {
2151 2152                  ASSERT(newhat->sfmmu_srdp->srd_refcnt >= 2);
2152 2153                  newhat->sfmmu_rtteflags = hat->sfmmu_rtteflags;
2153 2154                  if (hat->sfmmu_flags & HAT_4MTEXT_FLAG) {
2154 2155                          newhat->sfmmu_flags |= HAT_4MTEXT_FLAG;
2155 2156                  }
2156 2157  
2157 2158                  /* check if need to join scd */
2158 2159                  if ((scdp = hat->sfmmu_scdp) != NULL &&
2159 2160                      newhat->sfmmu_scdp != scdp) {
2160 2161                          int ret;
2161 2162                          SF_RGNMAP_IS_SUBSET(&newhat->sfmmu_region_map,
2162 2163                              &scdp->scd_region_map, ret);
2163 2164                          ASSERT(ret);
2164 2165                          sfmmu_join_scd(scdp, newhat);
2165 2166                          ASSERT(newhat->sfmmu_scdp == scdp &&
2166 2167                              scdp->scd_refcnt >= 2);
2167 2168                          for (i = 0; i < max_mmu_page_sizes; i++) {
2168 2169                                  newhat->sfmmu_ismttecnt[i] =
2169 2170                                      hat->sfmmu_ismttecnt[i];
2170 2171                                  newhat->sfmmu_scdismttecnt[i] =
2171 2172                                      hat->sfmmu_scdismttecnt[i];
2172 2173                          }
2173 2174                  }
2174 2175  
2175 2176                  sfmmu_check_page_sizes(newhat, 1);
2176 2177          }
2177 2178  
2178 2179          if (flag == HAT_DUP_ALL && consistent_coloring == 0 &&
2179 2180              update_proc_pgcolorbase_after_fork != 0) {
2180 2181                  hat->sfmmu_clrbin = get_color_start(hat->sfmmu_as);
2181 2182          }
2182 2183          return (0);
2183 2184  }
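/*
 * A minimal sketch of the two-phase use of hat_dup() implied by the flag
 * handling above: HAT_DUP_SRD shares the SRD before the segments are
 * duplicated and HAT_DUP_ALL finishes the job afterwards.  The caller
 * shape shown here is an assumption for illustration (never compiled).
 */
#if 0
static void
example_fork_dup(struct as *as, struct as *newas)
{
        /* Phase 1: share the parent's SRD, if it has one. */
        (void) hat_dup(as->a_hat, newas->a_hat, NULL, 0, HAT_DUP_SRD);

        /* ... the segments of "as" are duplicated into "newas" ... */

        /* Phase 2: copy region tte flags and join the parent's SCD, if any. */
        (void) hat_dup(as->a_hat, newas->a_hat, NULL, 0, HAT_DUP_ALL);
}
#endif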
2184 2185  
2185 2186  void
2186 2187  hat_memload(struct hat *hat, caddr_t addr, struct page *pp,
2187 2188          uint_t attr, uint_t flags)
2188 2189  {
2189 2190          hat_do_memload(hat, addr, pp, attr, flags,
2190 2191              SFMMU_INVALID_SHMERID);
2191 2192  }
2192 2193  
2193 2194  void
2194 2195  hat_memload_region(struct hat *hat, caddr_t addr, struct page *pp,
2195 2196          uint_t attr, uint_t flags, hat_region_cookie_t rcookie)
2196 2197  {
2197 2198          uint_t rid;
2198 2199          if (rcookie == HAT_INVALID_REGION_COOKIE) {
2199 2200                  hat_do_memload(hat, addr, pp, attr, flags,
2200 2201                      SFMMU_INVALID_SHMERID);
2201 2202                  return;
2202 2203          }
2203 2204          rid = (uint_t)((uint64_t)rcookie);
2204 2205          ASSERT(rid < SFMMU_MAX_HME_REGIONS);
2205 2206          hat_do_memload(hat, addr, pp, attr, flags, rid);
2206 2207  }
2207 2208  
2208 2209  /*
2209 2210   * Set up addr to map to page pp with protection prot.
2210 2211   * As an optimization we also load the TSB with the
2211 2212   * corresponding tte, but it is no big deal if the tte gets kicked out.
2212 2213   */
2213 2214  static void
2214 2215  hat_do_memload(struct hat *hat, caddr_t addr, struct page *pp,
2215 2216          uint_t attr, uint_t flags, uint_t rid)
2216 2217  {
2217 2218          tte_t tte;
2218 2219  
2219 2220  
2220 2221          ASSERT(hat != NULL);
2221 2222          ASSERT(PAGE_LOCKED(pp));
2222 2223          ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET));
2223 2224          ASSERT(!(flags & ~SFMMU_LOAD_ALLFLAG));
2224 2225          ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR));
2225 2226          SFMMU_VALIDATE_HMERID(hat, rid, addr, MMU_PAGESIZE);
2226 2227  
2227 2228          if (PP_ISFREE(pp)) {
2228 2229                  panic("hat_memload: loading a mapping to free page %p",
2229 2230                      (void *)pp);
2230 2231          }
2231 2232  
2232 2233          ASSERT((hat == ksfmmup) || AS_LOCK_HELD(hat->sfmmu_as));
2233 2234  
2234 2235          if (flags & ~SFMMU_LOAD_ALLFLAG)
2235 2236                  cmn_err(CE_NOTE, "hat_memload: unsupported flags %d",
2236 2237                      flags & ~SFMMU_LOAD_ALLFLAG);
2237 2238  
2238 2239          if (hat->sfmmu_rmstat)
2239 2240                  hat_resvstat(MMU_PAGESIZE, hat->sfmmu_as, addr);
2240 2241  
2241 2242  #if defined(SF_ERRATA_57)
2242 2243          if ((hat != ksfmmup) && AS_TYPE_64BIT(hat->sfmmu_as) &&
2243 2244              (addr < errata57_limit) && (attr & PROT_EXEC) &&
2244 2245              !(flags & HAT_LOAD_SHARE)) {
2245 2246                  cmn_err(CE_WARN, "hat_memload: illegal attempt to make user "
2246 2247                      " page executable");
2247 2248                  attr &= ~PROT_EXEC;
2248 2249          }
2249 2250  #endif
2250 2251  
2251 2252          sfmmu_memtte(&tte, pp->p_pagenum, attr, TTE8K);
2252 2253          (void) sfmmu_tteload_array(hat, &tte, addr, &pp, flags, rid);
2253 2254  
2254 2255          /*
2255 2256           * Check TSB and TLB page sizes.
2256 2257           */
2257 2258          if ((flags & HAT_LOAD_SHARE) == 0) {
2258 2259                  sfmmu_check_page_sizes(hat, 1);
2259 2260          }
2260 2261  }
2261 2262  
2262 2263  /*
2263 2264   * hat_devload can be called to map real memory (e.g.
2264 2265   * /dev/kmem) and even though hat_devload will determine pf is
2265 2266   * for memory, it will be unable to get a shared lock on the
2266 2267   * page (because someone else has it exclusively) and will
2267 2268   * pass pp = NULL.  If tteload doesn't get a non-NULL
2268 2269   * page pointer it can't cache memory.
2269 2270   */
2270 2271  void
2271 2272  hat_devload(struct hat *hat, caddr_t addr, size_t len, pfn_t pfn,
2272 2273          uint_t attr, int flags)
2273 2274  {
2274 2275          tte_t tte;
2275 2276          struct page *pp = NULL;
2276 2277          int use_lgpg = 0;
2277 2278  
2278 2279          ASSERT(hat != NULL);
2279 2280  
2280 2281          ASSERT(!(flags & ~SFMMU_LOAD_ALLFLAG));
2281 2282          ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR));
2282 2283          ASSERT((hat == ksfmmup) || AS_LOCK_HELD(hat->sfmmu_as));
2283 2284          if (len == 0)
2284 2285                  panic("hat_devload: zero len");
2285 2286          if (flags & ~SFMMU_LOAD_ALLFLAG)
2286 2287                  cmn_err(CE_NOTE, "hat_devload: unsupported flags %d",
2287 2288                      flags & ~SFMMU_LOAD_ALLFLAG);
2288 2289  
2289 2290  #if defined(SF_ERRATA_57)
2290 2291          if ((hat != ksfmmup) && AS_TYPE_64BIT(hat->sfmmu_as) &&
2291 2292              (addr < errata57_limit) && (attr & PROT_EXEC) &&
2292 2293              !(flags & HAT_LOAD_SHARE)) {
2293 2294                  cmn_err(CE_WARN, "hat_devload: illegal attempt to make user "
2294 2295                      "page executable");
2295 2296                  attr &= ~PROT_EXEC;
2296 2297          }
2297 2298  #endif
2298 2299  
2299 2300          /*
2300 2301           * If it's a memory page find its pp
2301 2302           */
2302 2303          if (!(flags & HAT_LOAD_NOCONSIST) && pf_is_memory(pfn)) {
2303 2304                  pp = page_numtopp_nolock(pfn);
2304 2305                  if (pp == NULL) {
2305 2306                          flags |= HAT_LOAD_NOCONSIST;
2306 2307                  } else {
2307 2308                          if (PP_ISFREE(pp)) {
2308 2309                                  panic("hat_devload: loading "
2309 2310                                      "a mapping to free page %p",
2310 2311                                      (void *)pp);
2311 2312                          }
2312 2313                          if (!PAGE_LOCKED(pp) && !PP_ISNORELOC(pp)) {
2313 2314                                  panic("hat_devload: loading a mapping "
2314 2315                                      "to unlocked relocatable page %p",
2315 2316                                      (void *)pp);
2316 2317                          }
2317 2318                          ASSERT(len == MMU_PAGESIZE);
2318 2319                  }
2319 2320          }
2320 2321  
2321 2322          if (hat->sfmmu_rmstat)
2322 2323                  hat_resvstat(len, hat->sfmmu_as, addr);
2323 2324  
2324 2325          if (flags & HAT_LOAD_NOCONSIST) {
2325 2326                  attr |= SFMMU_UNCACHEVTTE;
2326 2327                  use_lgpg = 1;
2327 2328          }
2328 2329          if (!pf_is_memory(pfn)) {
2329 2330                  attr |= SFMMU_UNCACHEPTTE | HAT_NOSYNC;
2330 2331                  use_lgpg = 1;
2331 2332                  switch (attr & HAT_ORDER_MASK) {
2332 2333                          case HAT_STRICTORDER:
2333 2334                          case HAT_UNORDERED_OK:
2334 2335                                  /*
2335 2336                                   * we set the side-effect bit for all
2336 2337                                   * non-memory mappings unless merging is ok
2337 2338                                   */
2338 2339                                  attr |= SFMMU_SIDEFFECT;
2339 2340                                  break;
2340 2341                          case HAT_MERGING_OK:
2341 2342                          case HAT_LOADCACHING_OK:
2342 2343                          case HAT_STORECACHING_OK:
2343 2344                                  break;
2344 2345                          default:
2345 2346                                  panic("hat_devload: bad attr");
2346 2347                                  break;
2347 2348                  }
2348 2349          }
2349 2350          while (len) {
2350 2351                  if (!use_lgpg) {
2351 2352                          sfmmu_memtte(&tte, pfn, attr, TTE8K);
2352 2353                          (void) sfmmu_tteload_array(hat, &tte, addr, &pp,
2353 2354                              flags, SFMMU_INVALID_SHMERID);
2354 2355                          len -= MMU_PAGESIZE;
2355 2356                          addr += MMU_PAGESIZE;
2356 2357                          pfn++;
2357 2358                          continue;
2358 2359                  }
2359 2360                  /*
2360 2361                   *  try to use large pages, check va/pa alignments
2361 2362                   *  Note that 32M/256M page sizes are not (yet) supported.
2362 2363                   */
2363 2364                  if ((len >= MMU_PAGESIZE4M) &&
2364 2365                      !((uintptr_t)addr & MMU_PAGEOFFSET4M) &&
2365 2366                      !(disable_large_pages & (1 << TTE4M)) &&
2366 2367                      !(mmu_ptob(pfn) & MMU_PAGEOFFSET4M)) {
2367 2368                          sfmmu_memtte(&tte, pfn, attr, TTE4M);
2368 2369                          (void) sfmmu_tteload_array(hat, &tte, addr, &pp,
2369 2370                              flags, SFMMU_INVALID_SHMERID);
2370 2371                          len -= MMU_PAGESIZE4M;
2371 2372                          addr += MMU_PAGESIZE4M;
2372 2373                          pfn += MMU_PAGESIZE4M / MMU_PAGESIZE;
2373 2374                  } else if ((len >= MMU_PAGESIZE512K) &&
2374 2375                      !((uintptr_t)addr & MMU_PAGEOFFSET512K) &&
2375 2376                      !(disable_large_pages & (1 << TTE512K)) &&
2376 2377                      !(mmu_ptob(pfn) & MMU_PAGEOFFSET512K)) {
2377 2378                          sfmmu_memtte(&tte, pfn, attr, TTE512K);
2378 2379                          (void) sfmmu_tteload_array(hat, &tte, addr, &pp,
2379 2380                              flags, SFMMU_INVALID_SHMERID);
2380 2381                          len -= MMU_PAGESIZE512K;
2381 2382                          addr += MMU_PAGESIZE512K;
2382 2383                          pfn += MMU_PAGESIZE512K / MMU_PAGESIZE;
2383 2384                  } else if ((len >= MMU_PAGESIZE64K) &&
2384 2385                      !((uintptr_t)addr & MMU_PAGEOFFSET64K) &&
2385 2386                      !(disable_large_pages & (1 << TTE64K)) &&
2386 2387                      !(mmu_ptob(pfn) & MMU_PAGEOFFSET64K)) {
2387 2388                          sfmmu_memtte(&tte, pfn, attr, TTE64K);
2388 2389                          (void) sfmmu_tteload_array(hat, &tte, addr, &pp,
2389 2390                              flags, SFMMU_INVALID_SHMERID);
2390 2391                          len -= MMU_PAGESIZE64K;
2391 2392                          addr += MMU_PAGESIZE64K;
2392 2393                          pfn += MMU_PAGESIZE64K / MMU_PAGESIZE;
2393 2394                  } else {
2394 2395                          sfmmu_memtte(&tte, pfn, attr, TTE8K);
2395 2396                          (void) sfmmu_tteload_array(hat, &tte, addr, &pp,
2396 2397                              flags, SFMMU_INVALID_SHMERID);
2397 2398                          len -= MMU_PAGESIZE;
2398 2399                          addr += MMU_PAGESIZE;
2399 2400                          pfn++;
2400 2401                  }
2401 2402          }
2402 2403  
2403 2404          /*
2404 2405           * Check TSB and TLB page sizes.
2405 2406           */
2406 2407          if ((flags & HAT_LOAD_SHARE) == 0) {
2407 2408                  sfmmu_check_page_sizes(hat, 1);
2408 2409          }
2409 2410  }
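
The large-page loop above reduces to a single question per chunk: what is the largest page size whose length, virtual alignment, and physical alignment constraints all hold? A standalone userland sketch of that selection follows; the PGSZ_* constants and the byte-level pfn check are simplified stand-ins for MMU_PAGESIZE*/mmu_ptob(), and the disable_large_pages mask is omitted, so treat it as an illustration rather than the driver's exact policy.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define	PGSZ_8K		(8UL * 1024)
#define	PGSZ_64K	(64UL * 1024)
#define	PGSZ_512K	(512UL * 1024)
#define	PGSZ_4M		(4UL * 1024 * 1024)

/* Pick the largest page size whose length and va/pa alignment all hold. */
static size_t
pick_pagesize(uintptr_t va, uint64_t pa, size_t len)
{
	static const size_t sizes[] = { PGSZ_4M, PGSZ_512K, PGSZ_64K, PGSZ_8K };
	int i;

	for (i = 0; i < 4; i++) {
		size_t sz = sizes[i];

		if (len >= sz && (va & (sz - 1)) == 0 && (pa & (sz - 1)) == 0)
			return (sz);
	}
	return (PGSZ_8K);
}

int
main(void)
{
	uintptr_t va = 0x400000;		/* 4M-aligned virtual address */
	uint64_t pa = 0x10000000;		/* 4M-aligned physical address */
	size_t len = 6UL * 1024 * 1024;		/* 6M: one 4M chunk, then 512K */

	while (len != 0) {
		size_t sz = pick_pagesize(va, pa, len);

		printf("map %zuK at va 0x%lx\n", sz / 1024, (unsigned long)va);
		va += sz;
		pa += sz;
		len -= sz;
	}
	return (0);
}
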
2410 2411  
2411 2412  void
2412 2413  hat_memload_array(struct hat *hat, caddr_t addr, size_t len,
2413 2414          struct page **pps, uint_t attr, uint_t flags)
2414 2415  {
2415 2416          hat_do_memload_array(hat, addr, len, pps, attr, flags,
2416 2417              SFMMU_INVALID_SHMERID);
2417 2418  }
2418 2419  
2419 2420  void
2420 2421  hat_memload_array_region(struct hat *hat, caddr_t addr, size_t len,
2421 2422          struct page **pps, uint_t attr, uint_t flags,
2422 2423          hat_region_cookie_t rcookie)
2423 2424  {
2424 2425          uint_t rid;
2425 2426          if (rcookie == HAT_INVALID_REGION_COOKIE) {
2426 2427                  hat_do_memload_array(hat, addr, len, pps, attr, flags,
2427 2428                      SFMMU_INVALID_SHMERID);
2428 2429                  return;
2429 2430          }
2430 2431          rid = (uint_t)((uint64_t)rcookie);
2431 2432          ASSERT(rid < SFMMU_MAX_HME_REGIONS);
2432 2433          hat_do_memload_array(hat, addr, len, pps, attr, flags, rid);
2433 2434  }
2434 2435  
2435 2436  /*
2436 2437   * Map the largest extent possible out of the page array. The array may NOT
2437 2438   * be in order.  The largest possible mapping a page can have
2438 2439   * is specified in the p_szc field.  The p_szc field
2439 2440   * cannot change as long as there are any mappings (large or small)
2440 2441   * to any of the pages that make up the large page. (i.e. any
2441 2442   * promotion/demotion of page size is not up to the hat but up to
2442 2443   * the page free list manager).  The array
2443 2444   * should consist of properly aligned contiguous pages that are
2444 2445   * part of a big page for a large mapping to be created.
2445 2446   */
2446 2447  static void
2447 2448  hat_do_memload_array(struct hat *hat, caddr_t addr, size_t len,
2448 2449          struct page **pps, uint_t attr, uint_t flags, uint_t rid)
2449 2450  {
2450 2451          int  ttesz;
2451 2452          size_t mapsz;
2452 2453          pgcnt_t numpg, npgs;
2453 2454          tte_t tte;
2454 2455          page_t *pp;
2455 2456          uint_t large_pages_disable;
2456 2457  
2457 2458          ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET));
2458 2459          SFMMU_VALIDATE_HMERID(hat, rid, addr, len);
2459 2460  
2460 2461          if (hat->sfmmu_rmstat)
2461 2462                  hat_resvstat(len, hat->sfmmu_as, addr);
2462 2463  
2463 2464  #if defined(SF_ERRATA_57)
2464 2465          if ((hat != ksfmmup) && AS_TYPE_64BIT(hat->sfmmu_as) &&
2465 2466              (addr < errata57_limit) && (attr & PROT_EXEC) &&
2466 2467              !(flags & HAT_LOAD_SHARE)) {
2467 2468                  cmn_err(CE_WARN, "hat_memload_array: illegal attempt to make "
2468 2469                      "user page executable");
2469 2470                  attr &= ~PROT_EXEC;
2470 2471          }
2471 2472  #endif
2472 2473  
2473 2474          /* Get number of pages */
2474 2475          npgs = len >> MMU_PAGESHIFT;
2475 2476  
2476 2477          if (flags & HAT_LOAD_SHARE) {
2477 2478                  large_pages_disable = disable_ism_large_pages;
2478 2479          } else {
2479 2480                  large_pages_disable = disable_large_pages;
2480 2481          }
2481 2482  
2482 2483          if (npgs < NHMENTS || large_pages_disable == LARGE_PAGES_OFF) {
2483 2484                  sfmmu_memload_batchsmall(hat, addr, pps, attr, flags, npgs,
2484 2485                      rid);
2485 2486                  return;
2486 2487          }
2487 2488  
2488 2489          while (npgs >= NHMENTS) {
2489 2490                  pp = *pps;
2490 2491                  for (ttesz = pp->p_szc; ttesz != TTE8K; ttesz--) {
2491 2492                          /*
2492 2493                           * Check if this page size is disabled.
2493 2494                           */
2494 2495                          if (large_pages_disable & (1 << ttesz))
2495 2496                                  continue;
2496 2497  
2497 2498                          numpg = TTEPAGES(ttesz);
2498 2499                          mapsz = numpg << MMU_PAGESHIFT;
2499 2500                          if ((npgs >= numpg) &&
2500 2501                              IS_P2ALIGNED(addr, mapsz) &&
2501 2502                              IS_P2ALIGNED(pp->p_pagenum, numpg)) {
2502 2503                                  /*
2503 2504                                   * At this point we have enough pages and
2504 2505                                   * we know the virtual address and the pfn
2505 2506                                   * are properly aligned.  We still need
2506 2507                                   * to check for physical contiguity but since
2507 2508                                   * it is very likely that this is the case
2508 2509                                   * we will assume they are so and undo
2509 2510                                   * the request if necessary.  It would
2510 2511                                   * be great if we could get a hint flag
2511 2512                                   * like HAT_CONTIG which would tell us
2512 2513                                   * the pages are contiguous for sure.
2513 2514                                   */
2514 2515                                  sfmmu_memtte(&tte, (*pps)->p_pagenum,
2515 2516                                      attr, ttesz);
2516 2517                                  if (!sfmmu_tteload_array(hat, &tte, addr,
2517 2518                                      pps, flags, rid)) {
2518 2519                                          break;
2519 2520                                  }
2520 2521                          }
2521 2522                  }
2522 2523                  if (ttesz == TTE8K) {
2523 2524                          /*
2524 2525                           * We were not able to map the array using a large
2525 2526                           * page; batch an hmeblk, or a fraction of one, at a time.
2526 2527                           */
2527 2528                          numpg = ((uintptr_t)addr >> MMU_PAGESHIFT)
2528 2529                              & (NHMENTS-1);
2529 2530                          numpg = NHMENTS - numpg;
2530 2531                          ASSERT(numpg <= npgs);
2531 2532                          mapsz = numpg * MMU_PAGESIZE;
2532 2533                          sfmmu_memload_batchsmall(hat, addr, pps, attr, flags,
2533 2534                              numpg, rid);
2534 2535                  }
2535 2536                  addr += mapsz;
2536 2537                  npgs -= numpg;
2537 2538                  pps += numpg;
2538 2539          }
2539 2540  
2540 2541          if (npgs) {
2541 2542                  sfmmu_memload_batchsmall(hat, addr, pps, attr, flags, npgs,
2542 2543                      rid);
2543 2544          }
2544 2545  
2545 2546          /*
2546 2547           * Check TSB and TLB page sizes.
2547 2548           */
2548 2549          if ((flags & HAT_LOAD_SHARE) == 0) {
2549 2550                  sfmmu_check_page_sizes(hat, 1);
2550 2551          }
2551 2552  }
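
The block comment above hat_do_memload_array() describes a descent from p_szc toward 8K, skipping disabled sizes and requiring both page-count and pfn alignment. A minimal model of that descent is sketched below; NPAGES() stands in for TTEPAGES() (each sfmmu size code covers eight times the pages of the previous one) and the size-code constants are local assumptions, not the kernel headers.

#include <stdio.h>

#define	TTE8K	0
#define	TTE64K	1
#define	TTE512K	2
#define	TTE4M	3
#define	NPAGES(sz)	(1UL << (3 * (sz)))	/* 1, 8, 64, 512 base pages */

/*
 * Largest enabled size code <= maxszc that covers npgs pages while keeping
 * both the virtual page number and the pfn aligned to that page count.
 */
static int
best_ttesz(int maxszc, unsigned long npgs, unsigned long vpn,
    unsigned long pfn, unsigned int disable_mask)
{
	int sz;

	for (sz = maxszc; sz > TTE8K; sz--) {
		unsigned long np = NPAGES(sz);

		if ((disable_mask & (1U << sz)) != 0)
			continue;
		if (npgs >= np && (vpn % np) == 0 && (pfn % np) == 0)
			return (sz);
	}
	return (TTE8K);
}

int
main(void)
{
	/* 512 contiguous, aligned pages, but 4M mappings disabled: code 2. */
	printf("%d\n", best_ttesz(TTE4M, 512, 0, 0, 1U << TTE4M));
	/* Same run with nothing disabled: code 3 (4M). */
	printf("%d\n", best_ttesz(TTE4M, 512, 0, 0, 0));
	return (0);
}
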
2552 2553  
2553 2554  /*
2554 2555   * Function tries to batch 8K pages into the same hme blk.
2555 2556   */
2556 2557  static void
2557 2558  sfmmu_memload_batchsmall(struct hat *hat, caddr_t vaddr, page_t **pps,
2558 2559                      uint_t attr, uint_t flags, pgcnt_t npgs, uint_t rid)
2559 2560  {
2560 2561          tte_t   tte;
2561 2562          page_t *pp;
2562 2563          struct hmehash_bucket *hmebp;
2563 2564          struct hme_blk *hmeblkp;
2564 2565          int     index;
2565 2566  
2566 2567          while (npgs) {
2567 2568                  /*
2568 2569                   * Acquire the hash bucket.
2569 2570                   */
2570 2571                  hmebp = sfmmu_tteload_acquire_hashbucket(hat, vaddr, TTE8K,
2571 2572                      rid);
2572 2573                  ASSERT(hmebp);
2573 2574  
2574 2575                  /*
2575 2576                   * Find the hment block.
2576 2577                   */
2577 2578                  hmeblkp = sfmmu_tteload_find_hmeblk(hat, hmebp, vaddr,
2578 2579                      TTE8K, flags, rid);
2579 2580                  ASSERT(hmeblkp);
2580 2581  
2581 2582                  do {
2582 2583                          /*
2583 2584                           * Make the tte.
2584 2585                           */
2585 2586                          pp = *pps;
2586 2587                          sfmmu_memtte(&tte, pp->p_pagenum, attr, TTE8K);
2587 2588  
2588 2589                          /*
2589 2590                           * Add the translation.
2590 2591                           */
2591 2592                          (void) sfmmu_tteload_addentry(hat, hmeblkp, &tte,
2592 2593                              vaddr, pps, flags, rid);
2593 2594  
2594 2595                          /*
2595 2596                           * Goto next page.
2596 2597                           */
2597 2598                          pps++;
2598 2599                          npgs--;
2599 2600  
2600 2601                          /*
2601 2602                           * Goto next address.
2602 2603                           */
2603 2604                          vaddr += MMU_PAGESIZE;
2604 2605  
2605 2606                          /*
2606 2607                           * Don't cross over into a different hmeblk.
2607 2608                           */
2608 2609                          index = (int)(((uintptr_t)vaddr >> MMU_PAGESHIFT) &
2609 2610                              (NHMENTS-1));
2610 2611  
2611 2612                  } while (index != 0 && npgs != 0);
2612 2613  
2613 2614                  /*
2614 2615                   * Release the hash bucket.
2615 2616                   */
2616 2617  
2617 2618                  sfmmu_tteload_release_hashbucket(hmebp);
2618 2619          }
2619 2620  }
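
The inner loop above stops whenever the next 8K page would fall into a different hmeblk, i.e. when the hment index wraps back to zero. The arithmetic is easiest to see in isolation; in the sketch below NHMENTS == 8 and a 13-bit page shift mirror the sfmmu defaults but are assumptions local to the example.

#include <stdio.h>
#include <stdint.h>

#define	MMU_PAGESHIFT	13		/* 8K pages */
#define	NHMENTS		8		/* hments per hmeblk */

/* How many 8K pages fit before the loop would cross an hmeblk boundary. */
static unsigned int
pages_until_hmeblk_boundary(uintptr_t vaddr)
{
	unsigned int idx = (vaddr >> MMU_PAGESHIFT) & (NHMENTS - 1);

	return (NHMENTS - idx);
}

int
main(void)
{
	/* An address three 8K pages past an hmeblk boundary: 5 pages left. */
	printf("%u\n", pages_until_hmeblk_boundary(3UL << MMU_PAGESHIFT));
	/* An hmeblk-aligned address: a full batch of 8 pages fits. */
	printf("%u\n", pages_until_hmeblk_boundary(0x100000));
	return (0);
}
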
2620 2621  
2621 2622  /*
2622 2623   * Construct a tte for a page:
2623 2624   *
2624 2625   * tte_valid = 1
2625 2626   * tte_size2 = size & TTE_SZ2_BITS (Panther and Olympus-C only)
2626 2627   * tte_size = size
2627 2628   * tte_nfo = attr & HAT_NOFAULT
2628 2629   * tte_ie = attr & HAT_STRUCTURE_LE
2629 2630   * tte_hmenum = hmenum
2630 2631   * tte_pahi = pp->p_pagenum >> TTE_PASHIFT;
2631 2632   * tte_palo = pp->p_pagenum & TTE_PALOMASK;
2632 2633   * tte_ref = 1 (optimization)
2633 2634   * tte_wr_perm = attr & PROT_WRITE;
2634 2635   * tte_no_sync = attr & HAT_NOSYNC
2635 2636   * tte_lock = attr & SFMMU_LOCKTTE
2636 2637   * tte_cp = !(attr & SFMMU_UNCACHEPTTE)
2637 2638   * tte_cv = !(attr & SFMMU_UNCACHEVTTE)
2638 2639   * tte_e = attr & SFMMU_SIDEFFECT
2639 2640   * tte_priv = !(attr & PROT_USER)
2640 2641   * tte_hwwr = if nosync is set and it is writable we set the mod bit (opt)
2641 2642   * tte_glb = 0
2642 2643   */
2643 2644  void
2644 2645  sfmmu_memtte(tte_t *ttep, pfn_t pfn, uint_t attr, int tte_sz)
2645 2646  {
2646 2647          ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR));
2647 2648  
2648 2649          ttep->tte_inthi = MAKE_TTE_INTHI(pfn, attr, tte_sz, 0 /* hmenum */);
2649 2650          ttep->tte_intlo = MAKE_TTE_INTLO(pfn, attr, tte_sz, 0 /* hmenum */);
2650 2651  
2651 2652          if (TTE_IS_NOSYNC(ttep)) {
2652 2653                  TTE_SET_REF(ttep);
2653 2654                  if (TTE_IS_WRITABLE(ttep)) {
2654 2655                          TTE_SET_MOD(ttep);
2655 2656                  }
2656 2657          }
2657 2658          if (TTE_IS_NFO(ttep) && TTE_IS_EXECUTABLE(ttep)) {
2658 2659                  panic("sfmmu_memtte: can't set both NFO and EXEC bits");
2659 2660          }
2660 2661  }
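
The field recipe in the comment above sfmmu_memtte() can be restated as a plain structure for readers who do not have the tte layout memorized. The sketch below is emphatically not the kernel's tte_t: the flag values and struct fields are local stand-ins that only mirror the mapping the comment describes, including the ref/mod preset for writable HAT_NOSYNC mappings.

#include <stdio.h>

/* Local stand-in flag bits; the values are arbitrary for the example. */
#define	PROT_WRITE		0x01
#define	PROT_USER		0x02
#define	HAT_NOSYNC		0x04
#define	SFMMU_UNCACHEPTTE	0x08
#define	SFMMU_UNCACHEVTTE	0x10
#define	SFMMU_SIDEFFECT		0x20

/* A toy tte with one int per field named in the block comment. */
struct toy_tte {
	int valid, size, ref, wr_perm, no_sync, cp, cv, e, priv, hwwr;
};

static struct toy_tte
make_toy_tte(unsigned int attr, int size)
{
	struct toy_tte t = { 0 };

	t.valid = 1;
	t.size = size;
	t.ref = 1;				/* ref preset as an optimization */
	t.wr_perm = (attr & PROT_WRITE) != 0;
	t.no_sync = (attr & HAT_NOSYNC) != 0;
	t.cp = (attr & SFMMU_UNCACHEPTTE) == 0;
	t.cv = (attr & SFMMU_UNCACHEVTTE) == 0;
	t.e = (attr & SFMMU_SIDEFFECT) != 0;
	t.priv = (attr & PROT_USER) == 0;
	/* nosync + writable: preset the mod bit so no hw sync is needed */
	t.hwwr = t.no_sync && t.wr_perm;
	return (t);
}

int
main(void)
{
	struct toy_tte t = make_toy_tte(PROT_WRITE | HAT_NOSYNC | PROT_USER, 0);

	printf("wr_perm=%d hwwr=%d priv=%d cp=%d\n",
	    t.wr_perm, t.hwwr, t.priv, t.cp);
	return (0);
}
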
2661 2662  
2662 2663  /*
2663 2664   * This function will add a translation to the hme_blk and allocate the
2664 2665   * hme_blk if one does not exist.
2665 2666   * If a page structure is specified then it will add the
2666 2667   * corresponding hment to the mapping list.
2667 2668   * It will also update the hmenum field for the tte.
2668 2669   *
2669 2670   * Currently this function is only used for kernel mappings.
2670 2671   * So pass invalid region to sfmmu_tteload_array().
2671 2672   */
2672 2673  void
2673 2674  sfmmu_tteload(struct hat *sfmmup, tte_t *ttep, caddr_t vaddr, page_t *pp,
2674 2675          uint_t flags)
2675 2676  {
2676 2677          ASSERT(sfmmup == ksfmmup);
2677 2678          (void) sfmmu_tteload_array(sfmmup, ttep, vaddr, &pp, flags,
2678 2679              SFMMU_INVALID_SHMERID);
2679 2680  }
2680 2681  
2681 2682  /*
2682 2683   * Load (ttep != NULL) or unload (ttep == NULL) one entry in the TSB.
2683 2684   * Assumes that a particular page size may only be resident in one TSB.
2684 2685   */
2685 2686  static void
2686 2687  sfmmu_mod_tsb(sfmmu_t *sfmmup, caddr_t vaddr, tte_t *ttep, int ttesz)
2687 2688  {
2688 2689          struct tsb_info *tsbinfop = NULL;
2689 2690          uint64_t tag;
2690 2691          struct tsbe *tsbe_addr;
2691 2692          uint64_t tsb_base;
2692 2693          uint_t tsb_size;
2693 2694          int vpshift = MMU_PAGESHIFT;
2694 2695          int phys = 0;
2695 2696  
2696 2697          if (sfmmup == ksfmmup) { /* No support for 32/256M ksfmmu pages */
2697 2698                  phys = ktsb_phys;
2698 2699                  if (ttesz >= TTE4M) {
2699 2700  #ifndef sun4v
2700 2701                          ASSERT((ttesz != TTE32M) && (ttesz != TTE256M));
2701 2702  #endif
2702 2703                          tsb_base = (phys)? ktsb4m_pbase : (uint64_t)ktsb4m_base;
2703 2704                          tsb_size = ktsb4m_szcode;
2704 2705                  } else {
2705 2706                          tsb_base = (phys)? ktsb_pbase : (uint64_t)ktsb_base;
2706 2707                          tsb_size = ktsb_szcode;
2707 2708                  }
2708 2709          } else {
2709 2710                  SFMMU_GET_TSBINFO(tsbinfop, sfmmup, ttesz);
2710 2711  
2711 2712                  /*
2712 2713                   * If there isn't a TSB for this page size, or the TSB is
2713 2714                   * swapped out, there is nothing to do.  Note that the latter
2714 2715                   * case seems impossible but can occur if hat_pageunload()
2715 2716                   * is called on an ISM mapping while the process is swapped
2716 2717                   * out.
2717 2718                   */
2718 2719                  if (tsbinfop == NULL || (tsbinfop->tsb_flags & TSB_SWAPPED))
2719 2720                          return;
2720 2721  
2721 2722                  /*
2722 2723                   * If another thread is in the middle of relocating a TSB
2723 2724                   * we can't unload the entry so set a flag so that the
2724 2725                   * TSB will be flushed before it can be accessed by the
2725 2726                   * process.
2726 2727                   */
2727 2728                  if ((tsbinfop->tsb_flags & TSB_RELOC_FLAG) != 0) {
2728 2729                          if (ttep == NULL)
2729 2730                                  tsbinfop->tsb_flags |= TSB_FLUSH_NEEDED;
2730 2731                          return;
2731 2732                  }
2732 2733  #if defined(UTSB_PHYS)
2733 2734                  phys = 1;
2734 2735                  tsb_base = (uint64_t)tsbinfop->tsb_pa;
2735 2736  #else
2736 2737                  tsb_base = (uint64_t)tsbinfop->tsb_va;
2737 2738  #endif
2738 2739                  tsb_size = tsbinfop->tsb_szc;
2739 2740          }
2740 2741          if (ttesz >= TTE4M)
2741 2742                  vpshift = MMU_PAGESHIFT4M;
2742 2743  
2743 2744          tsbe_addr = sfmmu_get_tsbe(tsb_base, vaddr, vpshift, tsb_size);
2744 2745          tag = sfmmu_make_tsbtag(vaddr);
2745 2746  
2746 2747          if (ttep == NULL) {
2747 2748                  sfmmu_unload_tsbe(tsbe_addr, tag, phys);
2748 2749          } else {
2749 2750                  if (ttesz >= TTE4M) {
2750 2751                          SFMMU_STAT(sf_tsb_load4m);
2751 2752                  } else {
2752 2753                          SFMMU_STAT(sf_tsb_load8k);
2753 2754                  }
2754 2755  
2755 2756                  sfmmu_load_tsbe(tsbe_addr, tag, ttep, phys);
2756 2757          }
2757 2758  }
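
Conceptually the TSB acts as a direct-mapped, software-managed translation cache: the slot is derived from the virtual page number modulo the number of entries, and a tag resolves collisions. The sketch below shows only that indexing idea; the real sfmmu_get_tsbe()/sfmmu_make_tsbtag() use their own encodings, so the shift, entry count, and tag format here are illustrative assumptions.

#include <stdio.h>
#include <stdint.h>

#define	MMU_PAGESHIFT	13	/* 8K base pages, as on sfmmu */

/* A toy direct-mapped TSB: tag + data per entry, power-of-two entries. */
struct toy_tsbe {
	uint64_t tag;
	uint64_t data;
};

static struct toy_tsbe *
toy_get_tsbe(struct toy_tsbe *tsb, unsigned int nentries, uintptr_t vaddr,
    int vpshift)
{
	/* Index by virtual page number; nentries must be a power of two. */
	return (&tsb[(vaddr >> vpshift) & (nentries - 1)]);
}

int
main(void)
{
	static struct toy_tsbe tsb[512];
	uintptr_t va = 0x12340000;
	struct toy_tsbe *e = toy_get_tsbe(tsb, 512, va, MMU_PAGESHIFT);

	e->tag = va >> MMU_PAGESHIFT;	/* load: store tag + translation */
	e->data = 0xabcd;
	printf("slot %ld tag 0x%lx\n", (long)(e - tsb), (unsigned long)e->tag);

	e->tag = ~0UL;			/* unload: invalidate the tag */
	return (0);
}
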
2758 2759  
2759 2760  /*
2760 2761   * Unmap all entries from [start, end) matching the given page size.
2761 2762   *
2762 2763   * This function is used primarily to unmap replicated 64K or 512K entries
2763 2764   * from the TSB that are inserted using the base page size TSB pointer, but
2764 2765   * it may also be called to unmap a range of addresses from the TSB.
2765 2766   */
2766 2767  void
2767 2768  sfmmu_unload_tsb_range(sfmmu_t *sfmmup, caddr_t start, caddr_t end, int ttesz)
2768 2769  {
2769 2770          struct tsb_info *tsbinfop;
2770 2771          uint64_t tag;
2771 2772          struct tsbe *tsbe_addr;
2772 2773          caddr_t vaddr;
2773 2774          uint64_t tsb_base;
2774 2775          int vpshift, vpgsz;
2775 2776          uint_t tsb_size;
2776 2777          int phys = 0;
2777 2778  
2778 2779          /*
2779 2780           * Assumptions:
2780 2781           *  If ttesz == 8K, 64K or 512K, we walk through the range 8K
2781 2782           *  at a time shooting down any valid entries we encounter.
2782 2783           *
2783 2784           *  If ttesz >= 4M we walk the range 4M at a time shooting
2784 2785           *  down any valid mappings we find.
2785 2786           */
2786 2787          if (sfmmup == ksfmmup) {
2787 2788                  phys = ktsb_phys;
2788 2789                  if (ttesz >= TTE4M) {
2789 2790  #ifndef sun4v
2790 2791                          ASSERT((ttesz != TTE32M) && (ttesz != TTE256M));
2791 2792  #endif
2792 2793                          tsb_base = (phys)? ktsb4m_pbase : (uint64_t)ktsb4m_base;
2793 2794                          tsb_size = ktsb4m_szcode;
2794 2795                  } else {
2795 2796                          tsb_base = (phys)? ktsb_pbase : (uint64_t)ktsb_base;
2796 2797                          tsb_size = ktsb_szcode;
2797 2798                  }
2798 2799          } else {
2799 2800                  SFMMU_GET_TSBINFO(tsbinfop, sfmmup, ttesz);
2800 2801  
2801 2802                  /*
2802 2803                   * If there isn't a TSB for this page size, or the TSB is
2803 2804                   * swapped out, there is nothing to do.  Note that the latter
2804 2805                   * case seems impossible but can occur if hat_pageunload()
2805 2806                   * is called on an ISM mapping while the process is swapped
2806 2807                   * out.
2807 2808                   */
2808 2809                  if (tsbinfop == NULL || (tsbinfop->tsb_flags & TSB_SWAPPED))
2809 2810                          return;
2810 2811  
2811 2812                  /*
2812 2813                   * If another thread is in the middle of relocating a TSB
2813 2814                   * we can't unload the entry so set a flag so that the
2814 2815                   * TSB will be flushed before it can be accessed by the
2815 2816                   * process.
2816 2817                   */
2817 2818                  if ((tsbinfop->tsb_flags & TSB_RELOC_FLAG) != 0) {
2818 2819                          tsbinfop->tsb_flags |= TSB_FLUSH_NEEDED;
2819 2820                          return;
2820 2821                  }
2821 2822  #if defined(UTSB_PHYS)
2822 2823                  phys = 1;
2823 2824                  tsb_base = (uint64_t)tsbinfop->tsb_pa;
2824 2825  #else
2825 2826                  tsb_base = (uint64_t)tsbinfop->tsb_va;
2826 2827  #endif
2827 2828                  tsb_size = tsbinfop->tsb_szc;
2828 2829          }
2829 2830          if (ttesz >= TTE4M) {
2830 2831                  vpshift = MMU_PAGESHIFT4M;
2831 2832                  vpgsz = MMU_PAGESIZE4M;
2832 2833          } else {
2833 2834                  vpshift = MMU_PAGESHIFT;
2834 2835                  vpgsz = MMU_PAGESIZE;
2835 2836          }
2836 2837  
2837 2838          for (vaddr = start; vaddr < end; vaddr += vpgsz) {
2838 2839                  tag = sfmmu_make_tsbtag(vaddr);
2839 2840                  tsbe_addr = sfmmu_get_tsbe(tsb_base, vaddr, vpshift, tsb_size);
2840 2841                  sfmmu_unload_tsbe(tsbe_addr, tag, phys);
2841 2842          }
2842 2843  }
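
The assumptions stated at the top of the function give a simple cost model: one TSB probe per 8K of range for 8K/64K/512K ttes, or one per 4M for 4M and larger. The arithmetic below is a standalone illustration of that step-size choice, with local constants standing in for MMU_PAGESIZE and MMU_PAGESIZE4M.

#include <stdio.h>
#include <stdint.h>

#define	PGSZ_8K	(8UL * 1024)
#define	PGSZ_4M	(4UL * 1024 * 1024)

/* Number of TSB probes a sfmmu_unload_tsb_range()-style walk would make. */
static unsigned long
tsb_probes(uintptr_t start, uintptr_t end, int ttesz_is_4m_or_larger)
{
	unsigned long step = ttesz_is_4m_or_larger ? PGSZ_4M : PGSZ_8K;

	return (((unsigned long)(end - start) + step - 1) / step);
}

int
main(void)
{
	/* Unloading 512K of replicated 64K entries: 64 probes at 8K steps. */
	printf("%lu\n", tsb_probes(0, 512 * 1024, 0));
	/* Unloading 32M of 4M mappings: 8 probes at 4M steps. */
	printf("%lu\n", tsb_probes(0, 32UL * 1024 * 1024, 1));
	return (0);
}
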
2843 2844  
2844 2845  /*
2845 2846   * Select the optimum TSB size given the number of mappings
2846 2847   * that need to be cached.
2847 2848   */
2848 2849  static int
2849 2850  sfmmu_select_tsb_szc(pgcnt_t pgcnt)
2850 2851  {
2851 2852          int szc = 0;
2852 2853  
2853 2854  #ifdef DEBUG
2854 2855          if (tsb_grow_stress) {
2855 2856                  uint32_t randval = (uint32_t)gettick() >> 4;
2856 2857                  return (randval % (tsb_max_growsize + 1));
2857 2858          }
2858 2859  #endif  /* DEBUG */
2859 2860  
2860 2861          while ((szc < tsb_max_growsize) && (pgcnt > SFMMU_RSS_TSBSIZE(szc)))
2861 2862                  szc++;
2862 2863          return (szc);
2863 2864  }
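
The growth policy amounts to picking the smallest size code whose capacity covers the resident mapping count, clamped at the maximum. The model below assumes the capacity doubles per size code and uses made-up numbers for the base capacity and the cap; the real SFMMU_RSS_TSBSIZE() and tsb_max_growsize are tunable-dependent.

#include <stdio.h>

#define	TSB_MAX_SZC	7		/* assumed cap for the example */
#define	BASE_CAPACITY	512UL		/* mappings covered by size code 0 */

/* Capacity doubles with each size code (a TSB twice as large). */
static unsigned long
tsb_capacity(int szc)
{
	return (BASE_CAPACITY << szc);
}

static int
select_tsb_szc(unsigned long pgcnt)
{
	int szc = 0;

	while (szc < TSB_MAX_SZC && pgcnt > tsb_capacity(szc))
		szc++;
	return (szc);
}

int
main(void)
{
	printf("%d\n", select_tsb_szc(100));		/* fits in size code 0 */
	printf("%d\n", select_tsb_szc(3000));		/* needs size code 3 */
	printf("%d\n", select_tsb_szc(1UL << 30));	/* clamped at the max */
	return (0);
}
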
2864 2865  
2865 2866  /*
2866 2867   * This function will add a translation to the hme_blk and allocate the
2867 2868   * hme_blk if one does not exist.
2868 2869   * If a page structure is specified then it will add the
2869 2870   * corresponding hment to the mapping list.
2870 2871   * It will also update the hmenum field for the tte.
2871 2872   * Furthermore, it attempts to create a large page translation
2872 2873   * for <addr,hat> at page array pps.  It assumes addr and first
2873 2874   * pp is correctly aligned.  It returns 0 if successful and 1 otherwise.
2874 2875   */
2875 2876  static int
2876 2877  sfmmu_tteload_array(sfmmu_t *sfmmup, tte_t *ttep, caddr_t vaddr,
2877 2878          page_t **pps, uint_t flags, uint_t rid)
2878 2879  {
2879 2880          struct hmehash_bucket *hmebp;
2880 2881          struct hme_blk *hmeblkp;
2881 2882          int     ret;
2882 2883          uint_t  size;
2883 2884  
2884 2885          /*
2885 2886           * Get mapping size.
2886 2887           */
2887 2888          size = TTE_CSZ(ttep);
2888 2889          ASSERT(!((uintptr_t)vaddr & TTE_PAGE_OFFSET(size)));
2889 2890  
2890 2891          /*
2891 2892           * Acquire the hash bucket.
2892 2893           */
2893 2894          hmebp = sfmmu_tteload_acquire_hashbucket(sfmmup, vaddr, size, rid);
2894 2895          ASSERT(hmebp);
2895 2896  
2896 2897          /*
2897 2898           * Find the hment block.
2898 2899           */
2899 2900          hmeblkp = sfmmu_tteload_find_hmeblk(sfmmup, hmebp, vaddr, size, flags,
2900 2901              rid);
2901 2902          ASSERT(hmeblkp);
2902 2903  
2903 2904          /*
2904 2905           * Add the translation.
2905 2906           */
2906 2907          ret = sfmmu_tteload_addentry(sfmmup, hmeblkp, ttep, vaddr, pps, flags,
2907 2908              rid);
2908 2909  
2909 2910          /*
2910 2911           * Release the hash bucket.
2911 2912           */
2912 2913          sfmmu_tteload_release_hashbucket(hmebp);
2913 2914  
2914 2915          return (ret);
2915 2916  }
2916 2917  
2917 2918  /*
2918 2919   * Function locks and returns a pointer to the hash bucket for vaddr and size.
2919 2920   */
2920 2921  static struct hmehash_bucket *
2921 2922  sfmmu_tteload_acquire_hashbucket(sfmmu_t *sfmmup, caddr_t vaddr, int size,
2922 2923      uint_t rid)
2923 2924  {
2924 2925          struct hmehash_bucket *hmebp;
2925 2926          int hmeshift;
2926 2927          void *htagid = sfmmutohtagid(sfmmup, rid);
2927 2928  
2928 2929          ASSERT(htagid != NULL);
2929 2930  
2930 2931          hmeshift = HME_HASH_SHIFT(size);
2931 2932  
2932 2933          hmebp = HME_HASH_FUNCTION(htagid, vaddr, hmeshift);
2933 2934  
2934 2935          SFMMU_HASH_LOCK(hmebp);
2935 2936  
2936 2937          return (hmebp);
2937 2938  }
2938 2939  
2939 2940  /*
2940 2941   * Function returns a pointer to an hmeblk in the hash bucket, hmebp. If the
2941 2942   * hmeblk doesn't exist for the [sfmmup, vaddr & size] signature, a hmeblk is
2942 2943   * allocated.
2943 2944   */
2944 2945  static struct hme_blk *
2945 2946  sfmmu_tteload_find_hmeblk(sfmmu_t *sfmmup, struct hmehash_bucket *hmebp,
2946 2947          caddr_t vaddr, uint_t size, uint_t flags, uint_t rid)
2947 2948  {
2948 2949          hmeblk_tag hblktag;
2949 2950          int hmeshift;
2950 2951          struct hme_blk *hmeblkp, *pr_hblk, *list = NULL;
2951 2952  
2952 2953          SFMMU_VALIDATE_HMERID(sfmmup, rid, vaddr, TTEBYTES(size));
2953 2954  
2954 2955          hblktag.htag_id = sfmmutohtagid(sfmmup, rid);
2955 2956          ASSERT(hblktag.htag_id != NULL);
2956 2957          hmeshift = HME_HASH_SHIFT(size);
2957 2958          hblktag.htag_bspage = HME_HASH_BSPAGE(vaddr, hmeshift);
2958 2959          hblktag.htag_rehash = HME_HASH_REHASH(size);
2959 2960          hblktag.htag_rid = rid;
2960 2961  
2961 2962  ttearray_realloc:
2962 2963  
2963 2964          HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, pr_hblk, &list);
2964 2965  
2965 2966          /*
2966 2967           * We block until hblk_reserve_lock is released; it's held by
2967 2968           * the thread temporarily using hblk_reserve, until hblk_reserve is
2968 2969           * replaced by a hblk from sfmmu8_cache.
2969 2970           */
2970 2971          if (hmeblkp == (struct hme_blk *)hblk_reserve &&
2971 2972              hblk_reserve_thread != curthread) {
2972 2973                  SFMMU_HASH_UNLOCK(hmebp);
2973 2974                  mutex_enter(&hblk_reserve_lock);
2974 2975                  mutex_exit(&hblk_reserve_lock);
2975 2976                  SFMMU_STAT(sf_hblk_reserve_hit);
2976 2977                  SFMMU_HASH_LOCK(hmebp);
2977 2978                  goto ttearray_realloc;
2978 2979          }
2979 2980  
2980 2981          if (hmeblkp == NULL) {
2981 2982                  hmeblkp = sfmmu_hblk_alloc(sfmmup, vaddr, hmebp, size,
2982 2983                      hblktag, flags, rid);
2983 2984                  ASSERT(!SFMMU_IS_SHMERID_VALID(rid) || hmeblkp->hblk_shared);
2984 2985                  ASSERT(SFMMU_IS_SHMERID_VALID(rid) || !hmeblkp->hblk_shared);
2985 2986          } else {
2986 2987                  /*
2987 2988                   * It is possible for 8k and 64k hblks to collide since they
2988 2989                   * have the same rehash value. This is because we
2989 2990                   * lazily free hblks and 8K/64K blks could be lingering.
2990 2991                   * If we find a size mismatch we free the block and try again.
2991 2992                   */
2992 2993                  if (get_hblk_ttesz(hmeblkp) != size) {
2993 2994                          ASSERT(!hmeblkp->hblk_vcnt);
2994 2995                          ASSERT(!hmeblkp->hblk_hmecnt);
2995 2996                          sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk,
2996 2997                              &list, 0);
2997 2998                          goto ttearray_realloc;
2998 2999                  }
2999 3000                  if (hmeblkp->hblk_shw_bit) {
3000 3001                          /*
3001 3002                           * if the hblk was previously used as a shadow hblk then
3002 3003                           * we will change it to a normal hblk
3003 3004                           */
3004 3005                          ASSERT(!hmeblkp->hblk_shared);
3005 3006                          if (hmeblkp->hblk_shw_mask) {
3006 3007                                  sfmmu_shadow_hcleanup(sfmmup, hmeblkp, hmebp);
3007 3008                                  ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
3008 3009                                  goto ttearray_realloc;
3009 3010                          } else {
3010 3011                                  hmeblkp->hblk_shw_bit = 0;
3011 3012                          }
3012 3013                  }
3013 3014                  SFMMU_STAT(sf_hblk_hit);
3014 3015          }
3015 3016  
3016 3017          /*
3017 3018           * hat_memload() should never call kmem_cache_free() for kernel hmeblks;
3018 3019           * see block comment showing the stacktrace in sfmmu_hblk_alloc();
3019 3020           * set the flag parameter to 1 so that sfmmu_hblks_list_purge() will
3020 3021           * just add these hmeblks to the per-cpu pending queue.
3021 3022           */
3022 3023          sfmmu_hblks_list_purge(&list, 1);
3023 3024  
3024 3025          ASSERT(get_hblk_ttesz(hmeblkp) == size);
3025 3026          ASSERT(!hmeblkp->hblk_shw_bit);
3026 3027          ASSERT(!SFMMU_IS_SHMERID_VALID(rid) || hmeblkp->hblk_shared);
3027 3028          ASSERT(SFMMU_IS_SHMERID_VALID(rid) || !hmeblkp->hblk_shared);
3028 3029          ASSERT(hmeblkp->hblk_tag.htag_rid == rid);
3029 3030  
3030 3031          return (hmeblkp);
3031 3032  }
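
Stripped of the locking and the hblk_reserve dance, sfmmu_tteload_find_hmeblk() is a find-or-allocate over a hash chain in which a stale block of the wrong size is unlinked and the search restarted. The sketch below shows that shape only; the list layout, alloc_blk(), and the fields are assumptions, and none of the kernel's locking or pending-free queueing is modeled.

#include <stdio.h>
#include <stdlib.h>

struct blk {
	struct blk *next;
	unsigned long bspage;	/* base page of the range this block maps */
	int size;		/* tte size code the block was created for */
};

static struct blk *
alloc_blk(unsigned long bspage, int size)
{
	struct blk *b = calloc(1, sizeof (*b));

	if (b == NULL)
		abort();
	b->bspage = bspage;
	b->size = size;
	return (b);
}

static struct blk *
find_or_alloc(struct blk **bucket, unsigned long bspage, int size)
{
	struct blk **pp, *b;

retry:
	for (pp = bucket; (b = *pp) != NULL; pp = &b->next) {
		if (b->bspage != bspage)
			continue;
		if (b->size != size) {		/* stale block: unlink, retry */
			*pp = b->next;
			free(b);
			goto retry;
		}
		return (b);			/* hit */
	}
	b = alloc_blk(bspage, size);		/* miss: allocate at the head */
	b->next = *bucket;
	*bucket = b;
	return (b);
}

int
main(void)
{
	struct blk *bucket = NULL;
	struct blk *b;

	(void) find_or_alloc(&bucket, 0x100, 0);
	/* Same base page, different size: the stale block is replaced. */
	b = find_or_alloc(&bucket, 0x100, 1);
	printf("size now %d\n", b->size);
	return (0);
}
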
3032 3033  
3033 3034  /*
3034 3035   * Function adds a tte entry into the hmeblk. It returns 0 if successful and 1
3035 3036   * otherwise.
3036 3037   */
3037 3038  static int
3038 3039  sfmmu_tteload_addentry(sfmmu_t *sfmmup, struct hme_blk *hmeblkp, tte_t *ttep,
3039 3040          caddr_t vaddr, page_t **pps, uint_t flags, uint_t rid)
3040 3041  {
3041 3042          page_t *pp = *pps;
3042 3043          int hmenum, size, remap;
3043 3044          tte_t tteold, flush_tte;
3044 3045  #ifdef DEBUG
3045 3046          tte_t orig_old;
3046 3047  #endif /* DEBUG */
3047 3048          struct sf_hment *sfhme;
3048 3049          kmutex_t *pml, *pmtx;
3049 3050          hatlock_t *hatlockp;
3050 3051          int myflt;
3051 3052  
3052 3053          /*
3053 3054           * remove this panic when we decide to let user virtual address
3054 3055           * space be >= USERLIMIT.
3055 3056           */
3056 3057          if (!TTE_IS_PRIVILEGED(ttep) && vaddr >= (caddr_t)USERLIMIT)
3057 3058                  panic("user addr %p in kernel space", (void *)vaddr);
3058 3059  #if defined(TTE_IS_GLOBAL)
3059 3060          if (TTE_IS_GLOBAL(ttep))
3060 3061                  panic("sfmmu_tteload: creating global tte");
3061 3062  #endif
3062 3063  
3063 3064  #ifdef DEBUG
3064 3065          if (pf_is_memory(sfmmu_ttetopfn(ttep, vaddr)) &&
3065 3066              !TTE_IS_PCACHEABLE(ttep) && !sfmmu_allow_nc_trans)
3066 3067                  panic("sfmmu_tteload: non cacheable memory tte");
3067 3068  #endif /* DEBUG */
3068 3069  
3069 3070          /* don't simulate dirty bit for writeable ISM/DISM mappings */
3070 3071          if ((flags & HAT_LOAD_SHARE) && TTE_IS_WRITABLE(ttep)) {
3071 3072                  TTE_SET_REF(ttep);
3072 3073                  TTE_SET_MOD(ttep);
3073 3074          }
3074 3075  
3075 3076          if ((flags & HAT_LOAD_SHARE) || !TTE_IS_REF(ttep) ||
3076 3077              !TTE_IS_MOD(ttep)) {
3077 3078                  /*
3078 3079                   * Don't load TSB for dummy as in ISM.  Also don't preload
3079 3080                   * the TSB if the TTE isn't writable since we're likely to
3080 3081                   * fault on it again -- preloading can be fairly expensive.
3081 3082                   */
3082 3083                  flags |= SFMMU_NO_TSBLOAD;
3083 3084          }
3084 3085  
3085 3086          size = TTE_CSZ(ttep);
3086 3087          switch (size) {
3087 3088          case TTE8K:
3088 3089                  SFMMU_STAT(sf_tteload8k);
3089 3090                  break;
3090 3091          case TTE64K:
3091 3092                  SFMMU_STAT(sf_tteload64k);
3092 3093                  break;
3093 3094          case TTE512K:
3094 3095                  SFMMU_STAT(sf_tteload512k);
3095 3096                  break;
3096 3097          case TTE4M:
3097 3098                  SFMMU_STAT(sf_tteload4m);
3098 3099                  break;
3099 3100          case (TTE32M):
3100 3101                  SFMMU_STAT(sf_tteload32m);
3101 3102                  ASSERT(mmu_page_sizes == max_mmu_page_sizes);
3102 3103                  break;
3103 3104          case (TTE256M):
3104 3105                  SFMMU_STAT(sf_tteload256m);
3105 3106                  ASSERT(mmu_page_sizes == max_mmu_page_sizes);
3106 3107                  break;
3107 3108          }
3108 3109  
3109 3110          ASSERT(!((uintptr_t)vaddr & TTE_PAGE_OFFSET(size)));
3110 3111          SFMMU_VALIDATE_HMERID(sfmmup, rid, vaddr, TTEBYTES(size));
3111 3112          ASSERT(!SFMMU_IS_SHMERID_VALID(rid) || hmeblkp->hblk_shared);
3112 3113          ASSERT(SFMMU_IS_SHMERID_VALID(rid) || !hmeblkp->hblk_shared);
3113 3114  
3114 3115          HBLKTOHME_IDX(sfhme, hmeblkp, vaddr, hmenum);
3115 3116  
3116 3117          /*
3117 3118           * Need to grab mlist lock here so that pageunload
3118 3119           * will not change tte behind us.
3119 3120           */
3120 3121          if (pp) {
3121 3122                  pml = sfmmu_mlist_enter(pp);
3122 3123          }
3123 3124  
3124 3125          sfmmu_copytte(&sfhme->hme_tte, &tteold);
3125 3126          /*
3126 3127           * Look for corresponding hment and if valid verify
3127 3128           * pfns are equal.
3128 3129           */
3129 3130          remap = TTE_IS_VALID(&tteold);
3130 3131          if (remap) {
3131 3132                  pfn_t   new_pfn, old_pfn;
3132 3133  
3133 3134                  old_pfn = TTE_TO_PFN(vaddr, &tteold);
3134 3135                  new_pfn = TTE_TO_PFN(vaddr, ttep);
3135 3136  
3136 3137                  if (flags & HAT_LOAD_REMAP) {
3137 3138                          /* make sure we are remapping same type of pages */
3138 3139                          if (pf_is_memory(old_pfn) != pf_is_memory(new_pfn)) {
3139 3140                                  panic("sfmmu_tteload - tte remap io<->memory");
3140 3141                          }
3141 3142                          if (old_pfn != new_pfn &&
3142 3143                              (pp != NULL || sfhme->hme_page != NULL)) {
3143 3144                                  panic("sfmmu_tteload - tte remap pp != NULL");
3144 3145                          }
3145 3146                  } else if (old_pfn != new_pfn) {
3146 3147                          panic("sfmmu_tteload - tte remap, hmeblkp 0x%p",
3147 3148                              (void *)hmeblkp);
3148 3149                  }
3149 3150                  ASSERT(TTE_CSZ(&tteold) == TTE_CSZ(ttep));
3150 3151          }
3151 3152  
3152 3153          if (pp) {
3153 3154                  if (size == TTE8K) {
3154 3155  #ifdef VAC
3155 3156                          /*
3156 3157                           * Handle VAC consistency
3157 3158                           */
3158 3159                          if (!remap && (cache & CACHE_VAC) && !PP_ISNC(pp)) {
3159 3160                                  sfmmu_vac_conflict(sfmmup, vaddr, pp);
3160 3161                          }
3161 3162  #endif
3162 3163  
3163 3164                          if (TTE_IS_WRITABLE(ttep) && PP_ISRO(pp)) {
3164 3165                                  pmtx = sfmmu_page_enter(pp);
3165 3166                                  PP_CLRRO(pp);
3166 3167                                  sfmmu_page_exit(pmtx);
3167 3168                          } else if (!PP_ISMAPPED(pp) &&
3168 3169                              (!TTE_IS_WRITABLE(ttep)) && !(PP_ISMOD(pp))) {
3169 3170                                  pmtx = sfmmu_page_enter(pp);
3170 3171                                  if (!(PP_ISMOD(pp))) {
3171 3172                                          PP_SETRO(pp);
3172 3173                                  }
3173 3174                                  sfmmu_page_exit(pmtx);
3174 3175                          }
3175 3176  
3176 3177                  } else if (sfmmu_pagearray_setup(vaddr, pps, ttep, remap)) {
3177 3178                          /*
3178 3179                           * sfmmu_pagearray_setup failed so return
3179 3180                           */
3180 3181                          sfmmu_mlist_exit(pml);
3181 3182                          return (1);
3182 3183                  }
3183 3184          }
3184 3185  
3185 3186          /*
3186 3187           * Make sure hment is not on a mapping list.
3187 3188           */
3188 3189          ASSERT(remap || (sfhme->hme_page == NULL));
3189 3190  
3190 3191          /* if it is not a remap then hme->next better be NULL */
3191 3192          ASSERT((!remap) ? sfhme->hme_next == NULL : 1);
3192 3193  
3193 3194          if (flags & HAT_LOAD_LOCK) {
3194 3195                  if ((hmeblkp->hblk_lckcnt + 1) >= MAX_HBLK_LCKCNT) {
3195 3196                          panic("too high lckcnt-hmeblk %p",
3196 3197                              (void *)hmeblkp);
3197 3198                  }
3198 3199                  atomic_inc_32(&hmeblkp->hblk_lckcnt);
3199 3200  
3200 3201                  HBLK_STACK_TRACE(hmeblkp, HBLK_LOCK);
3201 3202          }
3202 3203  
3203 3204  #ifdef VAC
3204 3205          if (pp && PP_ISNC(pp)) {
3205 3206                  /*
3206 3207                   * If the physical page is marked to be uncacheable, like
3207 3208                   * by a vac conflict, make sure the new mapping is also
3208 3209                   * uncacheable.
3209 3210                   */
3210 3211                  TTE_CLR_VCACHEABLE(ttep);
3211 3212                  ASSERT(PP_GET_VCOLOR(pp) == NO_VCOLOR);
3212 3213          }
3213 3214  #endif
3214 3215          ttep->tte_hmenum = hmenum;
3215 3216  
3216 3217  #ifdef DEBUG
3217 3218          orig_old = tteold;
3218 3219  #endif /* DEBUG */
3219 3220  
3220 3221          while (sfmmu_modifytte_try(&tteold, ttep, &sfhme->hme_tte) < 0) {
3221 3222                  if ((sfmmup == KHATID) &&
3222 3223                      (flags & (HAT_LOAD_LOCK | HAT_LOAD_REMAP))) {
3223 3224                          sfmmu_copytte(&sfhme->hme_tte, &tteold);
3224 3225                  }
3225 3226  #ifdef DEBUG
3226 3227                  chk_tte(&orig_old, &tteold, ttep, hmeblkp);
3227 3228  #endif /* DEBUG */
3228 3229          }
3229 3230          ASSERT(TTE_IS_VALID(&sfhme->hme_tte));
3230 3231  
3231 3232          if (!TTE_IS_VALID(&tteold)) {
3232 3233  
3233 3234                  atomic_inc_16(&hmeblkp->hblk_vcnt);
3234 3235                  if (rid == SFMMU_INVALID_SHMERID) {
3235 3236                          atomic_inc_ulong(&sfmmup->sfmmu_ttecnt[size]);
3236 3237                  } else {
3237 3238                          sf_srd_t *srdp = sfmmup->sfmmu_srdp;
3238 3239                          sf_region_t *rgnp = srdp->srd_hmergnp[rid];
3239 3240                          /*
3240 3241                           * We already accounted for region ttecnt's in sfmmu
3241 3242                           * during hat_join_region() processing. Here we
3242 3243                           * only update ttecnt's in the region structure.
3243 3244                           */
3244 3245                          atomic_inc_ulong(&rgnp->rgn_ttecnt[size]);
3245 3246                  }
3246 3247          }
3247 3248  
3248 3249          myflt = (astosfmmu(curthread->t_procp->p_as) == sfmmup);
3249 3250          if (size > TTE8K && (flags & HAT_LOAD_SHARE) == 0 &&
3250 3251              sfmmup != ksfmmup) {
3251 3252                  uchar_t tteflag = 1 << size;
3252 3253                  if (rid == SFMMU_INVALID_SHMERID) {
3253 3254                          if (!(sfmmup->sfmmu_tteflags & tteflag)) {
3254 3255                                  hatlockp = sfmmu_hat_enter(sfmmup);
3255 3256                                  sfmmup->sfmmu_tteflags |= tteflag;
3256 3257                                  sfmmu_hat_exit(hatlockp);
3257 3258                          }
3258 3259                  } else if (!(sfmmup->sfmmu_rtteflags & tteflag)) {
3259 3260                          hatlockp = sfmmu_hat_enter(sfmmup);
3260 3261                          sfmmup->sfmmu_rtteflags |= tteflag;
3261 3262                          sfmmu_hat_exit(hatlockp);
3262 3263                  }
3263 3264                  /*
3264 3265                   * Update the current CPU tsbmiss area, so the current thread
3265 3266                   * won't need to take the tsbmiss for the new pagesize.
3266 3267                   * The other threads in the process will update their tsb
3267 3268                   * miss area lazily in sfmmu_tsbmiss_exception() when they
3268 3269                   * fail to find the translation for a newly added pagesize.
3269 3270                   */
3270 3271                  if (size > TTE64K && myflt) {
3271 3272                          struct tsbmiss *tsbmp;
3272 3273                          kpreempt_disable();
3273 3274                          tsbmp = &tsbmiss_area[CPU->cpu_id];
3274 3275                          if (rid == SFMMU_INVALID_SHMERID) {
3275 3276                                  if (!(tsbmp->uhat_tteflags & tteflag)) {
3276 3277                                          tsbmp->uhat_tteflags |= tteflag;
3277 3278                                  }
3278 3279                          } else {
3279 3280                                  if (!(tsbmp->uhat_rtteflags & tteflag)) {
3280 3281                                          tsbmp->uhat_rtteflags |= tteflag;
3281 3282                                  }
3282 3283                          }
3283 3284                          kpreempt_enable();
3284 3285                  }
3285 3286          }
3286 3287  
3287 3288          if (size >= TTE4M && (flags & HAT_LOAD_TEXT) &&
3288 3289              !SFMMU_FLAGS_ISSET(sfmmup, HAT_4MTEXT_FLAG)) {
3289 3290                  hatlockp = sfmmu_hat_enter(sfmmup);
3290 3291                  SFMMU_FLAGS_SET(sfmmup, HAT_4MTEXT_FLAG);
3291 3292                  sfmmu_hat_exit(hatlockp);
3292 3293          }
3293 3294  
3294 3295          flush_tte.tte_intlo = (tteold.tte_intlo ^ ttep->tte_intlo) &
3295 3296              hw_tte.tte_intlo;
3296 3297          flush_tte.tte_inthi = (tteold.tte_inthi ^ ttep->tte_inthi) &
3297 3298              hw_tte.tte_inthi;
3298 3299  
3299 3300          if (remap && (flush_tte.tte_inthi || flush_tte.tte_intlo)) {
3300 3301                  /*
3301 3302                   * If remap and new tte differs from old tte we need
3302 3303                   * to sync the mod bit and flush TLB/TSB.  We don't
3303 3304                   * need to sync ref bit because we currently always set
3304 3305                   * ref bit in tteload.
3305 3306                   */
3306 3307                  ASSERT(TTE_IS_REF(ttep));
3307 3308                  if (TTE_IS_MOD(&tteold)) {
3308 3309                          sfmmu_ttesync(sfmmup, vaddr, &tteold, pp);
3309 3310                  }
3310 3311                  /*
3311 3312                   * hwtte bits shouldn't change for SRD hmeblks as long as SRD
3312 3313                   * hmes are only used for read only text. Adding this code for
3313 3314                   * completeness and future use of shared hmeblks with writable
3314 3315                   * mappings of VMODSORT vnodes.
3315 3316                   */
3316 3317                  if (hmeblkp->hblk_shared) {
3317 3318                          cpuset_t cpuset = sfmmu_rgntlb_demap(vaddr,
3318 3319                              sfmmup->sfmmu_srdp->srd_hmergnp[rid], hmeblkp, 1);
3319 3320                          xt_sync(cpuset);
3320 3321                          SFMMU_STAT_ADD(sf_region_remap_demap, 1);
3321 3322                  } else {
3322 3323                          sfmmu_tlb_demap(vaddr, sfmmup, hmeblkp, 0, 0);
3323 3324                          xt_sync(sfmmup->sfmmu_cpusran);
3324 3325                  }
3325 3326          }
3326 3327  
3327 3328          if ((flags & SFMMU_NO_TSBLOAD) == 0) {
3328 3329                  /*
3329 3330                   * We only preload 8K and 4M mappings into the TSB, since
3330 3331                   * 64K and 512K mappings are replicated and hence don't
3331 3332                   * have a single, unique TSB entry. Ditto for 32M/256M.
3332 3333                   */
3333 3334                  if (size == TTE8K || size == TTE4M) {
3334 3335                          sf_scd_t *scdp;
3335 3336                          hatlockp = sfmmu_hat_enter(sfmmup);
3336 3337                          /*
3337 3338                           * Don't preload private TSB if the mapping is used
3338 3339                           * by the shctx in the SCD.
3339 3340                           */
3340 3341                          scdp = sfmmup->sfmmu_scdp;
3341 3342                          if (rid == SFMMU_INVALID_SHMERID || scdp == NULL ||
3342 3343                              !SF_RGNMAP_TEST(scdp->scd_hmeregion_map, rid)) {
3343 3344                                  sfmmu_load_tsb(sfmmup, vaddr, &sfhme->hme_tte,
3344 3345                                      size);
3345 3346                          }
3346 3347                          sfmmu_hat_exit(hatlockp);
3347 3348                  }
3348 3349          }
3349 3350          if (pp) {
3350 3351                  if (!remap) {
3351 3352                          HME_ADD(sfhme, pp);
3352 3353                          atomic_inc_16(&hmeblkp->hblk_hmecnt);
3353 3354                          ASSERT(hmeblkp->hblk_hmecnt > 0);
3354 3355  
3355 3356                          /*
3356 3357                           * Cannot ASSERT(hmeblkp->hblk_hmecnt <= NHMENTS)
3357 3358                           * see pageunload() for comment.
3358 3359                           */
3359 3360                  }
3360 3361                  sfmmu_mlist_exit(pml);
3361 3362          }
3362 3363  
3363 3364          return (0);
3364 3365  }
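
The remap path near the end of sfmmu_tteload_addentry() only demaps when a hardware-interpreted tte bit actually changed, which it computes by XORing the old and new ttes and masking with hw_tte. The standalone sketch below shows that test with a single 64-bit value and an invented mask; the real tte is split into inthi/intlo words and the mask comes from hw_tte.

#include <stdio.h>
#include <stdint.h>

#define	HW_BITS	0x00000000ff00ffffULL	/* assumed hw-interpreted bits */

/* A demap is needed only if a hardware-visible bit differs. */
static int
remap_needs_demap(uint64_t old_tte, uint64_t new_tte)
{
	return (((old_tte ^ new_tte) & HW_BITS) != 0);
}

int
main(void)
{
	uint64_t old_tte = 0x8000000000001234ULL;

	/* Only a software-visible high bit changes: no demap. */
	printf("%d\n", remap_needs_demap(old_tte, old_tte | (1ULL << 62)));
	/* A protection-style low bit flips: demap required. */
	printf("%d\n", remap_needs_demap(old_tte, old_tte ^ 0x4ULL));
	return (0);
}
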
3365 3366  /*
3366 3367   * Function unlocks hash bucket.
3367 3368   */
3368 3369  static void
3369 3370  sfmmu_tteload_release_hashbucket(struct hmehash_bucket *hmebp)
3370 3371  {
3371 3372          ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
3372 3373          SFMMU_HASH_UNLOCK(hmebp);
3373 3374  }
3374 3375  
3375 3376  /*
3376 3377   * Function that checks and sets up the page array for a large
3377 3378   * translation.  Will set p_vcolor, p_index, p_ro fields.
3378 3379   * Assumes addr and pfnum of first page are properly aligned.
3379 3380   * Will check for physical contiguity. If the check fails it
3380 3381   * returns nonzero.
3381 3382   */
3382 3383  static int
3383 3384  sfmmu_pagearray_setup(caddr_t addr, page_t **pps, tte_t *ttep, int remap)
3384 3385  {
3385 3386          int     i, index, ttesz;
3386 3387          pfn_t   pfnum;
3387 3388          pgcnt_t npgs;
3388 3389          page_t *pp, *pp1;
3389 3390          kmutex_t *pmtx;
3390 3391  #ifdef VAC
3391 3392          int osz;
3392 3393          int cflags = 0;
3393 3394          int vac_err = 0;
3394 3395  #endif
3395 3396          int newidx = 0;
3396 3397  
3397 3398          ttesz = TTE_CSZ(ttep);
3398 3399  
3399 3400          ASSERT(ttesz > TTE8K);
3400 3401  
3401 3402          npgs = TTEPAGES(ttesz);
3402 3403          index = PAGESZ_TO_INDEX(ttesz);
3403 3404  
3404 3405          pfnum = (*pps)->p_pagenum;
3405 3406          ASSERT(IS_P2ALIGNED(pfnum, npgs));
3406 3407  
3407 3408          /*
3408 3409           * Save the first pp so we can do HAT_TMPNC at the end.
3409 3410           */
3410 3411          pp1 = *pps;
3411 3412  #ifdef VAC
3412 3413          osz = fnd_mapping_sz(pp1);
3413 3414  #endif
3414 3415  
3415 3416          for (i = 0; i < npgs; i++, pps++) {
3416 3417                  pp = *pps;
3417 3418                  ASSERT(PAGE_LOCKED(pp));
3418 3419                  ASSERT(pp->p_szc >= ttesz);
3419 3420                  ASSERT(pp->p_szc == pp1->p_szc);
3420 3421                  ASSERT(sfmmu_mlist_held(pp));
3421 3422  
3422 3423                  /*
3423 3424                   * XXX is it possible to maintain P_RO on the root only?
3424 3425                   */
3425 3426                  if (TTE_IS_WRITABLE(ttep) && PP_ISRO(pp)) {
3426 3427                          pmtx = sfmmu_page_enter(pp);
3427 3428                          PP_CLRRO(pp);
3428 3429                          sfmmu_page_exit(pmtx);
3429 3430                  } else if (!PP_ISMAPPED(pp) && !TTE_IS_WRITABLE(ttep) &&
3430 3431                      !PP_ISMOD(pp)) {
3431 3432                          pmtx = sfmmu_page_enter(pp);
3432 3433                          if (!(PP_ISMOD(pp))) {
3433 3434                                  PP_SETRO(pp);
3434 3435                          }
3435 3436                          sfmmu_page_exit(pmtx);
3436 3437                  }
3437 3438  
3438 3439                  /*
3439 3440                   * If this is a remap we skip vac & contiguity checks.
3440 3441                   */
3441 3442                  if (remap)
3442 3443                          continue;
3443 3444  
3444 3445                  /*
3445 3446                   * set p_vcolor and detect any vac conflicts.
3446 3447                   */
3447 3448  #ifdef VAC
3448 3449                  if (vac_err == 0) {
3449 3450                          vac_err = sfmmu_vacconflict_array(addr, pp, &cflags);
3450 3451  
3451 3452                  }
3452 3453  #endif
3453 3454  
3454 3455                  /*
3455 3456                   * Save current index in case we need to undo it.
3456 3457                   * Note: "PAGESZ_TO_INDEX(sz)   (1 << (sz))"
3457 3458                   *      "SFMMU_INDEX_SHIFT      6"
3458 3459                   *       "SFMMU_INDEX_MASK      ((1 << SFMMU_INDEX_SHIFT) - 1)"
3459 3460                   *       "PP_MAPINDEX(p_index)  (p_index & SFMMU_INDEX_MASK)"
3460 3461                   *
3461 3462                   * So:  index = PAGESZ_TO_INDEX(ttesz);
3462 3463                   *      if ttesz == 1 then index = 0x2
3463 3464                   *                  2 then index = 0x4
3464 3465                   *                  3 then index = 0x8
3465 3466                   *                  4 then index = 0x10
3466 3467                   *                  5 then index = 0x20
3467 3468                   * The code below checks if it's a new pagesize (i.e., newidx)
3468 3469                   * in case we need to take it back out of p_index,
3469 3470                   * and then ORs the new index into the existing index.
3470 3471                   */
3471 3472                  if ((PP_MAPINDEX(pp) & index) == 0)
3472 3473                          newidx = 1;
3473 3474                  pp->p_index = (PP_MAPINDEX(pp) | index);
3474 3475  
3475 3476                  /*
3476 3477                   * contiguity check
3477 3478                   */
3478 3479                  if (pp->p_pagenum != pfnum) {
3479 3480                          /*
3480 3481                           * If we fail the contiguity test then
3481 3482                           * the only thing we need to fix is the p_index field.
3482 3483                           * We might get a few extra flushes but since this
3483 3484                           * path is rare that is ok.  The p_ro field will
3484 3485                           * get automatically fixed on the next tteload to
3485 3486                           * the page.  NO TNC bit is set yet.
3486 3487                           */
3487 3488                          while (i >= 0) {
3488 3489                                  pp = *pps;
3489 3490                                  if (newidx)
3490 3491                                          pp->p_index = (PP_MAPINDEX(pp) &
3491 3492                                              ~index);
3492 3493                                  pps--;
3493 3494                                  i--;
3494 3495                          }
3495 3496                          return (1);
3496 3497                  }
3497 3498                  pfnum++;
3498 3499                  addr += MMU_PAGESIZE;
3499 3500          }
3500 3501  
3501 3502  #ifdef VAC
3502 3503          if (vac_err) {
3503 3504                  if (ttesz > osz) {
3504 3505                          /*
3505 3506                           * There are some smaller mappings that cause vac
3506 3507                           * conflicts. Convert all existing small mappings to
3507 3508                           * TNC.
3508 3509                           */
3509 3510                          SFMMU_STAT_ADD(sf_uncache_conflict, npgs);
3510 3511                          sfmmu_page_cache_array(pp1, HAT_TMPNC, CACHE_FLUSH,
3511 3512                              npgs);
3512 3513                  } else {
3513 3514                          /* EMPTY */
3514 3515                          /*
3515 3516                           * If there exists a big page mapping,
3516 3517                           * that means the whole existing big page
3517 3518                           * already has the TNC setting.  No need to convert to
3518 3519                           * TNC again.
3519 3520                           */
3520 3521                          ASSERT(PP_ISTNC(pp1));
3521 3522                  }
3522 3523          }
3523 3524  #endif  /* VAC */
3524 3525  
3525 3526          return (0);
3526 3527  }
3527 3528  
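           /*
            * A worked example of the p_index bookkeeping performed by
            * sfmmu_pagearray_setup() above, using the PAGESZ_TO_INDEX values
            * quoted in its comment (the scenario itself is hypothetical):
            *
            *      A page that is already part of a 64K mapping has
            *      PP_MAPINDEX(pp) == PAGESZ_TO_INDEX(TTE64K) == 0x2.
            *      Loading an additional 4M mapping ORs in
            *      PAGESZ_TO_INDEX(TTE4M) == 0x8, so p_index now carries 0xa.
            *      If the contiguity check later fails, only the new size's
            *      bit (0x8, remembered via newidx) is cleared on the pages
            *      already visited; the pre-existing 0x2 is left untouched.
            */
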
3528 3529  #ifdef VAC
3529 3530  /*
3530 3531   * Routine that checks vac consistency for a large page.  It also
3531 3532   * sets the virtual color for all pp's of this big mapping.
3532 3533   */
3533 3534  static int
3534 3535  sfmmu_vacconflict_array(caddr_t addr, page_t *pp, int *cflags)
3535 3536  {
3536 3537          int vcolor, ocolor;
3537 3538  
3538 3539          ASSERT(sfmmu_mlist_held(pp));
3539 3540  
3540 3541          if (PP_ISNC(pp)) {
3541 3542                  return (HAT_TMPNC);
3542 3543          }
3543 3544  
3544 3545          vcolor = addr_to_vcolor(addr);
3545 3546          if (PP_NEWPAGE(pp)) {
3546 3547                  PP_SET_VCOLOR(pp, vcolor);
3547 3548                  return (0);
3548 3549          }
3549 3550  
3550 3551          ocolor = PP_GET_VCOLOR(pp);
3551 3552          if (ocolor == vcolor) {
3552 3553                  return (0);
3553 3554          }
3554 3555  
3555 3556          if (!PP_ISMAPPED(pp) && !PP_ISMAPPED_KPM(pp)) {
3556 3557                  /*
3557 3558                   * Previous user of page had a different color
3558 3559                   * but since there are no current users
3559 3560                   * we just flush the cache and change the color.
3560 3561                   * As an optimization for large pages we flush the
3561 3562                   * entire cache of that color and set a flag.
3562 3563                   */
3563 3564                  SFMMU_STAT(sf_pgcolor_conflict);
3564 3565                  if (!CacheColor_IsFlushed(*cflags, ocolor)) {
3565 3566                          CacheColor_SetFlushed(*cflags, ocolor);
3566 3567                          sfmmu_cache_flushcolor(ocolor, pp->p_pagenum);
3567 3568                  }
3568 3569                  PP_SET_VCOLOR(pp, vcolor);
3569 3570                  return (0);
3570 3571          }
3571 3572  
3572 3573          /*
3573 3574           * We got a real conflict with a current mapping.
3574 3575           * Set flags to start uncaching all mappings
3575 3576           * and return failure so we restart looping over
3576 3577           * the pp array from the beginning.
3577 3578           */
3578 3579          return (HAT_TMPNC);
3579 3580  }
3580 3581  #endif  /* VAC */
3581 3582  
3582 3583  /*
3583 3584   * creates a large page shadow hmeblk for a tte.
3584 3585   * The purpose of this routine is to allow us to do quick unloads because
3585 3586   * the vm layer can easily pass a very large but sparsely populated range.
3586 3587   */
3587 3588  static struct hme_blk *
3588 3589  sfmmu_shadow_hcreate(sfmmu_t *sfmmup, caddr_t vaddr, int ttesz, uint_t flags)
3589 3590  {
3590 3591          struct hmehash_bucket *hmebp;
3591 3592          hmeblk_tag hblktag;
3592 3593          int hmeshift, size, vshift;
3593 3594          uint_t shw_mask, newshw_mask;
3594 3595          struct hme_blk *hmeblkp;
3595 3596  
3596 3597          ASSERT(sfmmup != KHATID);
3597 3598          if (mmu_page_sizes == max_mmu_page_sizes) {
3598 3599                  ASSERT(ttesz < TTE256M);
3599 3600          } else {
3600 3601                  ASSERT(ttesz < TTE4M);
3601 3602                  ASSERT(sfmmup->sfmmu_ttecnt[TTE32M] == 0);
3602 3603                  ASSERT(sfmmup->sfmmu_ttecnt[TTE256M] == 0);
3603 3604          }
3604 3605  
3605 3606          if (ttesz == TTE8K) {
3606 3607                  size = TTE512K;
3607 3608          } else {
3608 3609                  size = ++ttesz;
3609 3610          }
3610 3611  
3611 3612          hblktag.htag_id = sfmmup;
3612 3613          hmeshift = HME_HASH_SHIFT(size);
3613 3614          hblktag.htag_bspage = HME_HASH_BSPAGE(vaddr, hmeshift);
3614 3615          hblktag.htag_rehash = HME_HASH_REHASH(size);
3615 3616          hblktag.htag_rid = SFMMU_INVALID_SHMERID;
3616 3617          hmebp = HME_HASH_FUNCTION(sfmmup, vaddr, hmeshift);
3617 3618  
3618 3619          SFMMU_HASH_LOCK(hmebp);
3619 3620  
3620 3621          HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp);
3621 3622          ASSERT(hmeblkp != (struct hme_blk *)hblk_reserve);
3622 3623          if (hmeblkp == NULL) {
3623 3624                  hmeblkp = sfmmu_hblk_alloc(sfmmup, vaddr, hmebp, size,
3624 3625                      hblktag, flags, SFMMU_INVALID_SHMERID);
3625 3626          }
3626 3627          ASSERT(hmeblkp);
3627 3628          if (!hmeblkp->hblk_shw_mask) {
3628 3629                  /*
3629 3630                   * if this is an unused hblk it was just allocated or could
3630 3631                   * potentially be a previous large page hblk, so we need to
3631 3632                   * set the shadow bit.
3632 3633                   */
3633 3634                  ASSERT(!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt);
3634 3635                  hmeblkp->hblk_shw_bit = 1;
3635 3636          } else if (hmeblkp->hblk_shw_bit == 0) {
3636 3637                  panic("sfmmu_shadow_hcreate: shw bit not set in hmeblkp 0x%p",
3637 3638                      (void *)hmeblkp);
3638 3639          }
3639 3640          ASSERT(hmeblkp->hblk_shw_bit == 1);
3640 3641          ASSERT(!hmeblkp->hblk_shared);
3641 3642          vshift = vaddr_to_vshift(hblktag, vaddr, size);
3642 3643          ASSERT(vshift < 8);
3643 3644          /*
3644 3645           * Atomically set shw mask bit
3645 3646           */
3646 3647          do {
3647 3648                  shw_mask = hmeblkp->hblk_shw_mask;
3648 3649                  newshw_mask = shw_mask | (1 << vshift);
3649 3650                  newshw_mask = atomic_cas_32(&hmeblkp->hblk_shw_mask, shw_mask,
3650 3651                      newshw_mask);
3651 3652          } while (newshw_mask != shw_mask);
3652 3653  
3653 3654          SFMMU_HASH_UNLOCK(hmebp);
3654 3655  
3655 3656          return (hmeblkp);
3656 3657  }
3657 3658  
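           /*
            * The hblk_shw_mask update in sfmmu_shadow_hcreate() above uses
            * the common atomic_cas_32() retry idiom.  A minimal sketch of
            * the same pattern on a hypothetical mask word (not part of the
            * hat layer):
            *
            *      do {
            *              old = *maskp;
            *              new = old | (1 << bit);
            *              new = atomic_cas_32(maskp, old, new);
            *      } while (new != old);
            *
            * atomic_cas_32() returns the value that was actually in *maskp;
            * if it differs from the snapshot in old, another thread updated
            * the mask first and the loop retries with the fresh value.
            */
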
3658 3659  /*
3659 3660   * This routine cleans up a previous shadow hmeblk and changes it to
3660 3661   * a regular hblk.  This happens rarely, but it is possible
3661 3662   * when a process wants to use large pages and there are hblks still
3662 3663   * lying around from the previous as (address space) that used these hmeblks.
3663 3664   * The alternative was to clean up the shadow hblks at unload time,
3664 3665   * but since so few user processes actually use large pages, it is
3665 3666   * better to be lazy and clean up at this time.
3666 3667   */
3667 3668  static void
3668 3669  sfmmu_shadow_hcleanup(sfmmu_t *sfmmup, struct hme_blk *hmeblkp,
3669 3670          struct hmehash_bucket *hmebp)
3670 3671  {
3671 3672          caddr_t addr, endaddr;
3672 3673          int hashno, size;
3673 3674  
3674 3675          ASSERT(hmeblkp->hblk_shw_bit);
3675 3676          ASSERT(!hmeblkp->hblk_shared);
3676 3677  
3677 3678          ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
3678 3679  
3679 3680          if (!hmeblkp->hblk_shw_mask) {
3680 3681                  hmeblkp->hblk_shw_bit = 0;
3681 3682                  return;
3682 3683          }
3683 3684          addr = (caddr_t)get_hblk_base(hmeblkp);
3684 3685          endaddr = get_hblk_endaddr(hmeblkp);
3685 3686          size = get_hblk_ttesz(hmeblkp);
3686 3687          hashno = size - 1;
3687 3688          ASSERT(hashno > 0);
3688 3689          SFMMU_HASH_UNLOCK(hmebp);
3689 3690  
3690 3691          sfmmu_free_hblks(sfmmup, addr, endaddr, hashno);
3691 3692  
3692 3693          SFMMU_HASH_LOCK(hmebp);
3693 3694  }
3694 3695  
3695 3696  static void
3696 3697  sfmmu_free_hblks(sfmmu_t *sfmmup, caddr_t addr, caddr_t endaddr,
3697 3698          int hashno)
3698 3699  {
3699 3700          int hmeshift, shadow = 0;
3700 3701          hmeblk_tag hblktag;
3701 3702          struct hmehash_bucket *hmebp;
3702 3703          struct hme_blk *hmeblkp;
3703 3704          struct hme_blk *nx_hblk, *pr_hblk, *list = NULL;
3704 3705  
3705 3706          ASSERT(hashno > 0);
3706 3707          hblktag.htag_id = sfmmup;
3707 3708          hblktag.htag_rehash = hashno;
3708 3709          hblktag.htag_rid = SFMMU_INVALID_SHMERID;
3709 3710  
3710 3711          hmeshift = HME_HASH_SHIFT(hashno);
3711 3712  
3712 3713          while (addr < endaddr) {
3713 3714                  hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
3714 3715                  hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift);
3715 3716                  SFMMU_HASH_LOCK(hmebp);
3716 3717                  /* inline HME_HASH_SEARCH */
3717 3718                  hmeblkp = hmebp->hmeblkp;
3718 3719                  pr_hblk = NULL;
3719 3720                  while (hmeblkp) {
3720 3721                          if (HTAGS_EQ(hmeblkp->hblk_tag, hblktag)) {
3721 3722                                  /* found hme_blk */
3722 3723                                  ASSERT(!hmeblkp->hblk_shared);
3723 3724                                  if (hmeblkp->hblk_shw_bit) {
3724 3725                                          if (hmeblkp->hblk_shw_mask) {
3725 3726                                                  shadow = 1;
3726 3727                                                  sfmmu_shadow_hcleanup(sfmmup,
3727 3728                                                      hmeblkp, hmebp);
3728 3729                                                  break;
3729 3730                                          } else {
3730 3731                                                  hmeblkp->hblk_shw_bit = 0;
3731 3732                                          }
3732 3733                                  }
3733 3734  
3734 3735                                  /*
3735 3736                                   * Hblk_hmecnt and hblk_vcnt could be non-zero
3736 3737                                   * since hblk_unload() does not guarantee that.
3737 3738                                   *
3738 3739                                   * XXX - this could cause tteload() to spin
3739 3740                                   * where sfmmu_shadow_hcleanup() is called.
3740 3741                                   */
3741 3742                          }
3742 3743  
3743 3744                          nx_hblk = hmeblkp->hblk_next;
3744 3745                          if (!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) {
3745 3746                                  sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk,
3746 3747                                      &list, 0);
3747 3748                          } else {
3748 3749                                  pr_hblk = hmeblkp;
3749 3750                          }
3750 3751                          hmeblkp = nx_hblk;
3751 3752                  }
3752 3753  
3753 3754                  SFMMU_HASH_UNLOCK(hmebp);
3754 3755  
3755 3756                  if (shadow) {
3756 3757                          /*
3757 3758                           * We found another shadow hblk and cleaned up its
3758 3759                           * children.  We need to go back and clean up
3759 3760                           * the original hblk, so we don't advance
3760 3761                           * addr.
3761 3762                           */
3762 3763                          shadow = 0;
3763 3764                  } else {
3764 3765                          addr = (caddr_t)roundup((uintptr_t)addr + 1,
3765 3766                              (1 << hmeshift));
3766 3767                  }
3767 3768          }
3768 3769          sfmmu_hblks_list_purge(&list, 0);
3769 3770  }
3770 3771  
3771 3772  /*
3772 3773   * This routine's job is to delete stale invalid shared hmeregion hmeblks that
3773 3774   * may still linger on after pageunload.
3774 3775   */
3775 3776  static void
3776 3777  sfmmu_cleanup_rhblk(sf_srd_t *srdp, caddr_t addr, uint_t rid, int ttesz)
3777 3778  {
3778 3779          int hmeshift;
3779 3780          hmeblk_tag hblktag;
3780 3781          struct hmehash_bucket *hmebp;
3781 3782          struct hme_blk *hmeblkp;
3782 3783          struct hme_blk *pr_hblk;
3783 3784          struct hme_blk *list = NULL;
3784 3785  
3785 3786          ASSERT(SFMMU_IS_SHMERID_VALID(rid));
3786 3787          ASSERT(rid < SFMMU_MAX_HME_REGIONS);
3787 3788  
3788 3789          hmeshift = HME_HASH_SHIFT(ttesz);
3789 3790          hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
3790 3791          hblktag.htag_rehash = ttesz;
3791 3792          hblktag.htag_rid = rid;
3792 3793          hblktag.htag_id = srdp;
3793 3794          hmebp = HME_HASH_FUNCTION(srdp, addr, hmeshift);
3794 3795  
3795 3796          SFMMU_HASH_LOCK(hmebp);
3796 3797          HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, pr_hblk, &list);
3797 3798          if (hmeblkp != NULL) {
3798 3799                  ASSERT(hmeblkp->hblk_shared);
3799 3800                  ASSERT(!hmeblkp->hblk_shw_bit);
3800 3801                  if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) {
3801 3802                          panic("sfmmu_cleanup_rhblk: valid hmeblk");
3802 3803                  }
3803 3804                  ASSERT(!hmeblkp->hblk_lckcnt);
3804 3805                  sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk,
3805 3806                      &list, 0);
3806 3807          }
3807 3808          SFMMU_HASH_UNLOCK(hmebp);
3808 3809          sfmmu_hblks_list_purge(&list, 0);
3809 3810  }
3810 3811  
3811 3812  /* ARGSUSED */
3812 3813  static void
3813 3814  sfmmu_rgn_cb_noop(caddr_t saddr, caddr_t eaddr, caddr_t r_saddr,
3814 3815      size_t r_size, void *r_obj, u_offset_t r_objoff)
3815 3816  {
3816 3817  }
3817 3818  
3818 3819  /*
3819 3820   * Searches for an hmeblk which maps addr, then unloads this mapping
3820 3821   * and updates *eaddrp, if the hmeblk is found.
3821 3822   */
3822 3823  static void
3823 3824  sfmmu_unload_hmeregion_va(sf_srd_t *srdp, uint_t rid, caddr_t addr,
3824 3825      caddr_t eaddr, int ttesz, caddr_t *eaddrp)
3825 3826  {
3826 3827          int hmeshift;
3827 3828          hmeblk_tag hblktag;
3828 3829          struct hmehash_bucket *hmebp;
3829 3830          struct hme_blk *hmeblkp;
3830 3831          struct hme_blk *pr_hblk;
3831 3832          struct hme_blk *list = NULL;
3832 3833  
3833 3834          ASSERT(SFMMU_IS_SHMERID_VALID(rid));
3834 3835          ASSERT(rid < SFMMU_MAX_HME_REGIONS);
3835 3836          ASSERT(ttesz >= HBLK_MIN_TTESZ);
3836 3837  
3837 3838          hmeshift = HME_HASH_SHIFT(ttesz);
3838 3839          hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
3839 3840          hblktag.htag_rehash = ttesz;
3840 3841          hblktag.htag_rid = rid;
3841 3842          hblktag.htag_id = srdp;
3842 3843          hmebp = HME_HASH_FUNCTION(srdp, addr, hmeshift);
3843 3844  
3844 3845          SFMMU_HASH_LOCK(hmebp);
3845 3846          HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, pr_hblk, &list);
3846 3847          if (hmeblkp != NULL) {
3847 3848                  ASSERT(hmeblkp->hblk_shared);
3848 3849                  ASSERT(!hmeblkp->hblk_lckcnt);
3849 3850                  if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) {
3850 3851                          *eaddrp = sfmmu_hblk_unload(NULL, hmeblkp, addr,
3851 3852                              eaddr, NULL, HAT_UNLOAD);
3852 3853                          ASSERT(*eaddrp > addr);
3853 3854                  }
3854 3855                  ASSERT(!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt);
3855 3856                  sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk,
3856 3857                      &list, 0);
3857 3858          }
3858 3859          SFMMU_HASH_UNLOCK(hmebp);
3859 3860          sfmmu_hblks_list_purge(&list, 0);
3860 3861  }
3861 3862  
3862 3863  static void
3863 3864  sfmmu_unload_hmeregion(sf_srd_t *srdp, sf_region_t *rgnp)
3864 3865  {
3865 3866          int ttesz = rgnp->rgn_pgszc;
3866 3867          size_t rsz = rgnp->rgn_size;
3867 3868          caddr_t rsaddr = rgnp->rgn_saddr;
3868 3869          caddr_t readdr = rsaddr + rsz;
3869 3870          caddr_t rhsaddr;
3870 3871          caddr_t va;
3871 3872          uint_t rid = rgnp->rgn_id;
3872 3873          caddr_t cbsaddr;
3873 3874          caddr_t cbeaddr;
3874 3875          hat_rgn_cb_func_t rcbfunc;
3875 3876          ulong_t cnt;
3876 3877  
3877 3878          ASSERT(SFMMU_IS_SHMERID_VALID(rid));
3878 3879          ASSERT(rid < SFMMU_MAX_HME_REGIONS);
3879 3880  
3880 3881          ASSERT(IS_P2ALIGNED(rsaddr, TTEBYTES(ttesz)));
3881 3882          ASSERT(IS_P2ALIGNED(rsz, TTEBYTES(ttesz)));
3882 3883          if (ttesz < HBLK_MIN_TTESZ) {
3883 3884                  ttesz = HBLK_MIN_TTESZ;
3884 3885                  rhsaddr = (caddr_t)P2ALIGN((uintptr_t)rsaddr, HBLK_MIN_BYTES);
3885 3886          } else {
3886 3887                  rhsaddr = rsaddr;
3887 3888          }
3888 3889  
3889 3890          if ((rcbfunc = rgnp->rgn_cb_function) == NULL) {
3890 3891                  rcbfunc = sfmmu_rgn_cb_noop;
3891 3892          }
3892 3893  
3893 3894          while (ttesz >= HBLK_MIN_TTESZ) {
3894 3895                  cbsaddr = rsaddr;
3895 3896                  cbeaddr = rsaddr;
3896 3897                  if (!(rgnp->rgn_hmeflags & (1 << ttesz))) {
3897 3898                          ttesz--;
3898 3899                          continue;
3899 3900                  }
3900 3901                  cnt = 0;
3901 3902                  va = rsaddr;
3902 3903                  while (va < readdr) {
3903 3904                          ASSERT(va >= rhsaddr);
3904 3905                          if (va != cbeaddr) {
3905 3906                                  if (cbeaddr != cbsaddr) {
3906 3907                                          ASSERT(cbeaddr > cbsaddr);
3907 3908                                          (*rcbfunc)(cbsaddr, cbeaddr,
3908 3909                                              rsaddr, rsz, rgnp->rgn_obj,
3909 3910                                              rgnp->rgn_objoff);
3910 3911                                  }
3911 3912                                  cbsaddr = va;
3912 3913                                  cbeaddr = va;
3913 3914                          }
3914 3915                          sfmmu_unload_hmeregion_va(srdp, rid, va, readdr,
3915 3916                              ttesz, &cbeaddr);
3916 3917                          cnt++;
3917 3918                          va = rhsaddr + (cnt << TTE_PAGE_SHIFT(ttesz));
3918 3919                  }
3919 3920                  if (cbeaddr != cbsaddr) {
3920 3921                          ASSERT(cbeaddr > cbsaddr);
3921 3922                          (*rcbfunc)(cbsaddr, cbeaddr, rsaddr,
3922 3923                              rsz, rgnp->rgn_obj,
3923 3924                              rgnp->rgn_objoff);
3924 3925                  }
3925 3926                  ttesz--;
3926 3927          }
3927 3928  }
3928 3929  
3929 3930  /*
3930 3931   * Release one hardware address translation lock on the given address range.
3931 3932   */
3932 3933  void
3933 3934  hat_unlock(struct hat *sfmmup, caddr_t addr, size_t len)
3934 3935  {
3935 3936          struct hmehash_bucket *hmebp;
3936 3937          hmeblk_tag hblktag;
3937 3938          int hmeshift, hashno = 1;
3938 3939          struct hme_blk *hmeblkp, *list = NULL;
3939 3940          caddr_t endaddr;
3940 3941  
3941 3942          ASSERT(sfmmup != NULL);
3942 3943  
3943 3944          ASSERT((sfmmup == ksfmmup) || AS_LOCK_HELD(sfmmup->sfmmu_as));
3944 3945          ASSERT((len & MMU_PAGEOFFSET) == 0);
3945 3946          endaddr = addr + len;
3946 3947          hblktag.htag_id = sfmmup;
3947 3948          hblktag.htag_rid = SFMMU_INVALID_SHMERID;
3948 3949  
3949 3950          /*
3950 3951           * Spitfire supports 4 page sizes.
3951 3952           * Most pages are expected to be of the smallest page size (8K) and
3952 3953           * these will not need to be rehashed. 64K pages also don't need to be
3953 3954           * rehashed because an hmeblk spans 64K of address space. 512K pages
3954 3955           * might need 1 rehash and 4M pages might need 2 rehashes.
3955 3956           */
3956 3957          while (addr < endaddr) {
3957 3958                  hmeshift = HME_HASH_SHIFT(hashno);
3958 3959                  hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
3959 3960                  hblktag.htag_rehash = hashno;
3960 3961                  hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift);
3961 3962  
3962 3963                  SFMMU_HASH_LOCK(hmebp);
3963 3964  
3964 3965                  HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list);
3965 3966                  if (hmeblkp != NULL) {
3966 3967                          ASSERT(!hmeblkp->hblk_shared);
3967 3968                          /*
3968 3969                           * If we encounter a shadow hmeblk then
3969 3970                           * we know there are no valid hmeblks mapping
3970 3971                           * this address at this size or larger.
3971 3972                           * Just increment address by the smallest
3972 3973                           * page size.
3973 3974                           */
3974 3975                          if (hmeblkp->hblk_shw_bit) {
3975 3976                                  addr += MMU_PAGESIZE;
3976 3977                          } else {
3977 3978                                  addr = sfmmu_hblk_unlock(hmeblkp, addr,
3978 3979                                      endaddr);
3979 3980                          }
3980 3981                          SFMMU_HASH_UNLOCK(hmebp);
3981 3982                          hashno = 1;
3982 3983                          continue;
3983 3984                  }
3984 3985                  SFMMU_HASH_UNLOCK(hmebp);
3985 3986  
3986 3987                  if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) {
3987 3988                          /*
3988 3989                           * We have traversed the whole list and rehashed
3989 3990                           * if necessary without finding the address to unlock,
3990 3991                           * which should never happen.
3991 3992                           */
3992 3993                          panic("sfmmu_unlock: addr not found. "
3993 3994                              "addr %p hat %p", (void *)addr, (void *)sfmmup);
3994 3995                  } else {
3995 3996                          hashno++;
3996 3997                  }
3997 3998          }
3998 3999  
3999 4000          sfmmu_hblks_list_purge(&list, 0);
4000 4001  }
4001 4002  
4002 4003  void
4003 4004  hat_unlock_region(struct hat *sfmmup, caddr_t addr, size_t len,
4004 4005      hat_region_cookie_t rcookie)
4005 4006  {
4006 4007          sf_srd_t *srdp;
4007 4008          sf_region_t *rgnp;
4008 4009          int ttesz;
4009 4010          uint_t rid;
4010 4011          caddr_t eaddr;
4011 4012          caddr_t va;
4012 4013          int hmeshift;
4013 4014          hmeblk_tag hblktag;
4014 4015          struct hmehash_bucket *hmebp;
4015 4016          struct hme_blk *hmeblkp;
4016 4017          struct hme_blk *pr_hblk;
4017 4018          struct hme_blk *list;
4018 4019  
4019 4020          if (rcookie == HAT_INVALID_REGION_COOKIE) {
4020 4021                  hat_unlock(sfmmup, addr, len);
4021 4022                  return;
4022 4023          }
4023 4024  
4024 4025          ASSERT(sfmmup != NULL);
4025 4026          ASSERT(sfmmup != ksfmmup);
4026 4027  
4027 4028          srdp = sfmmup->sfmmu_srdp;
4028 4029          rid = (uint_t)((uint64_t)rcookie);
4029 4030          VERIFY3U(rid, <, SFMMU_MAX_HME_REGIONS);
4030 4031          eaddr = addr + len;
4031 4032          va = addr;
4032 4033          list = NULL;
4033 4034          rgnp = srdp->srd_hmergnp[rid];
4034 4035          SFMMU_VALIDATE_HMERID(sfmmup, rid, addr, len);
4035 4036  
4036 4037          ASSERT(IS_P2ALIGNED(addr, TTEBYTES(rgnp->rgn_pgszc)));
4037 4038          ASSERT(IS_P2ALIGNED(len, TTEBYTES(rgnp->rgn_pgszc)));
4038 4039          if (rgnp->rgn_pgszc < HBLK_MIN_TTESZ) {
4039 4040                  ttesz = HBLK_MIN_TTESZ;
4040 4041          } else {
4041 4042                  ttesz = rgnp->rgn_pgszc;
4042 4043          }
4043 4044          while (va < eaddr) {
4044 4045                  while (ttesz < rgnp->rgn_pgszc &&
4045 4046                      IS_P2ALIGNED(va, TTEBYTES(ttesz + 1))) {
4046 4047                          ttesz++;
4047 4048                  }
4048 4049                  while (ttesz >= HBLK_MIN_TTESZ) {
4049 4050                          if (!(rgnp->rgn_hmeflags & (1 << ttesz))) {
4050 4051                                  ttesz--;
4051 4052                                  continue;
4052 4053                          }
4053 4054                          hmeshift = HME_HASH_SHIFT(ttesz);
4054 4055                          hblktag.htag_bspage = HME_HASH_BSPAGE(va, hmeshift);
4055 4056                          hblktag.htag_rehash = ttesz;
4056 4057                          hblktag.htag_rid = rid;
4057 4058                          hblktag.htag_id = srdp;
4058 4059                          hmebp = HME_HASH_FUNCTION(srdp, va, hmeshift);
4059 4060                          SFMMU_HASH_LOCK(hmebp);
4060 4061                          HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, pr_hblk,
4061 4062                              &list);
4062 4063                          if (hmeblkp == NULL) {
4063 4064                                  SFMMU_HASH_UNLOCK(hmebp);
4064 4065                                  ttesz--;
4065 4066                                  continue;
4066 4067                          }
4067 4068                          ASSERT(hmeblkp->hblk_shared);
4068 4069                          va = sfmmu_hblk_unlock(hmeblkp, va, eaddr);
4069 4070                          ASSERT(va >= eaddr ||
4070 4071                              IS_P2ALIGNED((uintptr_t)va, TTEBYTES(ttesz)));
4071 4072                          SFMMU_HASH_UNLOCK(hmebp);
4072 4073                          break;
4073 4074                  }
4074 4075                  if (ttesz < HBLK_MIN_TTESZ) {
4075 4076                          panic("hat_unlock_region: addr not found "
4076 4077                              "addr %p hat %p", (void *)va, (void *)sfmmup);
4077 4078                  }
4078 4079          }
4079 4080          sfmmu_hblks_list_purge(&list, 0);
4080 4081  }
4081 4082  
4082 4083  /*
4083 4084   * Function to unlock a range of addresses in an hmeblk.  It returns the
4084 4085   * next address that needs to be unlocked.
4085 4086   * Should be called with the hash lock held.
4086 4087   */
4087 4088  static caddr_t
4088 4089  sfmmu_hblk_unlock(struct hme_blk *hmeblkp, caddr_t addr, caddr_t endaddr)
4089 4090  {
4090 4091          struct sf_hment *sfhme;
4091 4092          tte_t tteold, ttemod;
4092 4093          int ttesz, ret;
4093 4094  
4094 4095          ASSERT(in_hblk_range(hmeblkp, addr));
4095 4096          ASSERT(hmeblkp->hblk_shw_bit == 0);
4096 4097  
4097 4098          endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp));
4098 4099          ttesz = get_hblk_ttesz(hmeblkp);
4099 4100  
4100 4101          HBLKTOHME(sfhme, hmeblkp, addr);
4101 4102          while (addr < endaddr) {
4102 4103  readtte:
4103 4104                  sfmmu_copytte(&sfhme->hme_tte, &tteold);
4104 4105                  if (TTE_IS_VALID(&tteold)) {
4105 4106  
4106 4107                          ttemod = tteold;
4107 4108  
4108 4109                          ret = sfmmu_modifytte_try(&tteold, &ttemod,
4109 4110                              &sfhme->hme_tte);
4110 4111  
4111 4112                          if (ret < 0)
4112 4113                                  goto readtte;
4113 4114  
4114 4115                          if (hmeblkp->hblk_lckcnt == 0)
4115 4116                                  panic("zero hblk lckcnt");
4116 4117  
4117 4118                          if (((uintptr_t)addr + TTEBYTES(ttesz)) >
4118 4119                              (uintptr_t)endaddr)
4119 4120                                  panic("can't unlock large tte");
4120 4121  
4121 4122                          ASSERT(hmeblkp->hblk_lckcnt > 0);
4122 4123                          atomic_dec_32(&hmeblkp->hblk_lckcnt);
4123 4124                          HBLK_STACK_TRACE(hmeblkp, HBLK_UNLOCK);
4124 4125                  } else {
4125 4126                          panic("sfmmu_hblk_unlock: invalid tte");
4126 4127                  }
4127 4128                  addr += TTEBYTES(ttesz);
4128 4129                  sfhme++;
4129 4130          }
4130 4131          return (addr);
4131 4132  }
4132 4133  
4133 4134  /*
4134 4135   * Physical Address Mapping Framework
4135 4136   *
4136 4137   * General rules:
4137 4138   *
4138 4139   * (1) Applies only to seg_kmem memory pages. To make things easier,
4139 4140   *     seg_kpm addresses are also accepted by the routines, but nothing
4140 4141   *     is done with them since by definition their PA mappings are static.
4141 4142   * (2) hat_add_callback() may only be called while holding the page lock
4142 4143   *     SE_SHARED or SE_EXCL of the underlying page (e.g., as_pagelock()),
4143 4144   *     or passing HAC_PAGELOCK flag.
4144 4145   * (3) prehandler() and posthandler() may not call hat_add_callback() or
4145 4146   *     hat_delete_callback(), nor should they allocate memory. Post quiesce
4146 4147   *     callbacks may not sleep or acquire adaptive mutex locks.
4147 4148   * (4) Either prehandler() or posthandler() (but not both) may be specified
4148 4149   *     as being NULL.  Specifying an errhandler() is optional.
4149 4150   *
4150 4151   * Details of using the framework:
4151 4152   *
4152 4153   * registering a callback (hat_register_callback())
4153 4154   *
4154 4155   *      Pass prehandler, posthandler, errhandler addresses
4155 4156   *      as described below. If capture_cpus argument is nonzero,
4156 4157   *      suspend callback to the prehandler will occur with CPUs
4157 4158   *      captured and executing xc_loop() and CPUs will remain
4158 4159   *      captured until after the posthandler suspend callback
4159 4160   *      occurs.
4160 4161   *
4161 4162   * adding a callback (hat_add_callback())
4162 4163   *
4163 4164   *      as_pagelock();
4164 4165   *      hat_add_callback();
4165 4166   *      save returned pfn in private data structures or program registers;
4166 4167   *      as_pageunlock();
4167 4168   *
4168 4169   * prehandler()
4169 4170   *
4170 4171   *      Stop all accesses by physical address to this memory page.
4171 4172   *      Called twice: the first, PRESUSPEND, is a context safe to acquire
4172 4173   *      adaptive locks. The second, SUSPEND, is called at high PIL with
4173 4174   *      CPUs captured so adaptive locks may NOT be acquired (and all spin
4174 4175   *      locks must be XCALL_PIL or higher locks).
4175 4176   *
4176 4177   *      May return the following errors:
4177 4178   *              EIO:    A fatal error has occurred. This will result in panic.
4178 4179   *              EAGAIN: The page cannot be suspended. This will fail the
4179 4180   *                      relocation.
4180 4181   *              0:      Success.
4181 4182   *
4182 4183   * posthandler()
4183 4184   *
4184 4185   *      Save new pfn in private data structures or program registers;
4185 4186   *      not allowed to fail (non-zero return values will result in panic).
4186 4187   *
4187 4188   * errhandler()
4188 4189   *
4189 4190   *      called when an error occurs related to the callback.  Currently
4190 4191   *      the only such error is HAT_CB_ERR_LEAKED which indicates that
4191 4192   *      a page is being freed, but there are still outstanding callback(s)
4192 4193   *      registered on the page.
4193 4194   *
4194 4195   * removing a callback (hat_delete_callback(); e.g., prior to freeing memory)
4195 4196   *
4196 4197   *      stop using physical address
4197 4198   *      hat_delete_callback();
4198 4199   *
4199 4200   */
4200 4201  
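           /*
            * A minimal end-to-end sketch of the framework described above,
            * for a hypothetical driver; MYDRV_KEY and the mydrv_* names are
            * placeholders, and the flag choices shown are one reasonable
            * option rather than the only correct one:
            *
            *      once, e.g. at attach time; cache the returned id:
            *      id = hat_register_callback(MYDRV_KEY, mydrv_presuspend,
            *          mydrv_postresume, mydrv_err, 1);
            *
            *      per page, letting the framework lock the page itself:
            *      (void) hat_add_callback(id, vaddr, len, HAC_PAGELOCK,
            *          mydrv_state, &pfn, &cookie);
            *      program the device with pfn; on relocation the
            *      posthandler saves the new pfn;
            *
            *      before the memory is freed or unmapped:
            *      hat_delete_callback(vaddr, len, mydrv_state, HAC_PAGELOCK,
            *          cookie);
            */
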
4201 4202  /*
4202 4203   * Register a callback class.  Each subsystem should do this once and
4203 4204   * cache the id_t returned for use in setting up and tearing down callbacks.
4204 4205   *
4205 4206   * There is no facility for removing callback IDs once they are created;
4206 4207   * the "key" should be unique for each module, so in case a module is unloaded
4207 4208   * and subsequently re-loaded, we can recycle the module's previous entry.
4208 4209   */
4209 4210  id_t
4210 4211  hat_register_callback(int key,
4211 4212          int (*prehandler)(caddr_t, uint_t, uint_t, void *),
4212 4213          int (*posthandler)(caddr_t, uint_t, uint_t, void *, pfn_t),
4213 4214          int (*errhandler)(caddr_t, uint_t, uint_t, void *),
4214 4215          int capture_cpus)
4215 4216  {
4216 4217          id_t id;
4217 4218  
4218 4219          /*
4219 4220           * Search the table for a pre-existing callback associated with
4220 4221           * the identifier "key".  If one exists, we re-use that entry in
4221 4222           * the table for this instance, otherwise we assign the next
4222 4223           * available table slot.
4223 4224           */
4224 4225          for (id = 0; id < sfmmu_max_cb_id; id++) {
4225 4226                  if (sfmmu_cb_table[id].key == key)
4226 4227                          break;
4227 4228          }
4228 4229  
4229 4230          if (id == sfmmu_max_cb_id) {
4230 4231                  id = sfmmu_cb_nextid++;
4231 4232                  if (id >= sfmmu_max_cb_id)
4232 4233                          panic("hat_register_callback: out of callback IDs");
4233 4234          }
4234 4235  
4235 4236          ASSERT(prehandler != NULL || posthandler != NULL);
4236 4237  
4237 4238          sfmmu_cb_table[id].key = key;
4238 4239          sfmmu_cb_table[id].prehandler = prehandler;
4239 4240          sfmmu_cb_table[id].posthandler = posthandler;
4240 4241          sfmmu_cb_table[id].errhandler = errhandler;
4241 4242          sfmmu_cb_table[id].capture_cpus = capture_cpus;
4242 4243  
4243 4244          return (id);
4244 4245  }
4245 4246  
4246 4247  #define HAC_COOKIE_NONE (void *)-1
4247 4248  
4248 4249  /*
4249 4250   * Add relocation callbacks to the specified addr/len which will be called
4250 4251   * when relocating the associated page. See the description of pre and
4251 4252   * posthandler above for more details.
4252 4253   *
4253 4254   * If HAC_PAGELOCK is included in flags, the underlying memory page is
4254 4255   * locked internally so the caller must be able to deal with the callback
4255 4256   * running even before this function has returned.  If HAC_PAGELOCK is not
4256 4257   * set, it is assumed that the underlying memory pages are locked.
4257 4258   *
4258 4259   * Since the caller must track the individual page boundaries anyway,
4259 4260   * we only allow a callback to be added to a single page (large
4260 4261   * or small).  Thus [addr, addr + len) MUST be contained within a single
4261 4262   * page.
4262 4263   *
4263 4264   * Registering multiple callbacks on the same [addr, addr+len) is supported,
4264 4265   * _provided_that_ a unique parameter is specified for each callback.
4265 4266   * If multiple callbacks are registered on the same range the callback will
4266 4267   * be invoked with each unique parameter. Registering the same callback with
4267 4268   * the same argument more than once will result in corrupted kernel state.
4268 4269   *
4269 4270   * Returns the pfn of the underlying kernel page in *rpfn
4270 4271   * on success, or PFN_INVALID on failure.
4271 4272   *
4272 4273   * cookiep (if passed) provides storage space for an opaque cookie
4273 4274   * to return later to hat_delete_callback(). This cookie makes the callback
4274 4275   * deletion significantly quicker by avoiding a potentially lengthy hash
4275 4276   * search.
4276 4277   *
4277 4278   * Return values:
4278 4279   *    0:      success
4279 4280   *    ENOMEM: memory allocation failure (e.g. flags was passed as HAC_NOSLEEP)
4280 4281   *    EINVAL: callback ID is not valid
4281 4282   *    ENXIO:  ["vaddr", "vaddr" + len) is not mapped in the kernel's address
4282 4283   *            space
4283 4284   *    ERANGE: ["vaddr", "vaddr" + len) crosses a page boundary
4284 4285   */
4285 4286  int
4286 4287  hat_add_callback(id_t callback_id, caddr_t vaddr, uint_t len, uint_t flags,
4287 4288          void *pvt, pfn_t *rpfn, void **cookiep)
4288 4289  {
4289 4290          struct          hmehash_bucket *hmebp;
4290 4291          hmeblk_tag      hblktag;
4291 4292          struct hme_blk  *hmeblkp;
4292 4293          int             hmeshift, hashno;
4293 4294          caddr_t         saddr, eaddr, baseaddr;
4294 4295          struct pa_hment *pahmep;
4295 4296          struct sf_hment *sfhmep, *osfhmep;
4296 4297          kmutex_t        *pml;
4297 4298          tte_t           tte;
4298 4299          page_t          *pp;
4299 4300          vnode_t         *vp;
4300 4301          u_offset_t      off;
4301 4302          pfn_t           pfn;
4302 4303          int             kmflags = (flags & HAC_SLEEP)? KM_SLEEP : KM_NOSLEEP;
4303 4304          int             locked = 0;
4304 4305  
4305 4306          /*
4306 4307           * For KPM mappings, just return the physical address since we
4307 4308           * don't need to register any callbacks.
4308 4309           */
4309 4310          if (IS_KPM_ADDR(vaddr)) {
4310 4311                  uint64_t paddr;
4311 4312                  SFMMU_KPM_VTOP(vaddr, paddr);
4312 4313                  *rpfn = btop(paddr);
4313 4314                  if (cookiep != NULL)
4314 4315                          *cookiep = HAC_COOKIE_NONE;
4315 4316                  return (0);
4316 4317          }
4317 4318  
4318 4319          if (callback_id < (id_t)0 || callback_id >= sfmmu_cb_nextid) {
4319 4320                  *rpfn = PFN_INVALID;
4320 4321                  return (EINVAL);
4321 4322          }
4322 4323  
4323 4324          if ((pahmep = kmem_cache_alloc(pa_hment_cache, kmflags)) == NULL) {
4324 4325                  *rpfn = PFN_INVALID;
4325 4326                  return (ENOMEM);
4326 4327          }
4327 4328  
4328 4329          sfhmep = &pahmep->sfment;
4329 4330  
4330 4331          saddr = (caddr_t)((uintptr_t)vaddr & MMU_PAGEMASK);
4331 4332          eaddr = saddr + len;
4332 4333  
4333 4334  rehash:
4334 4335          /* Find the mapping(s) for this page */
4335 4336          for (hashno = TTE64K, hmeblkp = NULL;
4336 4337              hmeblkp == NULL && hashno <= mmu_hashcnt;
4337 4338              hashno++) {
4338 4339                  hmeshift = HME_HASH_SHIFT(hashno);
4339 4340                  hblktag.htag_id = ksfmmup;
4340 4341                  hblktag.htag_rid = SFMMU_INVALID_SHMERID;
4341 4342                  hblktag.htag_bspage = HME_HASH_BSPAGE(saddr, hmeshift);
4342 4343                  hblktag.htag_rehash = hashno;
4343 4344                  hmebp = HME_HASH_FUNCTION(ksfmmup, saddr, hmeshift);
4344 4345  
4345 4346                  SFMMU_HASH_LOCK(hmebp);
4346 4347  
4347 4348                  HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp);
4348 4349  
4349 4350                  if (hmeblkp == NULL)
4350 4351                          SFMMU_HASH_UNLOCK(hmebp);
4351 4352          }
4352 4353  
4353 4354          if (hmeblkp == NULL) {
4354 4355                  kmem_cache_free(pa_hment_cache, pahmep);
4355 4356                  *rpfn = PFN_INVALID;
4356 4357                  return (ENXIO);
4357 4358          }
4358 4359  
4359 4360          ASSERT(!hmeblkp->hblk_shared);
4360 4361  
4361 4362          HBLKTOHME(osfhmep, hmeblkp, saddr);
4362 4363          sfmmu_copytte(&osfhmep->hme_tte, &tte);
4363 4364  
4364 4365          if (!TTE_IS_VALID(&tte)) {
4365 4366                  SFMMU_HASH_UNLOCK(hmebp);
4366 4367                  kmem_cache_free(pa_hment_cache, pahmep);
4367 4368                  *rpfn = PFN_INVALID;
4368 4369                  return (ENXIO);
4369 4370          }
4370 4371  
4371 4372          /*
4372 4373           * Make sure the boundaries for the callback fall within this
4373 4374           * single mapping.
4374 4375           */
4375 4376          baseaddr = (caddr_t)get_hblk_base(hmeblkp);
4376 4377          ASSERT(saddr >= baseaddr);
4377 4378          if (eaddr > saddr + TTEBYTES(TTE_CSZ(&tte))) {
4378 4379                  SFMMU_HASH_UNLOCK(hmebp);
4379 4380                  kmem_cache_free(pa_hment_cache, pahmep);
4380 4381                  *rpfn = PFN_INVALID;
4381 4382                  return (ERANGE);
4382 4383          }
4383 4384  
4384 4385          pfn = sfmmu_ttetopfn(&tte, vaddr);
4385 4386  
4386 4387          /*
4387 4388           * The pfn may not have a page_t underneath in which case we
4388 4389           * just return it. This can happen if we are doing I/O to a
4389 4390           * static portion of the kernel's address space, for instance.
4390 4391           */
4391 4392          pp = osfhmep->hme_page;
4392 4393          if (pp == NULL) {
4393 4394                  SFMMU_HASH_UNLOCK(hmebp);
4394 4395                  kmem_cache_free(pa_hment_cache, pahmep);
4395 4396                  *rpfn = pfn;
4396 4397                  if (cookiep)
4397 4398                          *cookiep = HAC_COOKIE_NONE;
4398 4399                  return (0);
4399 4400          }
4400 4401          ASSERT(pp == PP_PAGEROOT(pp));
4401 4402  
4402 4403          vp = pp->p_vnode;
4403 4404          off = pp->p_offset;
4404 4405  
4405 4406          pml = sfmmu_mlist_enter(pp);
4406 4407  
4407 4408          if (flags & HAC_PAGELOCK) {
4408 4409                  if (!page_trylock(pp, SE_SHARED)) {
4409 4410                          /*
4410 4411                           * Somebody is holding SE_EXCL lock. Might
4411 4412                           * even be hat_page_relocate(). Drop all
4412 4413                           * our locks, lookup the page in &kvp, and
4413 4414                           * retry. If it doesn't exist in &kvp and &zvp,
4414 4415                           * then we must be dealing with a kernel mapped
4415 4416                           * page which doesn't actually belong to
4416 4417                           * segkmem so we punt.
4417 4418                           */
4418 4419                          sfmmu_mlist_exit(pml);
4419 4420                          SFMMU_HASH_UNLOCK(hmebp);
4420 4421                          pp = page_lookup(&kvp, (u_offset_t)saddr, SE_SHARED);
4421 4422  
4422 4423                          /* check zvp before giving up */
4423 4424                          if (pp == NULL)
4424 4425                                  pp = page_lookup(&zvp, (u_offset_t)saddr,
4425 4426                                      SE_SHARED);
4426 4427  
4427 4428                          /* Okay, we didn't find it, give up */
4428 4429                          if (pp == NULL) {
4429 4430                                  kmem_cache_free(pa_hment_cache, pahmep);
4430 4431                                  *rpfn = pfn;
4431 4432                                  if (cookiep)
4432 4433                                          *cookiep = HAC_COOKIE_NONE;
4433 4434                                  return (0);
4434 4435                          }
4435 4436                          page_unlock(pp);
4436 4437                          goto rehash;
4437 4438                  }
4438 4439                  locked = 1;
4439 4440          }
4440 4441  
4441 4442          if (!PAGE_LOCKED(pp) && !panicstr)
4442 4443                  panic("hat_add_callback: page 0x%p not locked", (void *)pp);
4443 4444  
4444 4445          if (osfhmep->hme_page != pp || pp->p_vnode != vp ||
4445 4446              pp->p_offset != off) {
4446 4447                  /*
4447 4448                   * The page moved before we got our hands on it.  Drop
4448 4449                   * all the locks and try again.
4449 4450                   */
4450 4451                  ASSERT((flags & HAC_PAGELOCK) != 0);
4451 4452                  sfmmu_mlist_exit(pml);
4452 4453                  SFMMU_HASH_UNLOCK(hmebp);
4453 4454                  page_unlock(pp);
4454 4455                  locked = 0;
4455 4456                  goto rehash;
4456 4457          }
4457 4458  
4458 4459          if (!VN_ISKAS(vp)) {
4459 4460                  /*
4460 4461                   * This is not a segkmem page but another page which
4461 4462                   * has been kernel mapped. It had better have at least
4462 4463                   * a share lock on it. Return the pfn.
4463 4464                   */
4464 4465                  sfmmu_mlist_exit(pml);
4465 4466                  SFMMU_HASH_UNLOCK(hmebp);
4466 4467                  if (locked)
4467 4468                          page_unlock(pp);
4468 4469                  kmem_cache_free(pa_hment_cache, pahmep);
4469 4470                  ASSERT(PAGE_LOCKED(pp));
4470 4471                  *rpfn = pfn;
4471 4472                  if (cookiep)
4472 4473                          *cookiep = HAC_COOKIE_NONE;
4473 4474                  return (0);
4474 4475          }
4475 4476  
4476 4477          /*
4477 4478           * Setup this pa_hment and link its embedded dummy sf_hment into
4478 4479           * the mapping list.
4479 4480           */
4480 4481          pp->p_share++;
4481 4482          pahmep->cb_id = callback_id;
4482 4483          pahmep->addr = vaddr;
4483 4484          pahmep->len = len;
4484 4485          pahmep->refcnt = 1;
4485 4486          pahmep->flags = 0;
4486 4487          pahmep->pvt = pvt;
4487 4488  
4488 4489          sfhmep->hme_tte.ll = 0;
4489 4490          sfhmep->hme_data = pahmep;
4490 4491          sfhmep->hme_prev = osfhmep;
4491 4492          sfhmep->hme_next = osfhmep->hme_next;
4492 4493  
4493 4494          if (osfhmep->hme_next)
4494 4495                  osfhmep->hme_next->hme_prev = sfhmep;
4495 4496  
4496 4497          osfhmep->hme_next = sfhmep;
4497 4498  
4498 4499          sfmmu_mlist_exit(pml);
4499 4500          SFMMU_HASH_UNLOCK(hmebp);
4500 4501  
4501 4502          if (locked)
4502 4503                  page_unlock(pp);
4503 4504  
4504 4505          *rpfn = pfn;
4505 4506          if (cookiep)
4506 4507                  *cookiep = (void *)pahmep;
4507 4508  
4508 4509          return (0);
4509 4510  }
4510 4511  
4511 4512  /*
4512 4513   * Remove the relocation callbacks from the specified addr/len.
4513 4514   */
4514 4515  void
4515 4516  hat_delete_callback(caddr_t vaddr, uint_t len, void *pvt, uint_t flags,
4516 4517          void *cookie)
4517 4518  {
4518 4519          struct          hmehash_bucket *hmebp;
4519 4520          hmeblk_tag      hblktag;
4520 4521          struct hme_blk  *hmeblkp;
4521 4522          int             hmeshift, hashno;
4522 4523          caddr_t         saddr;
4523 4524          struct pa_hment *pahmep;
4524 4525          struct sf_hment *sfhmep, *osfhmep;
4525 4526          kmutex_t        *pml;
4526 4527          tte_t           tte;
4527 4528          page_t          *pp;
4528 4529          vnode_t         *vp;
4529 4530          u_offset_t      off;
4530 4531          int             locked = 0;
4531 4532  
4532 4533          /*
4533 4534           * If the cookie is HAC_COOKIE_NONE then there is no pa_hment to
4534 4535           * remove so just return.
4535 4536           */
4536 4537          if (cookie == HAC_COOKIE_NONE || IS_KPM_ADDR(vaddr))
4537 4538                  return;
4538 4539  
4539 4540          saddr = (caddr_t)((uintptr_t)vaddr & MMU_PAGEMASK);
4540 4541  
4541 4542  rehash:
4542 4543          /* Find the mapping(s) for this page */
4543 4544          for (hashno = TTE64K, hmeblkp = NULL;
4544 4545              hmeblkp == NULL && hashno <= mmu_hashcnt;
4545 4546              hashno++) {
4546 4547                  hmeshift = HME_HASH_SHIFT(hashno);
4547 4548                  hblktag.htag_id = ksfmmup;
4548 4549                  hblktag.htag_rid = SFMMU_INVALID_SHMERID;
4549 4550                  hblktag.htag_bspage = HME_HASH_BSPAGE(saddr, hmeshift);
4550 4551                  hblktag.htag_rehash = hashno;
4551 4552                  hmebp = HME_HASH_FUNCTION(ksfmmup, saddr, hmeshift);
4552 4553  
4553 4554                  SFMMU_HASH_LOCK(hmebp);
4554 4555  
4555 4556                  HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp);
4556 4557  
4557 4558                  if (hmeblkp == NULL)
4558 4559                          SFMMU_HASH_UNLOCK(hmebp);
4559 4560          }
4560 4561  
4561 4562          if (hmeblkp == NULL)
4562 4563                  return;
4563 4564  
4564 4565          ASSERT(!hmeblkp->hblk_shared);
4565 4566  
4566 4567          HBLKTOHME(osfhmep, hmeblkp, saddr);
4567 4568  
4568 4569          sfmmu_copytte(&osfhmep->hme_tte, &tte);
4569 4570          if (!TTE_IS_VALID(&tte)) {
4570 4571                  SFMMU_HASH_UNLOCK(hmebp);
4571 4572                  return;
4572 4573          }
4573 4574  
4574 4575          pp = osfhmep->hme_page;
4575 4576          if (pp == NULL) {
4576 4577                  SFMMU_HASH_UNLOCK(hmebp);
4577 4578                  ASSERT(cookie == NULL);
4578 4579                  return;
4579 4580          }
4580 4581  
4581 4582          vp = pp->p_vnode;
4582 4583          off = pp->p_offset;
4583 4584  
4584 4585          pml = sfmmu_mlist_enter(pp);
4585 4586  
4586 4587          if (flags & HAC_PAGELOCK) {
4587 4588                  if (!page_trylock(pp, SE_SHARED)) {
4588 4589                          /*
4589 4590                           * Somebody is holding SE_EXCL lock. Might
4590 4591                           * even be hat_page_relocate(). Drop all
4591 4592                           * our locks, lookup the page in &kvp, and
4592 4593                           * retry. If it doesn't exist in &kvp and &zvp,
4593 4594                           * then we must be dealing with a kernel mapped
4594 4595                           * page which doesn't actually belong to
4595 4596                           * segkmem so we punt.
4596 4597                           */
4597 4598                          sfmmu_mlist_exit(pml);
4598 4599                          SFMMU_HASH_UNLOCK(hmebp);
4599 4600                          pp = page_lookup(&kvp, (u_offset_t)saddr, SE_SHARED);
4600 4601                          /* check zvp before giving up */
4601 4602                          if (pp == NULL)
4602 4603                                  pp = page_lookup(&zvp, (u_offset_t)saddr,
4603 4604                                      SE_SHARED);
4604 4605  
4605 4606                          if (pp == NULL) {
4606 4607                                  ASSERT(cookie == NULL);
4607 4608                                  return;
4608 4609                          }
4609 4610                          page_unlock(pp);
4610 4611                          goto rehash;
4611 4612                  }
4612 4613                  locked = 1;
4613 4614          }
4614 4615  
4615 4616          ASSERT(PAGE_LOCKED(pp));
4616 4617  
4617 4618          if (osfhmep->hme_page != pp || pp->p_vnode != vp ||
4618 4619              pp->p_offset != off) {
4619 4620                  /*
4620 4621                   * The page moved before we got our hands on it.  Drop
4621 4622                   * all the locks and try again.
4622 4623                   */
4623 4624                  ASSERT((flags & HAC_PAGELOCK) != 0);
4624 4625                  sfmmu_mlist_exit(pml);
4625 4626                  SFMMU_HASH_UNLOCK(hmebp);
4626 4627                  page_unlock(pp);
4627 4628                  locked = 0;
4628 4629                  goto rehash;
4629 4630          }
4630 4631  
4631 4632          if (!VN_ISKAS(vp)) {
4632 4633                  /*
4633 4634                   * This is not a segkmem page but another page which
4634 4635                   * has been kernel mapped.
4635 4636                   */
4636 4637                  sfmmu_mlist_exit(pml);
4637 4638                  SFMMU_HASH_UNLOCK(hmebp);
4638 4639                  if (locked)
4639 4640                          page_unlock(pp);
4640 4641                  ASSERT(cookie == NULL);
4641 4642                  return;
4642 4643          }
4643 4644  
4644 4645          if (cookie != NULL) {
4645 4646                  pahmep = (struct pa_hment *)cookie;
4646 4647                  sfhmep = &pahmep->sfment;
4647 4648          } else {
4648 4649                  for (sfhmep = pp->p_mapping; sfhmep != NULL;
4649 4650                      sfhmep = sfhmep->hme_next) {
4650 4651  
4651 4652                          /*
4652 4653                           * skip va<->pa mappings
4653 4654                           */
4654 4655                          if (!IS_PAHME(sfhmep))
4655 4656                                  continue;
4656 4657  
4657 4658                          pahmep = sfhmep->hme_data;
4658 4659                          ASSERT(pahmep != NULL);
4659 4660  
4660 4661                          /*
4661 4662                           * if pa_hment matches, remove it
4662 4663                           */
4663 4664                          if ((pahmep->pvt == pvt) &&
4664 4665                              (pahmep->addr == vaddr) &&
4665 4666                              (pahmep->len == len)) {
4666 4667                                  break;
4667 4668                          }
4668 4669                  }
4669 4670          }
4670 4671  
4671 4672          if (sfhmep == NULL) {
4672 4673                  if (!panicstr) {
4673 4674                          panic("hat_delete_callback: pa_hment not found, pp %p",
4674 4675                              (void *)pp);
4675 4676                  }
4676 4677                  return;
4677 4678          }
4678 4679  
4679 4680          /*
4680 4681           * Note: at this point a valid kernel mapping must still be
4681 4682           * present on this page.
4682 4683           */
4683 4684          pp->p_share--;
4684 4685          if (pp->p_share <= 0)
4685 4686                  panic("hat_delete_callback: zero p_share");
4686 4687  
4687 4688          if (--pahmep->refcnt == 0) {
4688 4689                  if (pahmep->flags != 0)
4689 4690                          panic("hat_delete_callback: pa_hment is busy");
4690 4691  
4691 4692                  /*
4692 4693                   * Remove sfhmep from the mapping list for the page.
4693 4694                   */
4694 4695                  if (sfhmep->hme_prev) {
4695 4696                          sfhmep->hme_prev->hme_next = sfhmep->hme_next;
4696 4697                  } else {
4697 4698                          pp->p_mapping = sfhmep->hme_next;
4698 4699                  }
4699 4700  
4700 4701                  if (sfhmep->hme_next)
4701 4702                          sfhmep->hme_next->hme_prev = sfhmep->hme_prev;
4702 4703  
4703 4704                  sfmmu_mlist_exit(pml);
4704 4705                  SFMMU_HASH_UNLOCK(hmebp);
4705 4706  
4706 4707                  if (locked)
4707 4708                          page_unlock(pp);
4708 4709  
4709 4710                  kmem_cache_free(pa_hment_cache, pahmep);
4710 4711                  return;
4711 4712          }
4712 4713  
4713 4714          sfmmu_mlist_exit(pml);
4714 4715          SFMMU_HASH_UNLOCK(hmebp);
4715 4716          if (locked)
4716 4717                  page_unlock(pp);
4717 4718  }
4718 4719  
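The tail of hat_delete_callback() above follows a common pattern: drop the pa_hment's reference count and, only when it reaches zero, unlink its sf_hment from the page's doubly linked mapping list and free it back to its cache. A minimal stand-alone sketch of that pattern follows; struct mapnode, maplist_head and mapnode_release() are illustrative names, not kernel interfaces.

/*
 * Stand-alone sketch of the "drop reference, unlink on last release"
 * pattern used by hat_delete_callback().  struct mapnode, maplist_head
 * and mapnode_release() are illustrative names, not kernel interfaces.
 */
#include <stdio.h>
#include <stdlib.h>

struct mapnode {
	struct mapnode	*next;
	struct mapnode	*prev;
	int		refcnt;
};

static struct mapnode *maplist_head;

static void
mapnode_release(struct mapnode *np)
{
	if (--np->refcnt != 0)
		return;				/* still referenced */

	/* unlink from the doubly linked mapping list */
	if (np->prev != NULL)
		np->prev->next = np->next;
	else
		maplist_head = np->next;
	if (np->next != NULL)
		np->next->prev = np->prev;

	free(np);				/* kmem_cache_free() analogue */
}

int
main(void)
{
	struct mapnode *np = calloc(1, sizeof (*np));

	np->refcnt = 2;
	maplist_head = np;
	mapnode_release(np);		/* refcnt 2 -> 1, stays linked */
	mapnode_release(np);		/* refcnt 1 -> 0, unlinked and freed */
	printf("head=%p\n", (void *)maplist_head);
	return (0);
}
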
4719 4720  /*
4720 4721   * hat_probe returns 1 if the translation for the address 'addr' is
4721 4722   * loaded, zero otherwise.
4722 4723   *
4723 4724   * hat_probe should be used only for advisory purposes because it may
4724 4725   * occasionally return the wrong value. The implementation must guarantee that
4725 4726   * returning the wrong value is a very rare event. hat_probe is used
4726 4727   * to implement optimizations in the segment drivers.
4727 4728   *
4728 4729   */
4729 4730  int
4730 4731  hat_probe(struct hat *sfmmup, caddr_t addr)
4731 4732  {
4732 4733          pfn_t pfn;
4733 4734          tte_t tte;
4734 4735  
4735 4736          ASSERT(sfmmup != NULL);
4736 4737  
4737 4738          ASSERT((sfmmup == ksfmmup) || AS_LOCK_HELD(sfmmup->sfmmu_as));
4738 4739  
4739 4740          if (sfmmup == ksfmmup) {
4740 4741                  while ((pfn = sfmmu_vatopfn(addr, sfmmup, &tte))
4741 4742                      == PFN_SUSPENDED) {
4742 4743                          sfmmu_vatopfn_suspended(addr, sfmmup, &tte);
4743 4744                  }
4744 4745          } else {
4745 4746                  pfn = sfmmu_uvatopfn(addr, sfmmup, NULL);
4746 4747          }
4747 4748  
4748 4749          if (pfn != PFN_INVALID)
4749 4750                  return (1);
4750 4751          else
4751 4752                  return (0);
4752 4753  }
4753 4754  
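Because hat_probe() is only advisory, a caller must treat a hit purely as a performance hint: a stale answer may cost extra work but must never affect correctness. A hypothetical usage sketch, with probe_hint() and load_translation() standing in for hat_probe() and an idempotent preload step in a segment driver:

/*
 * Usage sketch: an advisory probe is only a performance hint, so a
 * stale answer must cost at most some wasted work, never correctness.
 * probe_hint() and load_translation() are hypothetical stand-ins for
 * hat_probe() and a driver's (idempotent) preload step.
 */
#include <stdio.h>

static int
probe_hint(unsigned long addr)
{
	/* pretend addresses without bit 12 set are "probably loaded" */
	return ((addr & 0x1000) == 0);
}

static void
load_translation(unsigned long addr)
{
	/* idempotent: loading an already-present translation is harmless */
	printf("loading translation for 0x%lx\n", addr);
}

static void
maybe_preload(unsigned long addr)
{
	/*
	 * Skip the preload only when the hint says the translation is
	 * already there; a wrong hint merely means one extra fault or
	 * one redundant load later.
	 */
	if (!probe_hint(addr))
		load_translation(addr);
}

int
main(void)
{
	maybe_preload(0x2000);		/* hint: present, preload skipped */
	maybe_preload(0x3000);		/* hint: absent, preload done */
	return (0);
}
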
4754 4755  ssize_t
4755 4756  hat_getpagesize(struct hat *sfmmup, caddr_t addr)
4756 4757  {
4757 4758          tte_t tte;
4758 4759  
4759 4760          if (sfmmup == ksfmmup) {
4760 4761                  if (sfmmu_vatopfn(addr, sfmmup, &tte) == PFN_INVALID) {
4761 4762                          return (-1);
4762 4763                  }
4763 4764          } else {
4764 4765                  if (sfmmu_uvatopfn(addr, sfmmup, &tte) == PFN_INVALID) {
4765 4766                          return (-1);
4766 4767                  }
4767 4768          }
4768 4769  
4769 4770          ASSERT(TTE_IS_VALID(&tte));
4770 4771          return (TTEBYTES(TTE_CSZ(&tte)));
4771 4772  }
4772 4773  
4773 4774  uint_t
4774 4775  hat_getattr(struct hat *sfmmup, caddr_t addr, uint_t *attr)
4775 4776  {
4776 4777          tte_t tte;
4777 4778  
4778 4779          if (sfmmup == ksfmmup) {
4779 4780                  if (sfmmu_vatopfn(addr, sfmmup, &tte) == PFN_INVALID) {
4780 4781                          tte.ll = 0;
4781 4782                  }
4782 4783          } else {
4783 4784                  if (sfmmu_uvatopfn(addr, sfmmup, &tte) == PFN_INVALID) {
4784 4785                          tte.ll = 0;
4785 4786                  }
4786 4787          }
4787 4788          if (TTE_IS_VALID(&tte)) {
4788 4789                  *attr = sfmmu_ptov_attr(&tte);
4789 4790                  return (0);
4790 4791          }
4791 4792          *attr = 0;
4792 4793          return ((uint_t)0xffffffff);
4793 4794  }
4794 4795  
4795 4796  /*
4796 4797   * Enables more attributes on the specified address range (ie. logical OR)
4797 4798   */
4798 4799  void
4799 4800  hat_setattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr)
4800 4801  {
4801 4802          ASSERT(hat->sfmmu_as != NULL);
4802 4803  
4803 4804          sfmmu_chgattr(hat, addr, len, attr, SFMMU_SETATTR);
4804 4805  }
4805 4806  
4806 4807  /*
4807 4808   * Assigns attributes to the specified address range.  All the attributes
4808 4809   * are specified.
4809 4810   */
4810 4811  void
4811 4812  hat_chgattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr)
4812 4813  {
4813 4814          ASSERT(hat->sfmmu_as != NULL);
4814 4815  
4815 4816          sfmmu_chgattr(hat, addr, len, attr, SFMMU_CHGATTR);
4816 4817  }
4817 4818  
4818 4819  /*
4819 4820   * Remove attributes on the specified address range (ie. logical NAND)
4820 4821   */
4821 4822  void
4822 4823  hat_clrattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr)
4823 4824  {
4824 4825          ASSERT(hat->sfmmu_as != NULL);
4825 4826  
4826 4827          sfmmu_chgattr(hat, addr, len, attr, SFMMU_CLRATTR);
4827 4828  }
4828 4829  
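hat_setattr(), hat_chgattr() and hat_clrattr() all funnel into sfmmu_chgattr() and differ only in the mode: SETATTR ORs attributes in, CHGATTR assigns them outright, and CLRATTR NANDs them away. A minimal stand-alone sketch of the three modes on a plain bitmask (attr_apply() and the MODE_* names are illustrative, not the kernel's):

/*
 * Stand-alone sketch of the three attribute modes behind
 * hat_setattr/hat_chgattr/hat_clrattr.  attr_apply(), the MODE_*
 * names and the plain unsigned bitmask are illustrative only; the
 * real code operates on TTE bits under the hash and mapping-list
 * locks.
 */
#include <stdio.h>

enum attr_mode { MODE_SET, MODE_CHG, MODE_CLR };

static unsigned
attr_apply(unsigned cur, unsigned attr, enum attr_mode mode)
{
	switch (mode) {
	case MODE_SET:			/* logical OR: enable more bits */
		return (cur | attr);
	case MODE_CHG:			/* assign: all attributes specified */
		return (attr);
	case MODE_CLR:			/* logical NAND: remove bits */
		return (cur & ~attr);
	}
	return (cur);
}

int
main(void)
{
	unsigned cur = 0x5;		/* e.g. read + exec */

	printf("set 0x2 -> 0x%x\n", attr_apply(cur, 0x2, MODE_SET));
	printf("chg 0x2 -> 0x%x\n", attr_apply(cur, 0x2, MODE_CHG));
	printf("clr 0x4 -> 0x%x\n", attr_apply(cur, 0x4, MODE_CLR));
	return (0);
}
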
4829 4830  /*
4830 4831   * Change attributes on an address range to that specified by attr and mode.
4831 4832   */
4832 4833  static void
4833 4834  sfmmu_chgattr(struct hat *sfmmup, caddr_t addr, size_t len, uint_t attr,
4834 4835          int mode)
4835 4836  {
4836 4837          struct hmehash_bucket *hmebp;
4837 4838          hmeblk_tag hblktag;
4838 4839          int hmeshift, hashno = 1;
4839 4840          struct hme_blk *hmeblkp, *list = NULL;
4840 4841          caddr_t endaddr;
4841 4842          cpuset_t cpuset;
4842 4843          demap_range_t dmr;
4843 4844  
4844 4845          CPUSET_ZERO(cpuset);
4845 4846  
4846 4847          ASSERT((sfmmup == ksfmmup) || AS_LOCK_HELD(sfmmup->sfmmu_as));
4847 4848          ASSERT((len & MMU_PAGEOFFSET) == 0);
4848 4849          ASSERT(((uintptr_t)addr & MMU_PAGEOFFSET) == 0);
4849 4850  
4850 4851          if ((attr & PROT_USER) && (mode != SFMMU_CLRATTR) &&
4851 4852              ((addr + len) > (caddr_t)USERLIMIT)) {
4852 4853                  panic("user addr %p in kernel space",
4853 4854                      (void *)addr);
4854 4855          }
4855 4856  
4856 4857          endaddr = addr + len;
4857 4858          hblktag.htag_id = sfmmup;
4858 4859          hblktag.htag_rid = SFMMU_INVALID_SHMERID;
4859 4860          DEMAP_RANGE_INIT(sfmmup, &dmr);
4860 4861  
4861 4862          while (addr < endaddr) {
4862 4863                  hmeshift = HME_HASH_SHIFT(hashno);
4863 4864                  hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
4864 4865                  hblktag.htag_rehash = hashno;
4865 4866                  hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift);
4866 4867  
4867 4868                  SFMMU_HASH_LOCK(hmebp);
4868 4869  
4869 4870                  HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list);
4870 4871                  if (hmeblkp != NULL) {
4871 4872                          ASSERT(!hmeblkp->hblk_shared);
4872 4873                          /*
4873 4874                           * We've encountered a shadow hmeblk so skip the range
4874 4875                           * of the next smaller mapping size.
4875 4876                           */
4876 4877                          if (hmeblkp->hblk_shw_bit) {
4877 4878                                  ASSERT(sfmmup != ksfmmup);
4878 4879                                  ASSERT(hashno > 1);
4879 4880                                  addr = (caddr_t)P2END((uintptr_t)addr,
4880 4881                                      TTEBYTES(hashno - 1));
4881 4882                          } else {
4882 4883                                  addr = sfmmu_hblk_chgattr(sfmmup,
4883 4884                                      hmeblkp, addr, endaddr, &dmr, attr, mode);
4884 4885                          }
4885 4886                          SFMMU_HASH_UNLOCK(hmebp);
4886 4887                          hashno = 1;
4887 4888                          continue;
4888 4889                  }
4889 4890                  SFMMU_HASH_UNLOCK(hmebp);
4890 4891  
4891 4892                  if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) {
4892 4893                          /*
4893 4894                           * We have traversed the whole list and rehashed
4894 4895                           * if necessary without finding the address to chgattr.
4895 4896                           * This is ok, so we increment the address by the
4896 4897                           * smallest hmeblk range for kernel mappings or for
4897 4898                           * user mappings with no large pages, and the largest
4898 4899                           * hmeblk range, to account for shadow hmeblks, for
4899 4900                           * user mappings with large pages and continue.
4900 4901                           */
4901 4902                          if (sfmmup == ksfmmup)
4902 4903                                  addr = (caddr_t)P2END((uintptr_t)addr,
4903 4904                                      TTEBYTES(1));
4904 4905                          else
4905 4906                                  addr = (caddr_t)P2END((uintptr_t)addr,
4906 4907                                      TTEBYTES(hashno));
4907 4908                          hashno = 1;
4908 4909                  } else {
4909 4910                          hashno++;
4910 4911                  }
4911 4912          }
4912 4913  
4913 4914          sfmmu_hblks_list_purge(&list, 0);
4914 4915          DEMAP_RANGE_FLUSH(&dmr);
4915 4916          cpuset = sfmmup->sfmmu_cpusran;
4916 4917          xt_sync(cpuset);
4917 4918  }
4918 4919  
4919 4920  /*
4920 4921   * This function chgattrs a range of addresses in an hmeblk.  It returns the
4921 4922   * next address that needs to be chgattr'd.
4922 4923   * It should be called with the hash lock held.
4923 4924   * XXX It should be possible to optimize chgattr by not flushing every time but
4924 4925   * on the other hand:
4925 4926   * 1. do one flush crosscall.
4926 4927   * 2. only flush if we are increasing permissions (make sure this will work)
4927 4928   */
4928 4929  static caddr_t
4929 4930  sfmmu_hblk_chgattr(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr,
4930 4931          caddr_t endaddr, demap_range_t *dmrp, uint_t attr, int mode)
4931 4932  {
4932 4933          tte_t tte, tteattr, tteflags, ttemod;
4933 4934          struct sf_hment *sfhmep;
4934 4935          int ttesz;
4935 4936          struct page *pp = NULL;
4936 4937          kmutex_t *pml, *pmtx;
4937 4938          int ret;
4938 4939          int use_demap_range;
4939 4940  #if defined(SF_ERRATA_57)
4940 4941          int check_exec;
4941 4942  #endif
4942 4943  
4943 4944          ASSERT(in_hblk_range(hmeblkp, addr));
4944 4945          ASSERT(hmeblkp->hblk_shw_bit == 0);
4945 4946          ASSERT(!hmeblkp->hblk_shared);
4946 4947  
4947 4948          endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp));
4948 4949          ttesz = get_hblk_ttesz(hmeblkp);
4949 4950  
4950 4951          /*
4951 4952           * Flush the current demap region if addresses have been
4952 4953           * skipped or the page size doesn't match.
4953 4954           */
4954 4955          use_demap_range = (TTEBYTES(ttesz) == DEMAP_RANGE_PGSZ(dmrp));
4955 4956          if (use_demap_range) {
4956 4957                  DEMAP_RANGE_CONTINUE(dmrp, addr, endaddr);
4957 4958          } else if (dmrp != NULL) {
4958 4959                  DEMAP_RANGE_FLUSH(dmrp);
4959 4960          }
4960 4961  
4961 4962          tteattr.ll = sfmmu_vtop_attr(attr, mode, &tteflags);
4962 4963  #if defined(SF_ERRATA_57)
4963 4964          check_exec = (sfmmup != ksfmmup) &&
4964 4965              AS_TYPE_64BIT(sfmmup->sfmmu_as) &&
4965 4966              TTE_IS_EXECUTABLE(&tteattr);
4966 4967  #endif
4967 4968          HBLKTOHME(sfhmep, hmeblkp, addr);
4968 4969          while (addr < endaddr) {
4969 4970                  sfmmu_copytte(&sfhmep->hme_tte, &tte);
4970 4971                  if (TTE_IS_VALID(&tte)) {
4971 4972                          if ((tte.ll & tteflags.ll) == tteattr.ll) {
4972 4973                                  /*
4973 4974                                   * if the new attr is the same as old
4974 4975                                   * continue
4975 4976                                   */
4976 4977                                  goto next_addr;
4977 4978                          }
4978 4979                          if (!TTE_IS_WRITABLE(&tteattr)) {
4979 4980                                  /*
4980 4981                                   * make sure we clear the hw modify bit if we are
4981 4982                                   * removing write protections
4982 4983                                   */
4983 4984                                  tteflags.tte_intlo |= TTE_HWWR_INT;
4984 4985                          }
4985 4986  
4986 4987                          pml = NULL;
4987 4988                          pp = sfhmep->hme_page;
4988 4989                          if (pp) {
4989 4990                                  pml = sfmmu_mlist_enter(pp);
4990 4991                          }
4991 4992  
4992 4993                          if (pp != sfhmep->hme_page) {
4993 4994                                  /*
4994 4995                                   * tte must have been unloaded.
4995 4996                                   */
4996 4997                                  ASSERT(pml);
4997 4998                                  sfmmu_mlist_exit(pml);
4998 4999                                  continue;
4999 5000                          }
5000 5001  
5001 5002                          ASSERT(pp == NULL || sfmmu_mlist_held(pp));
5002 5003  
5003 5004                          ttemod = tte;
5004 5005                          ttemod.ll = (ttemod.ll & ~tteflags.ll) | tteattr.ll;
5005 5006                          ASSERT(TTE_TO_TTEPFN(&ttemod) == TTE_TO_TTEPFN(&tte));
5006 5007  
5007 5008  #if defined(SF_ERRATA_57)
5008 5009                          if (check_exec && addr < errata57_limit)
5009 5010                                  ttemod.tte_exec_perm = 0;
5010 5011  #endif
5011 5012                          ret = sfmmu_modifytte_try(&tte, &ttemod,
5012 5013                              &sfhmep->hme_tte);
5013 5014  
5014 5015                          if (ret < 0) {
5015 5016                                  /* tte changed underneath us */
5016 5017                                  if (pml) {
5017 5018                                          sfmmu_mlist_exit(pml);
5018 5019                                  }
5019 5020                                  continue;
5020 5021                          }
5021 5022  
5022 5023                          if (tteflags.tte_intlo & TTE_HWWR_INT) {
5023 5024                                  /*
5024 5025                                   * need to sync if we are clearing modify bit.
5025 5026                                   */
5026 5027                                  sfmmu_ttesync(sfmmup, addr, &tte, pp);
5027 5028                          }
5028 5029  
5029 5030                          if (pp && PP_ISRO(pp)) {
5030 5031                                  if (tteattr.tte_intlo & TTE_WRPRM_INT) {
5031 5032                                          pmtx = sfmmu_page_enter(pp);
5032 5033                                          PP_CLRRO(pp);
5033 5034                                          sfmmu_page_exit(pmtx);
5034 5035                                  }
5035 5036                          }
5036 5037  
5037 5038                          if (ret > 0 && use_demap_range) {
5038 5039                                  DEMAP_RANGE_MARKPG(dmrp, addr);
5039 5040                          } else if (ret > 0) {
5040 5041                                  sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0);
5041 5042                          }
5042 5043  
5043 5044                          if (pml) {
5044 5045                                  sfmmu_mlist_exit(pml);
5045 5046                          }
5046 5047                  }
5047 5048  next_addr:
5048 5049                  addr += TTEBYTES(ttesz);
5049 5050                  sfhmep++;
5050 5051                  DEMAP_RANGE_NEXTPG(dmrp);
5051 5052          }
5052 5053          return (addr);
5053 5054  }
5054 5055  
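sfmmu_hblk_chgattr() above (and sfmmu_hblk_chgprot() below) uses a snapshot-modify-retry loop: copy the TTE, build the modified copy, and install it only if the TTE has not changed underneath; otherwise it loops. The stand-alone sketch below models that with a C11 compare-exchange in place of sfmmu_modifytte_try(); sample_tte and change_bits() are illustrative names, not kernel interfaces.

/*
 * Sketch of the snapshot-modify-retry pattern used by
 * sfmmu_hblk_chgattr()/sfmmu_hblk_chgprot(), modeled with a C11
 * atomic compare-exchange standing in for sfmmu_modifytte_try().
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t sample_tte = 0x10;

static void
change_bits(uint64_t mask, uint64_t value)
{
	uint64_t old, newv;

	do {
		old = atomic_load(&sample_tte);	/* sfmmu_copytte() analogue */
		newv = (old & ~mask) | value;	/* build the modified copy */
		/* install only if nothing changed underneath us, else retry */
	} while (!atomic_compare_exchange_weak(&sample_tte, &old, newv));
}

int
main(void)
{
	change_bits(0x0f, 0x03);
	printf("0x%llx\n", (unsigned long long)atomic_load(&sample_tte));
	return (0);
}
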
5055 5056  /*
5056 5057   * This routine converts virtual attributes to physical ones.  It will
5057 5058   * update the tteflags field with the tte mask corresponding to the attributes
5058 5059   * affected and it returns the new attributes.  It will also clear the modify
5059 5060   * bit if we are taking away write permission.  This is necessary since the
5060 5061   * modify bit is the hardware permission bit and we need to clear it in order
5061 5062   * to detect write faults.
5062 5063   */
5063 5064  static uint64_t
5064 5065  sfmmu_vtop_attr(uint_t attr, int mode, tte_t *ttemaskp)
5065 5066  {
5066 5067          tte_t ttevalue;
5067 5068  
5068 5069          ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR));
5069 5070  
5070 5071          switch (mode) {
5071 5072          case SFMMU_CHGATTR:
5072 5073                  /* all attributes specified */
5073 5074                  ttevalue.tte_inthi = MAKE_TTEATTR_INTHI(attr);
5074 5075                  ttevalue.tte_intlo = MAKE_TTEATTR_INTLO(attr);
5075 5076                  ttemaskp->tte_inthi = TTEINTHI_ATTR;
5076 5077                  ttemaskp->tte_intlo = TTEINTLO_ATTR;
5077 5078                  break;
5078 5079          case SFMMU_SETATTR:
5079 5080                  ASSERT(!(attr & ~HAT_PROT_MASK));
5080 5081                  ttemaskp->ll = 0;
5081 5082                  ttevalue.ll = 0;
5082 5083                  /*
5083 5084                   * a valid tte implies exec and read for sfmmu
5084 5085                   * so no need to do anything about them.
5085 5086                   * since privileged access implies user access
5086 5087                   * PROT_USER doesn't make sense either.
5087 5088                   */
5088 5089                  if (attr & PROT_WRITE) {
5089 5090                          ttemaskp->tte_intlo |= TTE_WRPRM_INT;
5090 5091                          ttevalue.tte_intlo |= TTE_WRPRM_INT;
5091 5092                  }
5092 5093                  break;
5093 5094          case SFMMU_CLRATTR:
5094 5095                  /* attributes will be NANDed with the current ones */
5095 5096                  if (attr & ~(PROT_WRITE | PROT_USER)) {
5096 5097                          panic("sfmmu: attr %x not supported", attr);
5097 5098                  }
5098 5099                  ttemaskp->ll = 0;
5099 5100                  ttevalue.ll = 0;
5100 5101                  if (attr & PROT_WRITE) {
5101 5102                          /* clear both writable and modify bit */
5102 5103                          ttemaskp->tte_intlo |= TTE_WRPRM_INT | TTE_HWWR_INT;
5103 5104                  }
5104 5105                  if (attr & PROT_USER) {
5105 5106                          ttemaskp->tte_intlo |= TTE_PRIV_INT;
5106 5107                          ttevalue.tte_intlo |= TTE_PRIV_INT;
5107 5108                  }
5108 5109                  break;
5109 5110          default:
5110 5111                  panic("sfmmu_vtop_attr: bad mode %x", mode);
5111 5112          }
5112 5113          ASSERT(TTE_TO_TTEPFN(&ttevalue) == 0);
5113 5114          return (ttevalue.ll);
5114 5115  }
5115 5116  
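The comment above sfmmu_vtop_attr() points out that taking away write permission must also clear the hardware modify bit, because that bit is what lets writes proceed without faulting. A toy illustration of that rule; WRITABLE_BIT and HW_MOD_BIT are made-up stand-ins for TTE_WRPRM_INT and TTE_HWWR_INT.

/*
 * Toy illustration: when write permission is removed, both the
 * software writable bit and the hardware modify bit are cleared so
 * the next write faults and can be detected.  WRITABLE_BIT and
 * HW_MOD_BIT are illustrative stand-ins, not the kernel's TTE bits.
 */
#include <stdint.h>
#include <stdio.h>

#define	WRITABLE_BIT	0x01
#define	HW_MOD_BIT	0x02

static uint64_t
clear_write(uint64_t tte)
{
	/* dropping write permission also drops the modify bit */
	return (tte & ~(uint64_t)(WRITABLE_BIT | HW_MOD_BIT));
}

int
main(void)
{
	uint64_t tte = WRITABLE_BIT | HW_MOD_BIT | 0x10;

	printf("before 0x%llx after 0x%llx\n",
	    (unsigned long long)tte,
	    (unsigned long long)clear_write(tte));
	return (0);
}
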
5116 5117  static uint_t
5117 5118  sfmmu_ptov_attr(tte_t *ttep)
5118 5119  {
5119 5120          uint_t attr;
5120 5121  
5121 5122          ASSERT(TTE_IS_VALID(ttep));
5122 5123  
5123 5124          attr = PROT_READ;
5124 5125  
5125 5126          if (TTE_IS_WRITABLE(ttep)) {
5126 5127                  attr |= PROT_WRITE;
5127 5128          }
5128 5129          if (TTE_IS_EXECUTABLE(ttep)) {
5129 5130                  attr |= PROT_EXEC;
5130 5131          }
5131 5132          if (!TTE_IS_PRIVILEGED(ttep)) {
5132 5133                  attr |= PROT_USER;
5133 5134          }
5134 5135          if (TTE_IS_NFO(ttep)) {
5135 5136                  attr |= HAT_NOFAULT;
5136 5137          }
5137 5138          if (TTE_IS_NOSYNC(ttep)) {
5138 5139                  attr |= HAT_NOSYNC;
5139 5140          }
5140 5141          if (TTE_IS_SIDEFFECT(ttep)) {
5141 5142                  attr |= SFMMU_SIDEFFECT;
5142 5143          }
5143 5144          if (!TTE_IS_VCACHEABLE(ttep)) {
5144 5145                  attr |= SFMMU_UNCACHEVTTE;
5145 5146          }
5146 5147          if (!TTE_IS_PCACHEABLE(ttep)) {
5147 5148                  attr |= SFMMU_UNCACHEPTTE;
5148 5149          }
5149 5150          return (attr);
5150 5151  }
5151 5152  
5152 5153  /*
5153 5154   * hat_chgprot is a deprecated hat call.  New segment drivers
5154 5155   * should store all attributes and use hat_*attr calls.
5155 5156   *
5156 5157   * Change the protections in the virtual address range
5157 5158   * given to the specified virtual protection.  If vprot is ~PROT_WRITE,
5158 5159   * then remove write permission, leaving the other
5159 5160   * permissions unchanged.  If vprot is ~PROT_USER, remove user permissions.
5160 5161   *
5161 5162   */
5162 5163  void
5163 5164  hat_chgprot(struct hat *sfmmup, caddr_t addr, size_t len, uint_t vprot)
5164 5165  {
5165 5166          struct hmehash_bucket *hmebp;
5166 5167          hmeblk_tag hblktag;
5167 5168          int hmeshift, hashno = 1;
5168 5169          struct hme_blk *hmeblkp, *list = NULL;
5169 5170          caddr_t endaddr;
5170 5171          cpuset_t cpuset;
5171 5172          demap_range_t dmr;
5172 5173  
5173 5174          ASSERT((len & MMU_PAGEOFFSET) == 0);
5174 5175          ASSERT(((uintptr_t)addr & MMU_PAGEOFFSET) == 0);
5175 5176  
5176 5177          ASSERT(sfmmup->sfmmu_as != NULL);
5177 5178  
5178 5179          CPUSET_ZERO(cpuset);
5179 5180  
5180 5181          if ((vprot != (uint_t)~PROT_WRITE) && (vprot & PROT_USER) &&
5181 5182              ((addr + len) > (caddr_t)USERLIMIT)) {
5182 5183                  panic("user addr %p vprot %x in kernel space",
5183 5184                      (void *)addr, vprot);
5184 5185          }
5185 5186          endaddr = addr + len;
5186 5187          hblktag.htag_id = sfmmup;
5187 5188          hblktag.htag_rid = SFMMU_INVALID_SHMERID;
5188 5189          DEMAP_RANGE_INIT(sfmmup, &dmr);
5189 5190  
5190 5191          while (addr < endaddr) {
5191 5192                  hmeshift = HME_HASH_SHIFT(hashno);
5192 5193                  hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
5193 5194                  hblktag.htag_rehash = hashno;
5194 5195                  hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift);
5195 5196  
5196 5197                  SFMMU_HASH_LOCK(hmebp);
5197 5198  
5198 5199                  HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list);
5199 5200                  if (hmeblkp != NULL) {
5200 5201                          ASSERT(!hmeblkp->hblk_shared);
5201 5202                          /*
5202 5203                           * We've encountered a shadow hmeblk so skip the range
5203 5204                           * of the next smaller mapping size.
5204 5205                           */
5205 5206                          if (hmeblkp->hblk_shw_bit) {
5206 5207                                  ASSERT(sfmmup != ksfmmup);
5207 5208                                  ASSERT(hashno > 1);
5208 5209                                  addr = (caddr_t)P2END((uintptr_t)addr,
5209 5210                                      TTEBYTES(hashno - 1));
5210 5211                          } else {
5211 5212                                  addr = sfmmu_hblk_chgprot(sfmmup, hmeblkp,
5212 5213                                      addr, endaddr, &dmr, vprot);
5213 5214                          }
5214 5215                          SFMMU_HASH_UNLOCK(hmebp);
5215 5216                          hashno = 1;
5216 5217                          continue;
5217 5218                  }
5218 5219                  SFMMU_HASH_UNLOCK(hmebp);
5219 5220  
5220 5221                  if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) {
5221 5222                          /*
5222 5223                           * We have traversed the whole list and rehashed
5223 5224                           * if necessary without finding the address to chgprot.
5224 5225                           * This is ok so we increment the address by the
5225 5226                           * smallest hmeblk range for kernel mappings and the
5226 5227                           * largest hmeblk range, to account for shadow hmeblks,
5227 5228                           * for user mappings and continue.
5228 5229                           */
5229 5230                          if (sfmmup == ksfmmup)
5230 5231                                  addr = (caddr_t)P2END((uintptr_t)addr,
5231 5232                                      TTEBYTES(1));
5232 5233                          else
5233 5234                                  addr = (caddr_t)P2END((uintptr_t)addr,
5234 5235                                      TTEBYTES(hashno));
5235 5236                          hashno = 1;
5236 5237                  } else {
5237 5238                          hashno++;
5238 5239                  }
5239 5240          }
5240 5241  
5241 5242          sfmmu_hblks_list_purge(&list, 0);
5242 5243          DEMAP_RANGE_FLUSH(&dmr);
5243 5244          cpuset = sfmmup->sfmmu_cpusran;
5244 5245          xt_sync(cpuset);
5245 5246  }
5246 5247  
5247 5248  /*
5248 5249   * This function chgprots a range of addresses in an hmeblk.  It returns the
5249 5250   * next address that needs to be chgprot'd.
5250 5251   * It should be called with the hash lock held.
5251 5252   * XXX It should be possible to optimize chgprot by not flushing every time but
5252 5253   * on the other hand:
5253 5254   * 1. do one flush crosscall.
5254 5255   * 2. only flush if we are increasing permissions (make sure this will work)
5255 5256   */
5256 5257  static caddr_t
5257 5258  sfmmu_hblk_chgprot(sfmmu_t *sfmmup, struct hme_blk *hmeblkp, caddr_t addr,
5258 5259          caddr_t endaddr, demap_range_t *dmrp, uint_t vprot)
5259 5260  {
5260 5261          uint_t pprot;
5261 5262          tte_t tte, ttemod;
5262 5263          struct sf_hment *sfhmep;
5263 5264          uint_t tteflags;
5264 5265          int ttesz;
5265 5266          struct page *pp = NULL;
5266 5267          kmutex_t *pml, *pmtx;
5267 5268          int ret;
5268 5269          int use_demap_range;
5269 5270  #if defined(SF_ERRATA_57)
5270 5271          int check_exec;
5271 5272  #endif
5272 5273  
5273 5274          ASSERT(in_hblk_range(hmeblkp, addr));
5274 5275          ASSERT(hmeblkp->hblk_shw_bit == 0);
5275 5276          ASSERT(!hmeblkp->hblk_shared);
5276 5277  
5277 5278  #ifdef DEBUG
5278 5279          if (get_hblk_ttesz(hmeblkp) != TTE8K &&
5279 5280              (endaddr < get_hblk_endaddr(hmeblkp))) {
5280 5281                  panic("sfmmu_hblk_chgprot: partial chgprot of large page");
5281 5282          }
5282 5283  #endif /* DEBUG */
5283 5284  
5284 5285          endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp));
5285 5286          ttesz = get_hblk_ttesz(hmeblkp);
5286 5287  
5287 5288          pprot = sfmmu_vtop_prot(vprot, &tteflags);
5288 5289  #if defined(SF_ERRATA_57)
5289 5290          check_exec = (sfmmup != ksfmmup) &&
5290 5291              AS_TYPE_64BIT(sfmmup->sfmmu_as) &&
5291 5292              ((vprot & PROT_EXEC) == PROT_EXEC);
5292 5293  #endif
5293 5294          HBLKTOHME(sfhmep, hmeblkp, addr);
5294 5295  
5295 5296          /*
5296 5297           * Flush the current demap region if addresses have been
5297 5298           * skipped or the page size doesn't match.
5298 5299           */
5299 5300          use_demap_range = (TTEBYTES(ttesz) == MMU_PAGESIZE);
5300 5301          if (use_demap_range) {
5301 5302                  DEMAP_RANGE_CONTINUE(dmrp, addr, endaddr);
5302 5303          } else if (dmrp != NULL) {
5303 5304                  DEMAP_RANGE_FLUSH(dmrp);
5304 5305          }
5305 5306  
5306 5307          while (addr < endaddr) {
5307 5308                  sfmmu_copytte(&sfhmep->hme_tte, &tte);
5308 5309                  if (TTE_IS_VALID(&tte)) {
5309 5310                          if (TTE_GET_LOFLAGS(&tte, tteflags) == pprot) {
5310 5311                                  /*
5311 5312                                   * if the new protection is the same as old
5312 5313                                   * continue
5313 5314                                   */
5314 5315                                  goto next_addr;
5315 5316                          }
5316 5317                          pml = NULL;
5317 5318                          pp = sfhmep->hme_page;
5318 5319                          if (pp) {
5319 5320                                  pml = sfmmu_mlist_enter(pp);
5320 5321                          }
5321 5322                          if (pp != sfhmep->hme_page) {
5322 5323                                  /*
5323 5324                                   * tte must have been unloaded
5324 5325                                   * underneath us.  Recheck
5325 5326                                   */
5326 5327                                  ASSERT(pml);
5327 5328                                  sfmmu_mlist_exit(pml);
5328 5329                                  continue;
5329 5330                          }
5330 5331  
5331 5332                          ASSERT(pp == NULL || sfmmu_mlist_held(pp));
5332 5333  
5333 5334                          ttemod = tte;
5334 5335                          TTE_SET_LOFLAGS(&ttemod, tteflags, pprot);
5335 5336  #if defined(SF_ERRATA_57)
5336 5337                          if (check_exec && addr < errata57_limit)
5337 5338                                  ttemod.tte_exec_perm = 0;
5338 5339  #endif
5339 5340                          ret = sfmmu_modifytte_try(&tte, &ttemod,
5340 5341                              &sfhmep->hme_tte);
5341 5342  
5342 5343                          if (ret < 0) {
5343 5344                                  /* tte changed underneath us */
5344 5345                                  if (pml) {
5345 5346                                          sfmmu_mlist_exit(pml);
5346 5347                                  }
5347 5348                                  continue;
5348 5349                          }
5349 5350  
5350 5351                          if (tteflags & TTE_HWWR_INT) {
5351 5352                                  /*
5352 5353                                   * need to sync if we are clearing modify bit.
5353 5354                                   */
5354 5355                                  sfmmu_ttesync(sfmmup, addr, &tte, pp);
5355 5356                          }
5356 5357  
5357 5358                          if (pp && PP_ISRO(pp)) {
5358 5359                                  if (pprot & TTE_WRPRM_INT) {
5359 5360                                          pmtx = sfmmu_page_enter(pp);
5360 5361                                          PP_CLRRO(pp);
5361 5362                                          sfmmu_page_exit(pmtx);
5362 5363                                  }
5363 5364                          }
5364 5365  
5365 5366                          if (ret > 0 && use_demap_range) {
5366 5367                                  DEMAP_RANGE_MARKPG(dmrp, addr);
5367 5368                          } else if (ret > 0) {
5368 5369                                  sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0);
5369 5370                          }
5370 5371  
5371 5372                          if (pml) {
5372 5373                                  sfmmu_mlist_exit(pml);
5373 5374                          }
5374 5375                  }
5375 5376  next_addr:
5376 5377                  addr += TTEBYTES(ttesz);
5377 5378                  sfhmep++;
5378 5379                  DEMAP_RANGE_NEXTPG(dmrp);
5379 5380          }
5380 5381          return (addr);
5381 5382  }
5382 5383  
5383 5384  /*
5384 5385   * This routine is deprecated and should only be used by hat_chgprot.
5385 5386   * The correct routine is sfmmu_vtop_attr.
5386 5387   * This routine converts virtual page protections to physical ones.  It will
5387 5388   * update the tteflags field with the tte mask corresponding to the protections
5388 5389   * affected and it returns the new protections.  It will also clear the modify
5389 5390   * bit if we are taking away write permission.  This is necessary since the
5390 5391   * modify bit is the hardware permission bit and we need to clear it in order
5391 5392   * to detect write faults.
5392 5393   * It accepts the following special protections:
5393 5394   * ~PROT_WRITE = remove write permissions.
5394 5395   * ~PROT_USER = remove user permissions.
5395 5396   */
5396 5397  static uint_t
5397 5398  sfmmu_vtop_prot(uint_t vprot, uint_t *tteflagsp)
5398 5399  {
5399 5400          if (vprot == (uint_t)~PROT_WRITE) {
5400 5401                  *tteflagsp = TTE_WRPRM_INT | TTE_HWWR_INT;
5401 5402                  return (0);             /* will cause wrprm to be cleared */
5402 5403          }
5403 5404          if (vprot == (uint_t)~PROT_USER) {
5404 5405                  *tteflagsp = TTE_PRIV_INT;
5405 5406                  return (0);             /* will cause privprm to be cleared */
5406 5407          }
5407 5408          if ((vprot == 0) || (vprot == PROT_USER) ||
5408 5409              ((vprot & PROT_ALL) != vprot)) {
5409 5410                  panic("sfmmu_vtop_prot -- bad prot %x", vprot);
5410 5411          }
5411 5412  
5412 5413          switch (vprot) {
5413 5414          case (PROT_READ):
5414 5415          case (PROT_EXEC):
5415 5416          case (PROT_EXEC | PROT_READ):
5416 5417                  *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT | TTE_HWWR_INT;
5417 5418                  return (TTE_PRIV_INT);          /* set prv and clr wrt */
5418 5419          case (PROT_WRITE):
5419 5420          case (PROT_WRITE | PROT_READ):
5420 5421          case (PROT_EXEC | PROT_WRITE):
5421 5422          case (PROT_EXEC | PROT_WRITE | PROT_READ):
5422 5423                  *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT;
5423 5424                  return (TTE_PRIV_INT | TTE_WRPRM_INT);  /* set prv and wrt */
5424 5425          case (PROT_USER | PROT_READ):
5425 5426          case (PROT_USER | PROT_EXEC):
5426 5427          case (PROT_USER | PROT_EXEC | PROT_READ):
5427 5428                  *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT | TTE_HWWR_INT;
5428 5429                  return (0);                     /* clr prv and wrt */
5429 5430          case (PROT_USER | PROT_WRITE):
5430 5431          case (PROT_USER | PROT_WRITE | PROT_READ):
5431 5432          case (PROT_USER | PROT_EXEC | PROT_WRITE):
5432 5433          case (PROT_USER | PROT_EXEC | PROT_WRITE | PROT_READ):
5433 5434                  *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT;
5434 5435                  return (TTE_WRPRM_INT);         /* clr prv and set wrt */
5435 5436          default:
5436 5437                  panic("sfmmu_vtop_prot -- bad prot %x", vprot);
5437 5438          }
5438 5439          return (0);
5439 5440  }
5440 5441  
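As the comment above spells out, the deprecated hat_chgprot() path accepts two special encodings: ~PROT_WRITE strips only write permission and ~PROT_USER strips only user access, while any other value is an absolute protection. A self-contained sketch of that dispatch; the PROT_* values and prot_apply() below are illustrative, not the system definitions.

/*
 * Minimal sketch of the special vprot encodings accepted by
 * hat_chgprot()/sfmmu_vtop_prot(): ~PROT_WRITE strips write
 * permission, ~PROT_USER strips user access, anything else is an
 * absolute protection.  prot_apply() and these PROT_* values are
 * illustrative, not the kernel definitions.
 */
#include <stdio.h>

#define	PROT_READ	0x1
#define	PROT_WRITE	0x2
#define	PROT_EXEC	0x4
#define	PROT_USER	0x8

static unsigned
prot_apply(unsigned cur, unsigned vprot)
{
	if (vprot == (unsigned)~PROT_WRITE)
		return (cur & ~PROT_WRITE);	/* remove write only */
	if (vprot == (unsigned)~PROT_USER)
		return (cur & ~PROT_USER);	/* remove user only */
	return (vprot);				/* absolute setting */
}

int
main(void)
{
	unsigned cur = PROT_READ | PROT_WRITE | PROT_USER;

	printf("0x%x\n", prot_apply(cur, ~PROT_WRITE));
	printf("0x%x\n", prot_apply(cur, ~PROT_USER));
	printf("0x%x\n", prot_apply(cur, PROT_READ | PROT_EXEC));
	return (0);
}
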
5441 5442  /*
5442 5443   * Alternate unload for very large virtual ranges. With a true 64 bit VA,
5443 5444   * the normal algorithm would take too long for a very large VA range with
5444 5445   * few real mappings. This routine just walks thru all HMEs in the global
5445 5446   * hash table to find and remove mappings.
5446 5447   */
5447 5448  static void
5448 5449  hat_unload_large_virtual(
5449 5450          struct hat              *sfmmup,
5450 5451          caddr_t                 startaddr,
5451 5452          size_t                  len,
5452 5453          uint_t                  flags,
5453 5454          hat_callback_t          *callback)
5454 5455  {
5455 5456          struct hmehash_bucket *hmebp;
5456 5457          struct hme_blk *hmeblkp;
5457 5458          struct hme_blk *pr_hblk = NULL;
5458 5459          struct hme_blk *nx_hblk;
5459 5460          struct hme_blk *list = NULL;
5460 5461          int i;
5461 5462          demap_range_t dmr, *dmrp;
5462 5463          cpuset_t cpuset;
5463 5464          caddr_t endaddr = startaddr + len;
5464 5465          caddr_t sa;
5465 5466          caddr_t ea;
5466 5467          caddr_t cb_sa[MAX_CB_ADDR];
5467 5468          caddr_t cb_ea[MAX_CB_ADDR];
5468 5469          int     addr_cnt = 0;
5469 5470          int     a = 0;
5470 5471  
5471 5472          if (sfmmup->sfmmu_free) {
5472 5473                  dmrp = NULL;
5473 5474          } else {
5474 5475                  dmrp = &dmr;
5475 5476                  DEMAP_RANGE_INIT(sfmmup, dmrp);
5476 5477          }
5477 5478  
5478 5479          /*
5479 5480           * Loop through all the hash buckets of HME blocks looking for matches.
5480 5481           */
5481 5482          for (i = 0; i <= UHMEHASH_SZ; i++) {
5482 5483                  hmebp = &uhme_hash[i];
5483 5484                  SFMMU_HASH_LOCK(hmebp);
5484 5485                  hmeblkp = hmebp->hmeblkp;
5485 5486                  pr_hblk = NULL;
5486 5487                  while (hmeblkp) {
5487 5488                          nx_hblk = hmeblkp->hblk_next;
5488 5489  
5489 5490                          /*
5490 5491                           * skip if not this context, if a shadow block or
5491 5492                           * if the mapping is not in the requested range
5492 5493                           */
5493 5494                          if (hmeblkp->hblk_tag.htag_id != sfmmup ||
5494 5495                              hmeblkp->hblk_shw_bit ||
5495 5496                              (sa = (caddr_t)get_hblk_base(hmeblkp)) >= endaddr ||
5496 5497                              (ea = get_hblk_endaddr(hmeblkp)) <= startaddr) {
5497 5498                                  pr_hblk = hmeblkp;
5498 5499                                  goto next_block;
5499 5500                          }
5500 5501  
5501 5502                          ASSERT(!hmeblkp->hblk_shared);
5502 5503                          /*
5503 5504                           * unload if there are any current valid mappings
5504 5505                           */
5505 5506                          if (hmeblkp->hblk_vcnt != 0 ||
5506 5507                              hmeblkp->hblk_hmecnt != 0)
5507 5508                                  (void) sfmmu_hblk_unload(sfmmup, hmeblkp,
5508 5509                                      sa, ea, dmrp, flags);
5509 5510  
5510 5511                          /*
5511 5512                           * on unmap we also release the HME block itself, once
5512 5513                           * all mappings are gone.
5513 5514                           */
5514 5515                          if ((flags & HAT_UNLOAD_UNMAP) != 0 &&
5515 5516                              !hmeblkp->hblk_vcnt &&
5516 5517                              !hmeblkp->hblk_hmecnt) {
5517 5518                                  ASSERT(!hmeblkp->hblk_lckcnt);
5518 5519                                  sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk,
5519 5520                                      &list, 0);
5520 5521                          } else {
5521 5522                                  pr_hblk = hmeblkp;
5522 5523                          }
5523 5524  
5524 5525                          if (callback == NULL)
5525 5526                                  goto next_block;
5526 5527  
5527 5528                          /*
5528 5529                           * HME blocks may span more than one page, but we may be
5529 5530                           * unmapping only one page, so check for a smaller range
5530 5531                           * for the callback
5531 5532                           */
5532 5533                          if (sa < startaddr)
5533 5534                                  sa = startaddr;
5534 5535                          if (--ea > endaddr)
5535 5536                                  ea = endaddr - 1;
5536 5537  
5537 5538                          cb_sa[addr_cnt] = sa;
5538 5539                          cb_ea[addr_cnt] = ea;
5539 5540                          if (++addr_cnt == MAX_CB_ADDR) {
5540 5541                                  if (dmrp != NULL) {
5541 5542                                          DEMAP_RANGE_FLUSH(dmrp);
5542 5543                                          cpuset = sfmmup->sfmmu_cpusran;
5543 5544                                          xt_sync(cpuset);
5544 5545                                  }
5545 5546  
5546 5547                                  for (a = 0; a < MAX_CB_ADDR; ++a) {
5547 5548                                          callback->hcb_start_addr = cb_sa[a];
5548 5549                                          callback->hcb_end_addr = cb_ea[a];
5549 5550                                          callback->hcb_function(callback);
5550 5551                                  }
5551 5552                                  addr_cnt = 0;
5552 5553                          }
5553 5554  
5554 5555  next_block:
5555 5556                          hmeblkp = nx_hblk;
5556 5557                  }
5557 5558                  SFMMU_HASH_UNLOCK(hmebp);
5558 5559          }
5559 5560  
5560 5561          sfmmu_hblks_list_purge(&list, 0);
5561 5562          if (dmrp != NULL) {
5562 5563                  DEMAP_RANGE_FLUSH(dmrp);
5563 5564                  cpuset = sfmmup->sfmmu_cpusran;
5564 5565                  xt_sync(cpuset);
5565 5566          }
5566 5567  
5567 5568          for (a = 0; a < addr_cnt; ++a) {
5568 5569                  callback->hcb_start_addr = cb_sa[a];
5569 5570                  callback->hcb_end_addr = cb_ea[a];
5570 5571                  callback->hcb_function(callback);
5571 5572          }
5572 5573  
5573 5574          /*
5574 5575           * Check TSB and TLB page sizes if the process isn't exiting.
5575 5576           */
5576 5577          if (!sfmmup->sfmmu_free)
5577 5578                  sfmmu_check_page_sizes(sfmmup, 0);
5578 5579  }
5579 5580  
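hat_unload_callback() below only takes this hash-walking path when probing the range at 4M steps would cost more than scanning every user hash bucket, i.e. when (len >> TTE_PAGE_SHIFT(TTE4M)) exceeds UHMEHASH_SZ. A back-of-the-envelope sketch of that decision; the constants below are samples only, the real values are platform dependent.

/*
 * Back-of-the-envelope sketch of the heuristic hat_unload_callback()
 * uses: if stepping through the VA range at 4M granularity would take
 * more probes than there are user hash buckets, walking the hash
 * table directly is cheaper.  SAMPLE_4M_SHIFT and SAMPLE_UHMEHASH_SZ
 * are illustrative stand-ins for TTE_PAGE_SHIFT(TTE4M) and
 * UHMEHASH_SZ.
 */
#include <stddef.h>
#include <stdio.h>

#define	SAMPLE_4M_SHIFT		22		/* log2(4MB) */
#define	SAMPLE_UHMEHASH_SZ	0x3fff		/* example bucket count */

static int
use_hash_walk(size_t len)
{
	return ((len >> SAMPLE_4M_SHIFT) > SAMPLE_UHMEHASH_SZ);
}

int
main(void)
{
	printf("1GB range: %d\n", use_hash_walk((size_t)1 << 30));
	printf("1TB range: %d\n", use_hash_walk((size_t)1 << 40));
	return (0);
}
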
5580 5581  /*
5581 5582   * Unload all the mappings in the range [addr..addr+len). addr and len must
5582 5583   * be MMU_PAGESIZE aligned.
5583 5584   */
5584 5585  
5585 5586  extern struct seg *segkmap;
5586 5587  #define ISSEGKMAP(sfmmup, addr) (sfmmup == ksfmmup && \
5587 5588  segkmap->s_base <= (addr) && (addr) < (segkmap->s_base + segkmap->s_size))
5588 5589  
5589 5590  
5590 5591  void
5591 5592  hat_unload_callback(
5592 5593          struct hat *sfmmup,
5593 5594          caddr_t addr,
5594 5595          size_t len,
5595 5596          uint_t flags,
5596 5597          hat_callback_t *callback)
5597 5598  {
5598 5599          struct hmehash_bucket *hmebp;
5599 5600          hmeblk_tag hblktag;
5600 5601          int hmeshift, hashno, iskernel;
5601 5602          struct hme_blk *hmeblkp, *pr_hblk, *list = NULL;
5602 5603          caddr_t endaddr;
5603 5604          cpuset_t cpuset;
5604 5605          int addr_count = 0;
5605 5606          int a;
5606 5607          caddr_t cb_start_addr[MAX_CB_ADDR];
5607 5608          caddr_t cb_end_addr[MAX_CB_ADDR];
5608 5609          int issegkmap = ISSEGKMAP(sfmmup, addr);
5609 5610          demap_range_t dmr, *dmrp;
5610 5611  
5611 5612          ASSERT(sfmmup->sfmmu_as != NULL);
5612 5613  
5613 5614          ASSERT((sfmmup == ksfmmup) || (flags & HAT_UNLOAD_OTHER) || \
5614 5615              AS_LOCK_HELD(sfmmup->sfmmu_as));
5615 5616  
5616 5617          ASSERT(sfmmup != NULL);
5617 5618          ASSERT((len & MMU_PAGEOFFSET) == 0);
5618 5619          ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET));
5619 5620  
5620 5621          /*
5621 5622           * Probing through a large VA range (say 63 bits) will be slow, even
5622 5623           * at 4 Meg steps between the probes. So, when the virtual address range
5623 5624           * is very large, search the HME entries for what to unload.
5624 5625           *
5625 5626           *      len >> TTE_PAGE_SHIFT(TTE4M) is the # of 4Meg probes we'd need
5626 5627           *
5627 5628           *      UHMEHASH_SZ is number of hash buckets to examine
5628 5629           *
5629 5630           */
5630 5631          if (sfmmup != KHATID && (len >> TTE_PAGE_SHIFT(TTE4M)) > UHMEHASH_SZ) {
5631 5632                  hat_unload_large_virtual(sfmmup, addr, len, flags, callback);
5632 5633                  return;
5633 5634          }
5634 5635  
5635 5636          CPUSET_ZERO(cpuset);
5636 5637  
5637 5638          /*
5638 5639           * If the process is exiting, we can save a lot of fuss since
5639 5640           * we'll flush the TLB when we free the ctx anyway.
5640 5641           */
5641 5642          if (sfmmup->sfmmu_free) {
5642 5643                  dmrp = NULL;
5643 5644          } else {
5644 5645                  dmrp = &dmr;
5645 5646                  DEMAP_RANGE_INIT(sfmmup, dmrp);
5646 5647          }
5647 5648  
5648 5649          endaddr = addr + len;
5649 5650          hblktag.htag_id = sfmmup;
5650 5651          hblktag.htag_rid = SFMMU_INVALID_SHMERID;
5651 5652  
5652 5653          /*
5653 5654           * It is likely for the vm to call unload over a wide range of
5654 5655           * addresses that are actually very sparsely populated by
5655 5656           * translations.  In order to speed this up the sfmmu hat supports
5656 5657           * the concept of shadow hmeblks. Dummy large page hmeblks that
5657 5658           * correspond to actual small translations are allocated at tteload
5658 5659           * time and are referred to as shadow hmeblks.  Now, during unload
5659 5660           * time, we first check if we have a shadow hmeblk for that
5660 5661           * translation.  The absence of one means the corresponding address
5661 5662           * range is empty and can be skipped.
5662 5663           *
5663 5664           * The kernel is an exception to above statement and that is why
5664 5665           * we don't use shadow hmeblks and hash starting from the smallest
5665 5666           * page size.
5666 5667           */
5667 5668          if (sfmmup == KHATID) {
5668 5669                  iskernel = 1;
5669 5670                  hashno = TTE64K;
5670 5671          } else {
5671 5672                  iskernel = 0;
5672 5673                  if (mmu_page_sizes == max_mmu_page_sizes) {
5673 5674                          hashno = TTE256M;
5674 5675                  } else {
5675 5676                          hashno = TTE4M;
5676 5677                  }
5677 5678          }
5678 5679          while (addr < endaddr) {
5679 5680                  hmeshift = HME_HASH_SHIFT(hashno);
5680 5681                  hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
5681 5682                  hblktag.htag_rehash = hashno;
5682 5683                  hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift);
5683 5684  
5684 5685                  SFMMU_HASH_LOCK(hmebp);
5685 5686  
5686 5687                  HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, pr_hblk, &list);
5687 5688                  if (hmeblkp == NULL) {
5688 5689                          /*
5689 5690                           * didn't find an hmeblk. skip the appropriate
5690 5691                           * address range.
5691 5692                           */
5692 5693                          SFMMU_HASH_UNLOCK(hmebp);
5693 5694                          if (iskernel) {
5694 5695                                  if (hashno < mmu_hashcnt) {
5695 5696                                          hashno++;
5696 5697                                          continue;
5697 5698                                  } else {
5698 5699                                          hashno = TTE64K;
5699 5700                                          addr = (caddr_t)roundup((uintptr_t)addr
5700 5701                                              + 1, MMU_PAGESIZE64K);
5701 5702                                          continue;
5702 5703                                  }
5703 5704                          }
5704 5705                          addr = (caddr_t)roundup((uintptr_t)addr + 1,
5705 5706                              (1 << hmeshift));
5706 5707                          if ((uintptr_t)addr & MMU_PAGEOFFSET512K) {
5707 5708                                  ASSERT(hashno == TTE64K);
5708 5709                                  continue;
5709 5710                          }
5710 5711                          if ((uintptr_t)addr & MMU_PAGEOFFSET4M) {
5711 5712                                  hashno = TTE512K;
5712 5713                                  continue;
5713 5714                          }
5714 5715                          if (mmu_page_sizes == max_mmu_page_sizes) {
5715 5716                                  if ((uintptr_t)addr & MMU_PAGEOFFSET32M) {
5716 5717                                          hashno = TTE4M;
5717 5718                                          continue;
5718 5719                                  }
5719 5720                                  if ((uintptr_t)addr & MMU_PAGEOFFSET256M) {
5720 5721                                          hashno = TTE32M;
5721 5722                                          continue;
5722 5723                                  }
5723 5724                                  hashno = TTE256M;
5724 5725                                  continue;
5725 5726                          } else {
5726 5727                                  hashno = TTE4M;
5727 5728                                  continue;
5728 5729                          }
5729 5730                  }
5730 5731                  ASSERT(hmeblkp);
5731 5732                  ASSERT(!hmeblkp->hblk_shared);
5732 5733                  if (!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) {
5733 5734                          /*
5734 5735                           * If the valid count is zero we can skip the range
5735 5736                           * mapped by this hmeblk.
5736 5737                           * We free hblks in the case of HAT_UNMAP.  HAT_UNMAP
5737 5738                           * is used by segment drivers as a hint
5738 5739                           * that the mapping resource won't be used any longer.
5739 5740                           * The best example of this is during exit().
5740 5741                           */
5741 5742                          addr = (caddr_t)roundup((uintptr_t)addr + 1,
5742 5743                              get_hblk_span(hmeblkp));
5743 5744                          if ((flags & HAT_UNLOAD_UNMAP) ||
5744 5745                              (iskernel && !issegkmap)) {
5745 5746                                  sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk,
5746 5747                                      &list, 0);
5747 5748                          }
5748 5749                          SFMMU_HASH_UNLOCK(hmebp);
5749 5750  
5750 5751                          if (iskernel) {
5751 5752                                  hashno = TTE64K;
5752 5753                                  continue;
5753 5754                          }
5754 5755                          if ((uintptr_t)addr & MMU_PAGEOFFSET512K) {
5755 5756                                  ASSERT(hashno == TTE64K);
5756 5757                                  continue;
5757 5758                          }
5758 5759                          if ((uintptr_t)addr & MMU_PAGEOFFSET4M) {
5759 5760                                  hashno = TTE512K;
5760 5761                                  continue;
5761 5762                          }
5762 5763                          if (mmu_page_sizes == max_mmu_page_sizes) {
5763 5764                                  if ((uintptr_t)addr & MMU_PAGEOFFSET32M) {
5764 5765                                          hashno = TTE4M;
5765 5766                                          continue;
5766 5767                                  }
5767 5768                                  if ((uintptr_t)addr & MMU_PAGEOFFSET256M) {
5768 5769                                          hashno = TTE32M;
5769 5770                                          continue;
5770 5771                                  }
5771 5772                                  hashno = TTE256M;
5772 5773                                  continue;
5773 5774                          } else {
5774 5775                                  hashno = TTE4M;
5775 5776                                  continue;
5776 5777                          }
5777 5778                  }
5778 5779                  if (hmeblkp->hblk_shw_bit) {
5779 5780                          /*
5780 5781                           * If we encounter a shadow hmeblk we know there are
5781 5782                           * smaller sized hmeblks mapping the same address space.
5782 5783                           * Decrement the hash size and rehash.
5783 5784                           */
5784 5785                          ASSERT(sfmmup != KHATID);
5785 5786                          hashno--;
5786 5787                          SFMMU_HASH_UNLOCK(hmebp);
5787 5788                          continue;
5788 5789                  }
5789 5790  
5790 5791                  /*
5791 5792                   * track callback address ranges.
5792 5793                   * only start a new range when it's not contiguous
5793 5794                   */
5794 5795                  if (callback != NULL) {
5795 5796                          if (addr_count > 0 &&
5796 5797                              addr == cb_end_addr[addr_count - 1])
5797 5798                                  --addr_count;
5798 5799                          else
5799 5800                                  cb_start_addr[addr_count] = addr;
5800 5801                  }
5801 5802  
5802 5803                  addr = sfmmu_hblk_unload(sfmmup, hmeblkp, addr, endaddr,
5803 5804                      dmrp, flags);
5804 5805  
5805 5806                  if (callback != NULL)
5806 5807                          cb_end_addr[addr_count++] = addr;
5807 5808  
5808 5809                  if (((flags & HAT_UNLOAD_UNMAP) || (iskernel && !issegkmap)) &&
5809 5810                      !hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) {
5810 5811                          sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk, &list, 0);
5811 5812                  }
5812 5813                  SFMMU_HASH_UNLOCK(hmebp);
5813 5814  
5814 5815                  /*
5815 5816                   * Notify our caller as to exactly which pages
5816 5817                   * have been unloaded. We do these in clumps,
5817 5818                   * to minimize the number of xt_sync()s that need to occur.
5818 5819                   */
5819 5820                  if (callback != NULL && addr_count == MAX_CB_ADDR) {
5820 5821                          if (dmrp != NULL) {
5821 5822                                  DEMAP_RANGE_FLUSH(dmrp);
5822 5823                                  cpuset = sfmmup->sfmmu_cpusran;
5823 5824                                  xt_sync(cpuset);
5824 5825                          }
5825 5826  
5826 5827                          for (a = 0; a < MAX_CB_ADDR; ++a) {
5827 5828                                  callback->hcb_start_addr = cb_start_addr[a];
5828 5829                                  callback->hcb_end_addr = cb_end_addr[a];
5829 5830                                  callback->hcb_function(callback);
5830 5831                          }
5831 5832                          addr_count = 0;
5832 5833                  }
5833 5834                  if (iskernel) {
5834 5835                          hashno = TTE64K;
5835 5836                          continue;
5836 5837                  }
5837 5838                  if ((uintptr_t)addr & MMU_PAGEOFFSET512K) {
5838 5839                          ASSERT(hashno == TTE64K);
5839 5840                          continue;
5840 5841                  }
5841 5842                  if ((uintptr_t)addr & MMU_PAGEOFFSET4M) {
5842 5843                          hashno = TTE512K;
5843 5844                          continue;
5844 5845                  }
5845 5846                  if (mmu_page_sizes == max_mmu_page_sizes) {
5846 5847                          if ((uintptr_t)addr & MMU_PAGEOFFSET32M) {
5847 5848                                  hashno = TTE4M;
5848 5849                                  continue;
5849 5850                          }
5850 5851                          if ((uintptr_t)addr & MMU_PAGEOFFSET256M) {
5851 5852                                  hashno = TTE32M;
5852 5853                                  continue;
5853 5854                          }
5854 5855                          hashno = TTE256M;
5855 5856                  } else {
5856 5857                          hashno = TTE4M;
5857 5858                  }
5858 5859          }
5859 5860  
5860 5861          sfmmu_hblks_list_purge(&list, 0);
5861 5862          if (dmrp != NULL) {
5862 5863                  DEMAP_RANGE_FLUSH(dmrp);
5863 5864                  cpuset = sfmmup->sfmmu_cpusran;
5864 5865                  xt_sync(cpuset);
5865 5866          }
5866 5867          if (callback && addr_count != 0) {
5867 5868                  for (a = 0; a < addr_count; ++a) {
5868 5869                          callback->hcb_start_addr = cb_start_addr[a];
5869 5870                          callback->hcb_end_addr = cb_end_addr[a];
5870 5871                          callback->hcb_function(callback);
5871 5872                  }
5872 5873          }
5873 5874  
5874 5875          /*
5875 5876           * Check TSB and TLB page sizes if the process isn't exiting.
5876 5877           */
5877 5878          if (!sfmmup->sfmmu_free)
5878 5879                  sfmmu_check_page_sizes(sfmmup, 0);
5879 5880  }
5880 5881  
5881 5882  /*
5882 5883   * Unload all the mappings in the range [addr..addr+len). addr and len must
5883 5884   * be MMU_PAGESIZE aligned.
5884 5885   */
5885 5886  void
5886 5887  hat_unload(struct hat *sfmmup, caddr_t addr, size_t len, uint_t flags)
5887 5888  {
5888 5889          hat_unload_callback(sfmmup, addr, len, flags, NULL);
5889 5890  }
5890 5891  
5891 5892  
5892 5893  /*
5893 5894   * Find the largest mapping size for this page.
5894 5895   */
5895 5896  int
5896 5897  fnd_mapping_sz(page_t *pp)
5897 5898  {
5898 5899          int sz;
5899 5900          int p_index;
5900 5901  
5901 5902          p_index = PP_MAPINDEX(pp);
5902 5903  
5903 5904          sz = 0;
5904 5905          p_index >>= 1;  /* don't care about 8K bit */
5905 5906          for (; p_index; p_index >>= 1) {
5906 5907                  sz++;
5907 5908          }
5908 5909  
5909 5910          return (sz);
5910 5911  }
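
For context: fnd_mapping_sz() is just a highest-set-bit scan over PP_MAPINDEX with the 8K bit shifted away first. A minimal stand-alone sketch of that scan, using made-up demo values rather than real p_index contents:

#include <stdio.h>

/*
 * Same scan as fnd_mapping_sz(): drop the 8K bit, then count how many
 * times the remaining index can be halved before it reaches zero.
 */
static int
demo_mapping_sz(unsigned int p_index)
{
        int sz = 0;

        p_index >>= 1;                  /* don't care about 8K bit */
        for (; p_index; p_index >>= 1)
                sz++;
        return (sz);
}

int
main(void)
{
        /* 0x1 = only 8K mapped -> 0; 0x2 -> 1; 0x8 -> 3 */
        printf("%d %d %d\n", demo_mapping_sz(0x1),
            demo_mapping_sz(0x2), demo_mapping_sz(0x8));
        return (0);
}
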
5911 5912  
5912 5913  /*
5913 5914   * This function unloads a range of addresses for an hmeblk.
5914 5915   * It returns the next address to be unloaded.
5915 5916   * It should be called with the hash lock held.
5916 5917   */
5917 5918  static caddr_t
5918 5919  sfmmu_hblk_unload(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr,
5919 5920          caddr_t endaddr, demap_range_t *dmrp, uint_t flags)
5920 5921  {
5921 5922          tte_t   tte, ttemod;
5922 5923          struct  sf_hment *sfhmep;
5923 5924          int     ttesz;
5924 5925          long    ttecnt;
5925 5926          page_t *pp;
5926 5927          kmutex_t *pml;
5927 5928          int ret;
5928 5929          int use_demap_range;
5929 5930  
5930 5931          ASSERT(in_hblk_range(hmeblkp, addr));
5931 5932          ASSERT(!hmeblkp->hblk_shw_bit);
5932 5933          ASSERT(sfmmup != NULL || hmeblkp->hblk_shared);
5933 5934          ASSERT(sfmmup == NULL || !hmeblkp->hblk_shared);
5934 5935          ASSERT(dmrp == NULL || !hmeblkp->hblk_shared);
5935 5936  
5936 5937  #ifdef DEBUG
5937 5938          if (get_hblk_ttesz(hmeblkp) != TTE8K &&
5938 5939              (endaddr < get_hblk_endaddr(hmeblkp))) {
5939 5940                  panic("sfmmu_hblk_unload: partial unload of large page");
5940 5941          }
5941 5942  #endif /* DEBUG */
5942 5943  
5943 5944          endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp));
5944 5945          ttesz = get_hblk_ttesz(hmeblkp);
5945 5946  
5946 5947          use_demap_range = ((dmrp == NULL) ||
5947 5948              (TTEBYTES(ttesz) == DEMAP_RANGE_PGSZ(dmrp)));
5948 5949  
5949 5950          if (use_demap_range) {
5950 5951                  DEMAP_RANGE_CONTINUE(dmrp, addr, endaddr);
5951 5952          } else if (dmrp != NULL) {
5952 5953                  DEMAP_RANGE_FLUSH(dmrp);
5953 5954          }
5954 5955          ttecnt = 0;
5955 5956          HBLKTOHME(sfhmep, hmeblkp, addr);
5956 5957  
5957 5958          while (addr < endaddr) {
5958 5959                  pml = NULL;
5959 5960                  sfmmu_copytte(&sfhmep->hme_tte, &tte);
5960 5961                  if (TTE_IS_VALID(&tte)) {
5961 5962                          pp = sfhmep->hme_page;
5962 5963                          if (pp != NULL) {
5963 5964                                  pml = sfmmu_mlist_enter(pp);
5964 5965                          }
5965 5966  
5966 5967                          /*
5967 5968                           * Verify if hme still points to 'pp' now that
5968 5969                           * we have p_mapping lock.
5969 5970                           */
5970 5971                          if (sfhmep->hme_page != pp) {
5971 5972                                  if (pp != NULL && sfhmep->hme_page != NULL) {
5972 5973                                          ASSERT(pml != NULL);
5973 5974                                          sfmmu_mlist_exit(pml);
5974 5975                                          /* Re-start this iteration. */
5975 5976                                          continue;
5976 5977                                  }
5977 5978                                  ASSERT((pp != NULL) &&
5978 5979                                      (sfhmep->hme_page == NULL));
5979 5980                                  goto tte_unloaded;
5980 5981                          }
5981 5982  
5982 5983                          /*
5983 5984                           * From this point on we hold both the HASH and
5984 5985                           * p_mapping locks.
5985 5986                           */
5986 5987                          ASSERT(pp == sfhmep->hme_page);
5987 5988                          ASSERT(pp == NULL || sfmmu_mlist_held(pp));
5988 5989  
5989 5990                          /*
5990 5991                           * We need to loop on modify tte because it is
5991 5992                           * possible for pagesync to come along and
5992 5993                           * change the software bits beneath us.
5993 5994                           *
5994 5995                           * Page_unload can also invalidate the tte after
5995 5996                           * we read tte outside of p_mapping lock.
5996 5997                           */
5997 5998  again:
5998 5999                          ttemod = tte;
5999 6000  
6000 6001                          TTE_SET_INVALID(&ttemod);
6001 6002                          ret = sfmmu_modifytte_try(&tte, &ttemod,
6002 6003                              &sfhmep->hme_tte);
6003 6004  
6004 6005                          if (ret <= 0) {
6005 6006                                  if (TTE_IS_VALID(&tte)) {
6006 6007                                          ASSERT(ret < 0);
6007 6008                                          goto again;
6008 6009                                  }
6009 6010                                  if (pp != NULL) {
6010 6011                                          panic("sfmmu_hblk_unload: pp = 0x%p "
6011 6012                                              "tte became invalid under mlist"
6012 6013                                              " lock = 0x%p", (void *)pp,
6013 6014                                              (void *)pml);
6014 6015                                  }
6015 6016                                  continue;
6016 6017                          }
6017 6018  
6018 6019                          if (!(flags & HAT_UNLOAD_NOSYNC)) {
6019 6020                                  sfmmu_ttesync(sfmmup, addr, &tte, pp);
6020 6021                          }
6021 6022  
6022 6023                          /*
6023 6024                           * Ok- we invalidated the tte. Do the rest of the job.
6024 6025                           */
6025 6026                          ttecnt++;
6026 6027  
6027 6028                          if (flags & HAT_UNLOAD_UNLOCK) {
6028 6029                                  ASSERT(hmeblkp->hblk_lckcnt > 0);
6029 6030                                  atomic_dec_32(&hmeblkp->hblk_lckcnt);
6030 6031                                  HBLK_STACK_TRACE(hmeblkp, HBLK_UNLOCK);
6031 6032                          }
6032 6033  
6033 6034                          /*
6034 6035                           * Normally we would need to flush the page
6035 6036                           * from the virtual cache at this point in
6036 6037                           * order to prevent a potential cache alias
6037 6038                           * inconsistency.
6038 6039                           * The particular scenario we need to worry
6039 6040                           * about is:
6040 6041                           * Given:  va1 and va2 are two virtual addresses
6041 6042                           * that alias and map the same physical
6042 6043                           * address.
6043 6044                           * 1.   mapping exists from va1 to pa and data
6044 6045                           * has been read into the cache.
6045 6046                           * 2.   unload va1.
6046 6047                           * 3.   load va2 and modify data using va2.
6047 6048                           * 4.   unload va2.
6048 6049                           * 5.   load va1 and reference data.  Unless we
6049 6050                           * flush the data cache when we unload we will
6050 6051                           * get stale data.
6051 6052                           * Fortunately, page coloring eliminates the
6052 6053                           * above scenario by remembering the color a
6053 6054                           * physical page was last or is currently
6054 6055                           * mapped to.  Now, we delay the flush until
6055 6056                           * the loading of translations.  Only when the
6056 6057                           * new translation is of a different color
6057 6058                           * are we forced to flush.
6058 6059                           */
6059 6060                          if (use_demap_range) {
6060 6061                                  /*
6061 6062                                   * Mark this page as needing a demap.
6062 6063                                   */
6063 6064                                  DEMAP_RANGE_MARKPG(dmrp, addr);
6064 6065                          } else {
6065 6066                                  ASSERT(sfmmup != NULL);
6066 6067                                  ASSERT(!hmeblkp->hblk_shared);
6067 6068                                  sfmmu_tlb_demap(addr, sfmmup, hmeblkp,
6068 6069                                      sfmmup->sfmmu_free, 0);
6069 6070                          }
6070 6071  
6071 6072                          if (pp) {
6072 6073                                  /*
6073 6074                                   * Remove the hment from the mapping list
6074 6075                                   */
6075 6076                                  ASSERT(hmeblkp->hblk_hmecnt > 0);
6076 6077  
6077 6078                                  /*
6078 6079                                   * Again, we cannot
6079 6080                                   * ASSERT(hmeblkp->hblk_hmecnt <= NHMENTS);
6080 6081                                   */
6081 6082                                  HME_SUB(sfhmep, pp);
6082 6083                                  membar_stst();
6083 6084                                  atomic_dec_16(&hmeblkp->hblk_hmecnt);
6084 6085                          }
6085 6086  
6086 6087                          ASSERT(hmeblkp->hblk_vcnt > 0);
6087 6088                          atomic_dec_16(&hmeblkp->hblk_vcnt);
6088 6089  
6089 6090                          ASSERT(hmeblkp->hblk_hmecnt || hmeblkp->hblk_vcnt ||
6090 6091                              !hmeblkp->hblk_lckcnt);
6091 6092  
6092 6093  #ifdef VAC
6093 6094                          if (pp && (pp->p_nrm & (P_KPMC | P_KPMS | P_TNC))) {
6094 6095                                  if (PP_ISTNC(pp)) {
6095 6096                                          /*
6096 6097                                           * If the page was temporarily
6097 6098                                           * uncached, try to recache
6098 6099                                           * it. Note that HME_SUB() was
6099 6100                                           * called above so p_index and
6100 6101                                           * mlist had been updated.
6101 6102                                           */
6102 6103                                          conv_tnc(pp, ttesz);
6103 6104                                  } else if (pp->p_mapping == NULL) {
6104 6105                                          ASSERT(kpm_enable);
6105 6106                                          /*
6106 6107                                           * Page is marked to be in VAC conflict
6107 6108                                           * to an existing kpm mapping and/or is
6108 6109                                           * kpm mapped using only the regular
6109 6110                                           * pagesize.
6110 6111                                           */
6111 6112                                          sfmmu_kpm_hme_unload(pp);
6112 6113                                  }
6113 6114                          }
6114 6115  #endif  /* VAC */
6115 6116                  } else if ((pp = sfhmep->hme_page) != NULL) {
6116 6117                                  /*
6117 6118                                   * TTE is invalid but the hme
6118 6119                                   * still exists. Let pageunload
6119 6120                                   * complete its job.
6120 6121                                   */
6121 6122                                  ASSERT(pml == NULL);
6122 6123                                  pml = sfmmu_mlist_enter(pp);
6123 6124                                  if (sfhmep->hme_page != NULL) {
6124 6125                                          sfmmu_mlist_exit(pml);
6125 6126                                          continue;
6126 6127                                  }
6127 6128                                  ASSERT(sfhmep->hme_page == NULL);
6128 6129                  } else if (hmeblkp->hblk_hmecnt != 0) {
6129 6130                          /*
6130 6131                           * pageunload may not have finished decrementing
6131 6132                           * hblk_vcnt and hblk_hmecnt. Find page_t if any and
6132 6133                           * wait for pageunload to finish. Rely on pageunload
6133 6134                           * to decrement hblk_hmecnt after hblk_vcnt.
6134 6135                           */
6135 6136                          pfn_t pfn = TTE_TO_TTEPFN(&tte);
6136 6137                          ASSERT(pml == NULL);
6137 6138                          if (pf_is_memory(pfn)) {
6138 6139                                  pp = page_numtopp_nolock(pfn);
6139 6140                                  if (pp != NULL) {
6140 6141                                          pml = sfmmu_mlist_enter(pp);
6141 6142                                          sfmmu_mlist_exit(pml);
6142 6143                                          pml = NULL;
6143 6144                                  }
6144 6145                          }
6145 6146                  }
6146 6147  
6147 6148  tte_unloaded:
6148 6149                  /*
6149 6150                   * At this point, the tte we are looking at
6150 6151                   * should be unloaded, and hme has been unlinked
6151 6152                   * from the page too. This is important because in
6152 6153                   * pageunload, it does ttesync() then HME_SUB.
6153 6154                   * We need to make sure HME_SUB has been completed
6154 6155                   * so we know ttesync() has been completed. Otherwise,
6155 6156                   * at exit time, after return from hat layer, VM will
6156 6157                   * release the as structure which hat_setstat() (called
6157 6158                   * by ttesync()) needs.
6158 6159                   */
6159 6160  #ifdef DEBUG
6160 6161                  {
6161 6162                          tte_t   dtte;
6162 6163  
6163 6164                          ASSERT(sfhmep->hme_page == NULL);
6164 6165  
6165 6166                          sfmmu_copytte(&sfhmep->hme_tte, &dtte);
6166 6167                          ASSERT(!TTE_IS_VALID(&dtte));
6167 6168                  }
6168 6169  #endif
6169 6170  
6170 6171                  if (pml) {
6171 6172                          sfmmu_mlist_exit(pml);
6172 6173                  }
6173 6174  
6174 6175                  addr += TTEBYTES(ttesz);
6175 6176                  sfhmep++;
6176 6177                  DEMAP_RANGE_NEXTPG(dmrp);
6177 6178          }
6178 6179          /*
6179 6180           * For shared hmeblks this routine is only called when region is freed
6180 6181           * and no longer referenced.  So no need to decrement ttecnt
6181 6182           * in the region structure here.
6182 6183           */
6183 6184          if (ttecnt > 0 && sfmmup != NULL) {
6184 6185                  atomic_add_long(&sfmmup->sfmmu_ttecnt[ttesz], -ttecnt);
6185 6186          }
6186 6187          return (addr);
6187 6188  }
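
The invalidation above relies on the usual sfmmu_modifytte_try() shape: snapshot the TTE, build a modified copy, and retry whenever the entry changed underneath (for example, pagesync updating the software bits). A rough user-space analog of that retry loop, written with C11 atomics instead of the kernel's TTE helpers; every name below is illustrative only:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_TTE_VALID  (1ULL << 63)

/*
 * Retry until the valid bit is cleared, tolerating concurrent updates
 * to other bits, in the spirit of the sfmmu_modifytte_try() loop.
 */
static uint64_t
demo_invalidate(_Atomic uint64_t *ttep)
{
        uint64_t old = atomic_load(ttep);

        for (;;) {
                uint64_t new = old & ~DEMO_TTE_VALID;

                if (atomic_compare_exchange_weak(ttep, &old, new))
                        return (old);   /* snapshot we invalidated */
                /* 'old' was refreshed by the failed CAS; retry. */
        }
}

int
main(void)
{
        _Atomic uint64_t tte = DEMO_TTE_VALID | 0x1234;

        (void) demo_invalidate(&tte);
        printf("tte now 0x%llx\n", (unsigned long long)atomic_load(&tte));
        return (0);
}

Built as C11, this prints the value with the valid bit cleared. The kernel loop differs in that a failed try also checks whether the entry was already invalidated by a racing pageunload instead of blindly retrying.
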
6188 6189  
6189 6190  /*
6190      - * Invalidate a virtual address range for the local CPU.
6191      - * For best performance ensure that the va range is completely
6192      - * mapped, otherwise the entire TLB will be flushed.
     6191 + * Flush the TLB for the local CPU
     6192 + * Invoked from a slave CPU during panic() dumps.
6193 6193   */
6194 6194  void
6195      -hat_flush_range(struct hat *sfmmup, caddr_t va, size_t size)
     6195 +hat_flush(void)
6196 6196  {
6197      -        ssize_t sz;
6198      -        caddr_t endva = va + size;
6199      -
6200      -        while (va < endva) {
6201      -                sz = hat_getpagesize(sfmmup, va);
6202      -                if (sz < 0) {
6203      -                        vtag_flushall();
6204      -                        break;
6205      -                }
6206      -                vtag_flushpage(va, (uint64_t)sfmmup);
6207      -                va += sz;
6208      -        }
     6197 +        vtag_flushall();
6209 6198  }
6210 6199  
6211 6200  /*
6212 6201   * Synchronize all the mappings in the range [addr..addr+len).
6213 6202   * Can be called with clearflag having two states:
6214 6203   * HAT_SYNC_DONTZERO means just return the rm stats
6215 6204   * HAT_SYNC_ZERORM means zero rm bits in the tte and return the stats
6216 6205   */
6217 6206  void
6218 6207  hat_sync(struct hat *sfmmup, caddr_t addr, size_t len, uint_t clearflag)
6219 6208  {
6220 6209          struct hmehash_bucket *hmebp;
6221 6210          hmeblk_tag hblktag;
6222 6211          int hmeshift, hashno = 1;
6223 6212          struct hme_blk *hmeblkp, *list = NULL;
6224 6213          caddr_t endaddr;
6225 6214          cpuset_t cpuset;
6226 6215  
6227 6216          ASSERT((sfmmup == ksfmmup) || AS_LOCK_HELD(sfmmup->sfmmu_as));
6228 6217          ASSERT((len & MMU_PAGEOFFSET) == 0);
6229 6218          ASSERT((clearflag == HAT_SYNC_DONTZERO) ||
6230 6219              (clearflag == HAT_SYNC_ZERORM));
6231 6220  
6232 6221          CPUSET_ZERO(cpuset);
6233 6222  
6234 6223          endaddr = addr + len;
6235 6224          hblktag.htag_id = sfmmup;
6236 6225          hblktag.htag_rid = SFMMU_INVALID_SHMERID;
6237 6226  
6238 6227          /*
6239 6228           * Spitfire supports 4 page sizes.
6240 6229           * Most pages are expected to be of the smallest page
6241 6230           * size (8K) and these will not need to be rehashed. 64K
6242 6231           * pages also don't need to be rehashed because an hmeblk
6243 6232           * spans 64K of address space. 512K pages might need 1 rehash
6244 6233           * and 4M pages 2 rehashes.
6245 6234           */
6246 6235          while (addr < endaddr) {
6247 6236                  hmeshift = HME_HASH_SHIFT(hashno);
6248 6237                  hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
6249 6238                  hblktag.htag_rehash = hashno;
6250 6239                  hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift);
6251 6240  
6252 6241                  SFMMU_HASH_LOCK(hmebp);
6253 6242  
6254 6243                  HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list);
6255 6244                  if (hmeblkp != NULL) {
6256 6245                          ASSERT(!hmeblkp->hblk_shared);
6257 6246                          /*
6258 6247                           * We've encountered a shadow hmeblk so skip the range
6259 6248                           * of the next smaller mapping size.
6260 6249                           */
6261 6250                          if (hmeblkp->hblk_shw_bit) {
6262 6251                                  ASSERT(sfmmup != ksfmmup);
6263 6252                                  ASSERT(hashno > 1);
6264 6253                                  addr = (caddr_t)P2END((uintptr_t)addr,
6265 6254                                      TTEBYTES(hashno - 1));
6266 6255                          } else {
6267 6256                                  addr = sfmmu_hblk_sync(sfmmup, hmeblkp,
6268 6257                                      addr, endaddr, clearflag);
6269 6258                          }
6270 6259                          SFMMU_HASH_UNLOCK(hmebp);
6271 6260                          hashno = 1;
6272 6261                          continue;
6273 6262                  }
6274 6263                  SFMMU_HASH_UNLOCK(hmebp);
6275 6264  
6276 6265                  if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) {
6277 6266                          /*
6278 6267                           * We have traversed the whole list and rehashed
6279 6268                           * if necessary without finding the address to sync.
6280 6269                           * This is OK, so we increment the address by the
6281 6270                           * smallest hmeblk range for kernel mappings, and by the
6282 6271                           * largest hmeblk range (to account for shadow hmeblks)
6283 6272                           * for user mappings, and continue.
6284 6273                           */
6285 6274                          if (sfmmup == ksfmmup)
6286 6275                                  addr = (caddr_t)P2END((uintptr_t)addr,
6287 6276                                      TTEBYTES(1));
6288 6277                          else
6289 6278                                  addr = (caddr_t)P2END((uintptr_t)addr,
6290 6279                                      TTEBYTES(hashno));
6291 6280                          hashno = 1;
6292 6281                  } else {
6293 6282                          hashno++;
6294 6283                  }
6295 6284          }
6296 6285          sfmmu_hblks_list_purge(&list, 0);
6297 6286          cpuset = sfmmup->sfmmu_cpusran;
6298 6287          xt_sync(cpuset);
6299 6288  }
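
When no hmeblk is found, the loop above skips ahead by rounding the address up to the next boundary of the current hash's page size with P2END(). A quick stand-alone illustration of that rounding, using a simplified power-of-two helper in place of the sysmacros.h definition:

#include <stdio.h>
#include <stdint.h>

/*
 * End of the naturally aligned block containing 'x' (align must be a
 * power of two) -- the effect P2END() has in the rehash loop above.
 */
static uintptr_t
demo_p2end(uintptr_t x, uintptr_t align)
{
        return ((x & ~(align - 1)) + align);
}

int
main(void)
{
        /* round up to the next 8K and 4M boundaries */
        printf("0x%lx\n", (unsigned long)demo_p2end(0x12345, 0x2000));
        printf("0x%lx\n", (unsigned long)demo_p2end(0x12345, 0x400000));
        return (0);
}
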
6300 6289  
6301 6290  static caddr_t
6302 6291  sfmmu_hblk_sync(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr,
6303 6292          caddr_t endaddr, int clearflag)
6304 6293  {
6305 6294          tte_t   tte, ttemod;
6306 6295          struct sf_hment *sfhmep;
6307 6296          int ttesz;
6308 6297          struct page *pp;
6309 6298          kmutex_t *pml;
6310 6299          int ret;
6311 6300  
6312 6301          ASSERT(hmeblkp->hblk_shw_bit == 0);
6313 6302          ASSERT(!hmeblkp->hblk_shared);
6314 6303  
6315 6304          endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp));
6316 6305  
6317 6306          ttesz = get_hblk_ttesz(hmeblkp);
6318 6307          HBLKTOHME(sfhmep, hmeblkp, addr);
6319 6308  
6320 6309          while (addr < endaddr) {
6321 6310                  sfmmu_copytte(&sfhmep->hme_tte, &tte);
6322 6311                  if (TTE_IS_VALID(&tte)) {
6323 6312                          pml = NULL;
6324 6313                          pp = sfhmep->hme_page;
6325 6314                          if (pp) {
6326 6315                                  pml = sfmmu_mlist_enter(pp);
6327 6316                          }
6328 6317                          if (pp != sfhmep->hme_page) {
6329 6318                                  /*
6330 6319                                   * tte must have been unloaded
6331 6320                                   * underneath us.  Recheck
6332 6321                                   */
6333 6322                                  ASSERT(pml);
6334 6323                                  sfmmu_mlist_exit(pml);
6335 6324                                  continue;
6336 6325                          }
6337 6326  
6338 6327                          ASSERT(pp == NULL || sfmmu_mlist_held(pp));
6339 6328  
6340 6329                          if (clearflag == HAT_SYNC_ZERORM) {
6341 6330                                  ttemod = tte;
6342 6331                                  TTE_CLR_RM(&ttemod);
6343 6332                                  ret = sfmmu_modifytte_try(&tte, &ttemod,
6344 6333                                      &sfhmep->hme_tte);
6345 6334                                  if (ret < 0) {
6346 6335                                          if (pml) {
6347 6336                                                  sfmmu_mlist_exit(pml);
6348 6337                                          }
6349 6338                                          continue;
6350 6339                                  }
6351 6340  
6352 6341                                  if (ret > 0) {
6353 6342                                          sfmmu_tlb_demap(addr, sfmmup,
6354 6343                                              hmeblkp, 0, 0);
6355 6344                                  }
6356 6345                          }
6357 6346                          sfmmu_ttesync(sfmmup, addr, &tte, pp);
6358 6347                          if (pml) {
6359 6348                                  sfmmu_mlist_exit(pml);
6360 6349                          }
6361 6350                  }
6362 6351                  addr += TTEBYTES(ttesz);
6363 6352                  sfhmep++;
6364 6353          }
6365 6354          return (addr);
6366 6355  }
6367 6356  
6368 6357  /*
6369 6358   * This function will sync a tte to the page struct and it will
6370 6359   * update the hat stats. Currently it allows us to pass a NULL pp
6371 6360   * and we will simply update the stats.  We may want to change this
6372 6361   * so we only keep stats for pages backed by pp's.
6373 6362   */
6374 6363  static void
6375 6364  sfmmu_ttesync(struct hat *sfmmup, caddr_t addr, tte_t *ttep, page_t *pp)
6376 6365  {
6377 6366          uint_t rm = 0;
6378 6367          int     sz;
6379 6368          pgcnt_t npgs;
6380 6369  
6381 6370          ASSERT(TTE_IS_VALID(ttep));
6382 6371  
6383 6372          if (TTE_IS_NOSYNC(ttep)) {
6384 6373                  return;
6385 6374          }
6386 6375  
6387 6376          if (TTE_IS_REF(ttep))  {
6388 6377                  rm = P_REF;
6389 6378          }
6390 6379          if (TTE_IS_MOD(ttep))  {
6391 6380                  rm |= P_MOD;
6392 6381          }
6393 6382  
6394 6383          if (rm == 0) {
6395 6384                  return;
6396 6385          }
6397 6386  
6398 6387          sz = TTE_CSZ(ttep);
6399 6388          if (sfmmup != NULL && sfmmup->sfmmu_rmstat) {
6400 6389                  int i;
6401 6390                  caddr_t vaddr = addr;
6402 6391  
6403 6392                  for (i = 0; i < TTEPAGES(sz); i++, vaddr += MMU_PAGESIZE) {
6404 6393                          hat_setstat(sfmmup->sfmmu_as, vaddr, MMU_PAGESIZE, rm);
6405 6394                  }
6406 6395  
6407 6396          }
6408 6397  
6409 6398          /*
6410 6399           * XXX I want to use cas to update nrm bits but they
6411 6400           * currently belong in common/vm and not in hat where
6412 6401           * they should be.
6413 6402           * The nrm bits are protected by the same mutex as
6414 6403           * the one that protects the page's mapping list.
6415 6404           */
6416 6405          if (!pp)
6417 6406                  return;
6418 6407          ASSERT(sfmmu_mlist_held(pp));
6419 6408          /*
6420 6409           * If the tte is for a large page, we need to sync all the
6421 6410           * pages covered by the tte.
6422 6411           */
6423 6412          if (sz != TTE8K) {
6424 6413                  ASSERT(pp->p_szc != 0);
6425 6414                  pp = PP_GROUPLEADER(pp, sz);
6426 6415                  ASSERT(sfmmu_mlist_held(pp));
6427 6416          }
6428 6417  
6429 6418          /* Get number of pages from tte size. */
6430 6419          npgs = TTEPAGES(sz);
6431 6420  
6432 6421          do {
6433 6422                  ASSERT(pp);
6434 6423                  ASSERT(sfmmu_mlist_held(pp));
6435 6424                  if (((rm & P_REF) != 0 && !PP_ISREF(pp)) ||
6436 6425                      ((rm & P_MOD) != 0 && !PP_ISMOD(pp)))
6437 6426                          hat_page_setattr(pp, rm);
6438 6427  
6439 6428                  /*
6440 6429                   * Are we done? If not, we must have a large mapping.
6441 6430                   * For large mappings we need to sync the rest of the pages
6442 6431                   * covered by this tte; goto the next page.
6443 6432                   * covered by this tte; go to the next page.
6444 6433          } while (--npgs > 0 && (pp = PP_PAGENEXT(pp)));
6445 6434  }
6446 6435  
6447 6436  /*
6448 6437   * Execute pre-callback handler of each pa_hment linked to pp
6449 6438   *
6450 6439   * Inputs:
6451 6440   *   flag: either HAT_PRESUSPEND or HAT_SUSPEND.
6452 6441   *   capture_cpus: pointer to return value (below)
6453 6442   *
6454 6443   * Returns:
6455 6444   *   Propagates the subsystem callback return values back to the caller;
6456 6445   *   returns 0 on success.  If capture_cpus is non-NULL, the value returned
6457 6446   *   is zero if all of the pa_hments are of a type that do not require
6458 6447   *   capturing CPUs prior to suspending the mapping, else it is 1.
6459 6448   */
6460 6449  static int
6461 6450  hat_pageprocess_precallbacks(struct page *pp, uint_t flag, int *capture_cpus)
6462 6451  {
6463 6452          struct sf_hment *sfhmep;
6464 6453          struct pa_hment *pahmep;
6465 6454          int (*f)(caddr_t, uint_t, uint_t, void *);
6466 6455          int             ret;
6467 6456          id_t            id;
6468 6457          int             locked = 0;
6469 6458          kmutex_t        *pml;
6470 6459  
6471 6460          ASSERT(PAGE_EXCL(pp));
6472 6461          if (!sfmmu_mlist_held(pp)) {
6473 6462                  pml = sfmmu_mlist_enter(pp);
6474 6463                  locked = 1;
6475 6464          }
6476 6465  
6477 6466          if (capture_cpus)
6478 6467                  *capture_cpus = 0;
6479 6468  
6480 6469  top:
6481 6470          for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) {
6482 6471                  /*
6483 6472                   * skip sf_hments corresponding to VA<->PA mappings;
6484 6473                   * for pa_hment's, hme_tte.ll is zero
6485 6474                   */
6486 6475                  if (!IS_PAHME(sfhmep))
6487 6476                          continue;
6488 6477  
6489 6478                  pahmep = sfhmep->hme_data;
6490 6479                  ASSERT(pahmep != NULL);
6491 6480  
6492 6481                  /*
6493 6482                   * skip if pre-handler has been called earlier in this loop
6494 6483                   */
6495 6484                  if (pahmep->flags & flag)
6496 6485                          continue;
6497 6486  
6498 6487                  id = pahmep->cb_id;
6499 6488                  ASSERT(id >= (id_t)0 && id < sfmmu_cb_nextid);
6500 6489                  if (capture_cpus && sfmmu_cb_table[id].capture_cpus != 0)
6501 6490                          *capture_cpus = 1;
6502 6491                  if ((f = sfmmu_cb_table[id].prehandler) == NULL) {
6503 6492                          pahmep->flags |= flag;
6504 6493                          continue;
6505 6494                  }
6506 6495  
6507 6496                  /*
6508 6497                   * Drop the mapping list lock to avoid locking order issues.
6509 6498                   */
6510 6499                  if (locked)
6511 6500                          sfmmu_mlist_exit(pml);
6512 6501  
6513 6502                  ret = f(pahmep->addr, pahmep->len, flag, pahmep->pvt);
6514 6503                  if (ret != 0)
6515 6504                          return (ret);   /* caller must do the cleanup */
6516 6505  
6517 6506                  if (locked) {
6518 6507                          pml = sfmmu_mlist_enter(pp);
6519 6508                          pahmep->flags |= flag;
6520 6509                          goto top;
6521 6510                  }
6522 6511  
6523 6512                  pahmep->flags |= flag;
6524 6513          }
6525 6514  
6526 6515          if (locked)
6527 6516                  sfmmu_mlist_exit(pml);
6528 6517  
6529 6518          return (0);
6530 6519  }
6531 6520  
6532 6521  /*
6533 6522   * Execute post-callback handler of each pa_hment linked to pp
6534 6523   *
6535 6524   * Same overall assumptions and restrictions apply as for
6536 6525   * hat_pageprocess_precallbacks().
6537 6526   */
6538 6527  static void
6539 6528  hat_pageprocess_postcallbacks(struct page *pp, uint_t flag)
6540 6529  {
6541 6530          pfn_t pgpfn = pp->p_pagenum;
6542 6531          pfn_t pgmask = btop(page_get_pagesize(pp->p_szc)) - 1;
6543 6532          pfn_t newpfn;
6544 6533          struct sf_hment *sfhmep;
6545 6534          struct pa_hment *pahmep;
6546 6535          int (*f)(caddr_t, uint_t, uint_t, void *, pfn_t);
6547 6536          id_t    id;
6548 6537          int     locked = 0;
6549 6538          kmutex_t *pml;
6550 6539  
6551 6540          ASSERT(PAGE_EXCL(pp));
6552 6541          if (!sfmmu_mlist_held(pp)) {
6553 6542                  pml = sfmmu_mlist_enter(pp);
6554 6543                  locked = 1;
6555 6544          }
6556 6545  
6557 6546  top:
6558 6547          for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) {
6559 6548                  /*
6560 6549                   * skip sf_hments corresponding to VA<->PA mappings;
6561 6550                   * for pa_hment's, hme_tte.ll is zero
6562 6551                   */
6563 6552                  if (!IS_PAHME(sfhmep))
6564 6553                          continue;
6565 6554  
6566 6555                  pahmep = sfhmep->hme_data;
6567 6556                  ASSERT(pahmep != NULL);
6568 6557  
6569 6558                  if ((pahmep->flags & flag) == 0)
6570 6559                          continue;
6571 6560  
6572 6561                  pahmep->flags &= ~flag;
6573 6562  
6574 6563                  id = pahmep->cb_id;
6575 6564                  ASSERT(id >= (id_t)0 && id < sfmmu_cb_nextid);
6576 6565                  if ((f = sfmmu_cb_table[id].posthandler) == NULL)
6577 6566                          continue;
6578 6567  
6579 6568                  /*
6580 6569                   * Convert the base page PFN into the constituent PFN
6581 6570                   * which is needed by the callback handler.
6582 6571                   */
6583 6572                  newpfn = pgpfn | (btop((uintptr_t)pahmep->addr) & pgmask);
6584 6573  
6585 6574                  /*
6586 6575                   * Drop the mapping list lock to avoid locking order issues.
6587 6576                   */
6588 6577                  if (locked)
6589 6578                          sfmmu_mlist_exit(pml);
6590 6579  
6591 6580                  if (f(pahmep->addr, pahmep->len, flag, pahmep->pvt, newpfn)
6592 6581                      != 0)
6593 6582                          panic("sfmmu: posthandler failed");
6594 6583  
6595 6584                  if (locked) {
6596 6585                          pml = sfmmu_mlist_enter(pp);
6597 6586                          goto top;
6598 6587                  }
6599 6588          }
6600 6589  
6601 6590          if (locked)
6602 6591                  sfmmu_mlist_exit(pml);
6603 6592  }
6604 6593  
6605 6594  /*
6606 6595   * Suspend locked kernel mapping
6607 6596   */
6608 6597  void
6609 6598  hat_pagesuspend(struct page *pp)
6610 6599  {
6611 6600          struct sf_hment *sfhmep;
6612 6601          sfmmu_t *sfmmup;
6613 6602          tte_t tte, ttemod;
6614 6603          struct hme_blk *hmeblkp;
6615 6604          caddr_t addr;
6616 6605          int index, cons;
6617 6606          cpuset_t cpuset;
6618 6607  
6619 6608          ASSERT(PAGE_EXCL(pp));
6620 6609          ASSERT(sfmmu_mlist_held(pp));
6621 6610  
6622 6611          mutex_enter(&kpr_suspendlock);
6623 6612  
6624 6613          /*
6625 6614           * We're about to suspend a kernel mapping so mark this thread as
6626 6615           * non-traceable by DTrace. This prevents us from running into issues
6627 6616           * with probe context trying to touch a suspended page
6628 6617           * in the relocation codepath itself.
6629 6618           */
6630 6619          curthread->t_flag |= T_DONTDTRACE;
6631 6620  
6632 6621          index = PP_MAPINDEX(pp);
6633 6622          cons = TTE8K;
6634 6623  
6635 6624  retry:
6636 6625          for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) {
6637 6626  
6638 6627                  if (IS_PAHME(sfhmep))
6639 6628                          continue;
6640 6629  
6641 6630                  if (get_hblk_ttesz(sfmmu_hmetohblk(sfhmep)) != cons)
6642 6631                          continue;
6643 6632  
6644 6633                  /*
6645 6634                   * Loop until we successfully set the suspend bit in
6646 6635                   * the TTE.
6647 6636                   */
6648 6637  again:
6649 6638                  sfmmu_copytte(&sfhmep->hme_tte, &tte);
6650 6639                  ASSERT(TTE_IS_VALID(&tte));
6651 6640  
6652 6641                  ttemod = tte;
6653 6642                  TTE_SET_SUSPEND(&ttemod);
6654 6643                  if (sfmmu_modifytte_try(&tte, &ttemod,
6655 6644                      &sfhmep->hme_tte) < 0)
6656 6645                          goto again;
6657 6646  
6658 6647                  /*
6659 6648                   * Invalidate TSB entry
6660 6649                   */
6661 6650                  hmeblkp = sfmmu_hmetohblk(sfhmep);
6662 6651  
6663 6652                  sfmmup = hblktosfmmu(hmeblkp);
6664 6653                  ASSERT(sfmmup == ksfmmup);
6665 6654                  ASSERT(!hmeblkp->hblk_shared);
6666 6655  
6667 6656                  addr = tte_to_vaddr(hmeblkp, tte);
6668 6657  
6669 6658                  /*
6670 6659                   * No need to make sure that the TSB for this sfmmu is
6671 6660                   * not being relocated since it is ksfmmup and thus it
6672 6661                   * will never be relocated.
6673 6662                   */
6674 6663                  SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, 0);
6675 6664  
6676 6665                  /*
6677 6666                   * Update xcall stats
6678 6667                   */
6679 6668                  cpuset = cpu_ready_set;
6680 6669                  CPUSET_DEL(cpuset, CPU->cpu_id);
6681 6670  
6682 6671                  /* LINTED: constant in conditional context */
6683 6672                  SFMMU_XCALL_STATS(ksfmmup);
6684 6673  
6685 6674                  /*
6686 6675                   * Flush TLB entry on remote CPUs
6687 6676                   */
6688 6677                  xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)addr,
6689 6678                      (uint64_t)ksfmmup);
6690 6679                  xt_sync(cpuset);
6691 6680  
6692 6681                  /*
6693 6682                   * Flush TLB entry on local CPU
6694 6683                   */
6695 6684                  vtag_flushpage(addr, (uint64_t)ksfmmup);
6696 6685          }
6697 6686  
6698 6687          while (index != 0) {
6699 6688                  index = index >> 1;
6700 6689                  if (index != 0)
6701 6690                          cons++;
6702 6691                  if (index & 0x1) {
6703 6692                          pp = PP_GROUPLEADER(pp, cons);
6704 6693                          goto retry;
6705 6694                  }
6706 6695          }
6707 6696  }
6708 6697  
6709 6698  #ifdef  DEBUG
6710 6699  
6711 6700  #define N_PRLE  1024
6712 6701  struct prle {
6713 6702          page_t *targ;
6714 6703          page_t *repl;
6715 6704          int status;
6716 6705          int pausecpus;
6717 6706          hrtime_t whence;
6718 6707  };
6719 6708  
6720 6709  static struct prle page_relocate_log[N_PRLE];
6721 6710  static int prl_entry;
6722 6711  static kmutex_t prl_mutex;
6723 6712  
6724 6713  #define PAGE_RELOCATE_LOG(t, r, s, p)                                   \
6725 6714          mutex_enter(&prl_mutex);                                        \
6726 6715          page_relocate_log[prl_entry].targ = *(t);                       \
6727 6716          page_relocate_log[prl_entry].repl = *(r);                       \
6728 6717          page_relocate_log[prl_entry].status = (s);                      \
6729 6718          page_relocate_log[prl_entry].pausecpus = (p);                   \
6730 6719          page_relocate_log[prl_entry].whence = gethrtime();              \
6731 6720          prl_entry = (prl_entry == (N_PRLE - 1))? 0 : prl_entry + 1;     \
6732 6721          mutex_exit(&prl_mutex);
6733 6722  
6734 6723  #else   /* !DEBUG */
6735 6724  #define PAGE_RELOCATE_LOG(t, r, s, p)
6736 6725  #endif
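
PAGE_RELOCATE_LOG() above keeps a fixed-size ring of relocation records protected by a mutex, wrapping back to slot 0 once N_PRLE entries have been written. A small stand-alone sketch of the same ring-logging idea, with pthreads standing in for kernel mutexes and purely hypothetical names:

#include <pthread.h>
#include <stdio.h>
#include <time.h>

#define N_DEMO_LOG      8

struct demo_log_entry {
        int             status;
        long long       whence;
};

static struct demo_log_entry    demo_log[N_DEMO_LOG];
static int                      demo_log_idx;
static pthread_mutex_t          demo_log_lock = PTHREAD_MUTEX_INITIALIZER;

/* Record an event in the fixed-size ring, wrapping like PAGE_RELOCATE_LOG. */
static void
demo_log_event(int status)
{
        (void) pthread_mutex_lock(&demo_log_lock);
        demo_log[demo_log_idx].status = status;
        demo_log[demo_log_idx].whence = (long long)time(NULL);
        demo_log_idx = (demo_log_idx == N_DEMO_LOG - 1) ? 0 : demo_log_idx + 1;
        (void) pthread_mutex_unlock(&demo_log_lock);
}

int
main(void)
{
        for (int i = 0; i < 10; i++)
                demo_log_event(i);
        /* after wrapping, demo_log_idx points at the oldest surviving slot */
        printf("oldest surviving status: %d\n", demo_log[demo_log_idx].status);
        return (0);
}
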
6737 6726  
6738 6727  /*
6739 6728   * Core Kernel Page Relocation Algorithm
6740 6729   *
6741 6730   * Input:
6742 6731   *
6743 6732   * target :     constituent pages are SE_EXCL locked.
6744 6733   * replacement: constituent pages are SE_EXCL locked.
6745 6734   *
6746 6735   * Output:
6747 6736   *
6748 6737   * nrelocp:     number of pages relocated
6749 6738   */
6750 6739  int
6751 6740  hat_page_relocate(page_t **target, page_t **replacement, spgcnt_t *nrelocp)
6752 6741  {
6753 6742          page_t          *targ, *repl;
6754 6743          page_t          *tpp, *rpp;
6755 6744          kmutex_t        *low, *high;
6756 6745          spgcnt_t        npages, i;
6757 6746          page_t          *pl = NULL;
6758 6747          int             old_pil;
6759 6748          cpuset_t        cpuset;
6760 6749          int             cap_cpus;
6761 6750          int             ret;
6762 6751  #ifdef VAC
6763 6752          int             cflags = 0;
6764 6753  #endif
6765 6754  
6766 6755          if (!kcage_on || PP_ISNORELOC(*target)) {
6767 6756                  PAGE_RELOCATE_LOG(target, replacement, EAGAIN, -1);
6768 6757                  return (EAGAIN);
6769 6758          }
6770 6759  
6771 6760          mutex_enter(&kpr_mutex);
6772 6761          kreloc_thread = curthread;
6773 6762  
6774 6763          targ = *target;
6775 6764          repl = *replacement;
6776 6765          ASSERT(repl != NULL);
6777 6766          ASSERT(targ->p_szc == repl->p_szc);
6778 6767  
6779 6768          npages = page_get_pagecnt(targ->p_szc);
6780 6769  
6781 6770          /*
6782 6771           * unload VA<->PA mappings that are not locked
6783 6772           */
6784 6773          tpp = targ;
6785 6774          for (i = 0; i < npages; i++) {
6786 6775                  (void) hat_pageunload(tpp, SFMMU_KERNEL_RELOC);
6787 6776                  tpp++;
6788 6777          }
6789 6778  
6790 6779          /*
6791 6780           * Do "presuspend" callbacks, in a context from which we can still
6792 6781           * block as needed. Note that we don't hold the mapping list lock
6793 6782           * of "targ" at this point due to potential locking order issues;
6794 6783           * we assume that between the hat_pageunload() above and holding
6795 6784           * the SE_EXCL lock the mapping list *cannot* change at this
6796 6785           * point.
6797 6786           */
6798 6787          ret = hat_pageprocess_precallbacks(targ, HAT_PRESUSPEND, &cap_cpus);
6799 6788          if (ret != 0) {
6800 6789                  /*
6801 6790                   * EIO translates to fatal error, for all others cleanup
6802 6791                   * and return EAGAIN.
6803 6792                   */
6804 6793                  ASSERT(ret != EIO);
6805 6794                  hat_pageprocess_postcallbacks(targ, HAT_POSTUNSUSPEND);
6806 6795                  PAGE_RELOCATE_LOG(target, replacement, ret, -1);
6807 6796                  kreloc_thread = NULL;
6808 6797                  mutex_exit(&kpr_mutex);
6809 6798                  return (EAGAIN);
6810 6799          }
6811 6800  
6812 6801          /*
6813 6802           * acquire p_mapping list lock for both the target and replacement
6814 6803           * root pages.
6815 6804           *
6816 6805           * low and high refer to the need to grab the mlist locks in a
6817 6806           * specific order in order to prevent race conditions.  Thus the
6818 6807           * lower lock must be grabbed before the higher lock.
6819 6808           *
6820 6809           * This will block hat_unload from accessing the p_mapping list.  Since
6821 6810           * we have SE_EXCL lock, hat_memload and hat_pageunload will be
6822 6811           * blocked.  Thus, no one else will be accessing the p_mapping list
6823 6812           * while we suspend and reload the locked mapping below.
6824 6813           */
6825 6814          tpp = targ;
6826 6815          rpp = repl;
6827 6816          sfmmu_mlist_reloc_enter(tpp, rpp, &low, &high);
6828 6817  
6829 6818          kpreempt_disable();
6830 6819  
6831 6820          /*
6832 6821           * We raise our PIL to 13 so that we don't get captured by
6833 6822           * another CPU or pinned by an interrupt thread.  We can't go to
6834 6823           * PIL 14 since the nexus driver(s) may need to interrupt at
6835 6824           * that level in the case of IOMMU pseudo mappings.
6836 6825           */
6837 6826          cpuset = cpu_ready_set;
6838 6827          CPUSET_DEL(cpuset, CPU->cpu_id);
6839 6828          if (!cap_cpus || CPUSET_ISNULL(cpuset)) {
6840 6829                  old_pil = splr(XCALL_PIL);
6841 6830          } else {
6842 6831                  old_pil = -1;
6843 6832                  xc_attention(cpuset);
6844 6833          }
6845 6834          ASSERT(getpil() == XCALL_PIL);
6846 6835  
6847 6836          /*
6848 6837           * Now do suspend callbacks. In the case of an IOMMU mapping
6849 6838           * this will suspend all DMA activity to the page while it is
6850 6839           * being relocated. Since we are well above LOCK_LEVEL and CPUs
6851 6840           * may be captured at this point we should have acquired any needed
6852 6841           * locks in the presuspend callback.
6853 6842           */
6854 6843          ret = hat_pageprocess_precallbacks(targ, HAT_SUSPEND, NULL);
6855 6844          if (ret != 0) {
6856 6845                  repl = targ;
6857 6846                  goto suspend_fail;
6858 6847          }
6859 6848  
6860 6849          /*
6861 6850           * Raise the PIL yet again, this time to block all high-level
6862 6851           * interrupts on this CPU. This is necessary to prevent an
6863 6852           * interrupt routine from pinning the thread which holds the
6864 6853           * mapping suspended and then touching the suspended page.
6865 6854           *
6866 6855           * Once the page is suspended we also need to be careful to
6867 6856           * avoid calling any functions which touch any seg_kmem memory
6868 6857           * since that memory may be backed by the very page we are
6869 6858           * relocating in here!
6870 6859           */
6871 6860          hat_pagesuspend(targ);
6872 6861  
6873 6862          /*
6874 6863           * Now that we are confident everybody has stopped using this page,
6875 6864           * copy the page contents.  Note we use a physical copy to prevent
6876 6865           * locking issues and to avoid fpRAS because we can't handle it in
6877 6866           * this context.
6878 6867           */
6879 6868          for (i = 0; i < npages; i++, tpp++, rpp++) {
6880 6869  #ifdef VAC
6881 6870                  /*
6882 6871                   * If the replacement has a different vcolor than
6883 6872                   * the one being replaced, we need to handle VAC
6884 6873                   * consistency for it just as if we were setting up
6885 6874                   * a new mapping to it.
6886 6875                   */
6887 6876                  if ((PP_GET_VCOLOR(rpp) != NO_VCOLOR) &&
6888 6877                      (tpp->p_vcolor != rpp->p_vcolor) &&
6889 6878                      !CacheColor_IsFlushed(cflags, PP_GET_VCOLOR(rpp))) {
6890 6879                          CacheColor_SetFlushed(cflags, PP_GET_VCOLOR(rpp));
6891 6880                          sfmmu_cache_flushcolor(PP_GET_VCOLOR(rpp),
6892 6881                              rpp->p_pagenum);
6893 6882                  }
6894 6883  #endif
6895 6884                  /*
6896 6885                   * Copy the contents of the page.
6897 6886                   */
6898 6887                  ppcopy_kernel(tpp, rpp);
6899 6888          }
6900 6889  
6901 6890          tpp = targ;
6902 6891          rpp = repl;
6903 6892          for (i = 0; i < npages; i++, tpp++, rpp++) {
6904 6893                  /*
6905 6894                   * Copy attributes.  VAC consistency was handled above,
6906 6895                   * if required.
6907 6896                   */
6908 6897                  rpp->p_nrm = tpp->p_nrm;
6909 6898                  tpp->p_nrm = 0;
6910 6899                  rpp->p_index = tpp->p_index;
6911 6900                  tpp->p_index = 0;
6912 6901  #ifdef VAC
6913 6902                  rpp->p_vcolor = tpp->p_vcolor;
6914 6903  #endif
6915 6904          }
6916 6905  
6917 6906          /*
6918 6907           * First, unsuspend the page, if we set the suspend bit, and transfer
6919 6908           * the mapping list from the target page to the replacement page.
6920 6909           * Next process postcallbacks; since pa_hment's are linked only to the
6921 6910           * p_mapping list of root page, we don't iterate over the constituent
6922 6911           * pages.
6923 6912           */
6924 6913          hat_pagereload(targ, repl);
6925 6914  
6926 6915  suspend_fail:
6927 6916          hat_pageprocess_postcallbacks(repl, HAT_UNSUSPEND);
6928 6917  
6929 6918          /*
6930 6919           * Now lower our PIL and release any captured CPUs since we
6931 6920           * are out of the "danger zone".  After this it will again be
6932 6921           * safe to acquire adaptive mutex locks, or to drop them...
6933 6922           */
6934 6923          if (old_pil != -1) {
6935 6924                  splx(old_pil);
6936 6925          } else {
6937 6926                  xc_dismissed(cpuset);
6938 6927          }
6939 6928  
6940 6929          kpreempt_enable();
6941 6930  
6942 6931          sfmmu_mlist_reloc_exit(low, high);
6943 6932  
6944 6933          /*
6945 6934           * Postsuspend callbacks should drop any locks held across
6946 6935           * the suspend callbacks.  As before, we don't hold the mapping
6947 6936           * list lock at this point; our assumption is that the mapping
6948 6937           * list still can't change due to our holding SE_EXCL lock and
6949 6938           * there being no unlocked mappings left. Hence the restriction
6950 6939           * on calling context to hat_delete_callback()
6951 6940           */
6952 6941          hat_pageprocess_postcallbacks(repl, HAT_POSTUNSUSPEND);
6953 6942          if (ret != 0) {
6954 6943                  /*
6955 6944                   * The second presuspend call failed: we got here through
6956 6945                   * the suspend_fail label above.
6957 6946                   */
6958 6947                  ASSERT(ret != EIO);
6959 6948                  PAGE_RELOCATE_LOG(target, replacement, ret, cap_cpus);
6960 6949                  kreloc_thread = NULL;
6961 6950                  mutex_exit(&kpr_mutex);
6962 6951                  return (EAGAIN);
6963 6952          }
6964 6953  
6965 6954          /*
6966 6955           * Now that we're out of the performance critical section we can
6967 6956           * take care of updating the hash table, since we still
6968 6957           * hold all the pages locked SE_EXCL at this point we
6969 6958           * needn't worry about things changing out from under us.
6970 6959           */
6971 6960          tpp = targ;
6972 6961          rpp = repl;
6973 6962          for (i = 0; i < npages; i++, tpp++, rpp++) {
6974 6963  
6975 6964                  /*
6976 6965                   * replace targ with replacement in page_hash table
6977 6966                   */
6978 6967                  targ = tpp;
6979 6968                  page_relocate_hash(rpp, targ);
6980 6969  
6981 6970                  /*
6982 6971                   * concatenate target; caller of platform_page_relocate()
6983 6972                   * expects target to be concatenated after returning.
6984 6973                   */
6985 6974                  ASSERT(targ->p_next == targ);
6986 6975                  ASSERT(targ->p_prev == targ);
6987 6976                  page_list_concat(&pl, &targ);
6988 6977          }
6989 6978  
6990 6979          ASSERT(*target == pl);
6991 6980          *nrelocp = npages;
6992 6981          PAGE_RELOCATE_LOG(target, replacement, 0, cap_cpus);
6993 6982          kreloc_thread = NULL;
6994 6983          mutex_exit(&kpr_mutex);
6995 6984          return (0);
6996 6985  }
6997 6986  
6998 6987  /*
6999 6988   * Called when stray pa_hments are found attached to a page which is
7000 6989   * being freed.  Notify the subsystem which attached the pa_hment of
7001 6990   * the error if it registered a suitable handler, else panic.
7002 6991   */
7003 6992  static void
7004 6993  sfmmu_pahment_leaked(struct pa_hment *pahmep)
7005 6994  {
7006 6995          id_t cb_id = pahmep->cb_id;
7007 6996  
7008 6997          ASSERT(cb_id >= (id_t)0 && cb_id < sfmmu_cb_nextid);
7009 6998          if (sfmmu_cb_table[cb_id].errhandler != NULL) {
7010 6999                  if (sfmmu_cb_table[cb_id].errhandler(pahmep->addr, pahmep->len,
7011 7000                      HAT_CB_ERR_LEAKED, pahmep->pvt) == 0)
7012 7001                          return;         /* non-fatal */
7013 7002          }
7014 7003          panic("pa_hment leaked: 0x%p", (void *)pahmep);
7015 7004  }
7016 7005  
7017 7006  /*
7018 7007   * Remove all mappings to page 'pp'.
7019 7008   */
7020 7009  int
7021 7010  hat_pageunload(struct page *pp, uint_t forceflag)
7022 7011  {
7023 7012          struct page *origpp = pp;
7024 7013          struct sf_hment *sfhme, *tmphme;
7025 7014          struct hme_blk *hmeblkp;
7026 7015          kmutex_t *pml;
7027 7016  #ifdef VAC
7028 7017          kmutex_t *pmtx;
7029 7018  #endif
7030 7019          cpuset_t cpuset, tset;
7031 7020          int index, cons;
7032 7021          int pa_hments;
7033 7022  
7034 7023          ASSERT(PAGE_EXCL(pp));
7035 7024  
7036 7025          tmphme = NULL;
7037 7026          pa_hments = 0;
7038 7027          CPUSET_ZERO(cpuset);
7039 7028  
7040 7029          pml = sfmmu_mlist_enter(pp);
7041 7030  
7042 7031  #ifdef VAC
7043 7032          if (pp->p_kpmref)
7044 7033                  sfmmu_kpm_pageunload(pp);
7045 7034          ASSERT(!PP_ISMAPPED_KPM(pp));
7046 7035  #endif
7047 7036          /*
7048 7037           * Clear vpm reference. Since the page is exclusively locked
7049 7038           * vpm cannot be referencing it.
7050 7039           */
7051 7040          if (vpm_enable) {
7052 7041                  pp->p_vpmref = 0;
7053 7042          }
7054 7043  
7055 7044          index = PP_MAPINDEX(pp);
7056 7045          cons = TTE8K;
7057 7046  retry:
7058 7047          for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) {
7059 7048                  tmphme = sfhme->hme_next;
7060 7049  
7061 7050                  if (IS_PAHME(sfhme)) {
7062 7051                          ASSERT(sfhme->hme_data != NULL);
7063 7052                          pa_hments++;
7064 7053                          continue;
7065 7054                  }
7066 7055  
7067 7056                  hmeblkp = sfmmu_hmetohblk(sfhme);
7068 7057  
7069 7058                  /*
7070 7059                   * If there are kernel mappings, don't unload them; they will
7071 7060                   * be suspended.
7072 7061                   */
7073 7062                  if (forceflag == SFMMU_KERNEL_RELOC && hmeblkp->hblk_lckcnt &&
7074 7063                      hmeblkp->hblk_tag.htag_id == ksfmmup)
7075 7064                          continue;
7076 7065  
7077 7066                  tset = sfmmu_pageunload(pp, sfhme, cons);
7078 7067                  CPUSET_OR(cpuset, tset);
7079 7068          }
7080 7069  
7081 7070          while (index != 0) {
7082 7071                  index = index >> 1;
7083 7072                  if (index != 0)
7084 7073                          cons++;
7085 7074                  if (index & 0x1) {
7086 7075                          /* Go to leading page */
7087 7076                          pp = PP_GROUPLEADER(pp, cons);
7088 7077                          ASSERT(sfmmu_mlist_held(pp));
7089 7078                          goto retry;
7090 7079                  }
7091 7080          }
7092 7081  
7093 7082          /*
7094 7083           * cpuset may be empty if the page was only mapped by segkpm,
7095 7084           * in which case we won't actually cross-trap.
7096 7085           */
7097 7086          xt_sync(cpuset);
7098 7087  
7099 7088          /*
7100 7089           * The page should have no mappings at this point, unless
7101 7090           * we were called from hat_page_relocate() in which case we
7102 7091           * leave the locked mappings which will be suspended later.
7103 7092           */
7104 7093          ASSERT(!PP_ISMAPPED(origpp) || pa_hments ||
7105 7094              (forceflag == SFMMU_KERNEL_RELOC));
7106 7095  
7107 7096  #ifdef VAC
7108 7097          if (PP_ISTNC(pp)) {
7109 7098                  if (cons == TTE8K) {
7110 7099                          pmtx = sfmmu_page_enter(pp);
7111 7100                          PP_CLRTNC(pp);
7112 7101                          sfmmu_page_exit(pmtx);
7113 7102                  } else {
7114 7103                          conv_tnc(pp, cons);
7115 7104                  }
7116 7105          }
7117 7106  #endif  /* VAC */
7118 7107  
7119 7108          if (pa_hments && forceflag != SFMMU_KERNEL_RELOC) {
7120 7109                  /*
7121 7110                   * Unlink any pa_hments and free them, calling back
7122 7111                   * the responsible subsystem to notify it of the error.
7123 7112                   * This can occur in situations such as drivers leaking
7124 7113                   * DMA handles: naughty, but common enough that we'd like
7125 7114                   * to keep the system running rather than bringing it
7126 7115                   * down with an obscure error like "pa_hment leaked"
7127 7116                   * which doesn't aid the user in debugging their driver.
7128 7117                   */
7129 7118                  for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) {
7130 7119                          tmphme = sfhme->hme_next;
7131 7120                          if (IS_PAHME(sfhme)) {
7132 7121                                  struct pa_hment *pahmep = sfhme->hme_data;
7133 7122                                  sfmmu_pahment_leaked(pahmep);
7134 7123                                  HME_SUB(sfhme, pp);
7135 7124                                  kmem_cache_free(pa_hment_cache, pahmep);
7136 7125                          }
7137 7126                  }
7138 7127  
7139 7128                  ASSERT(!PP_ISMAPPED(origpp));
7140 7129          }
7141 7130  
7142 7131          sfmmu_mlist_exit(pml);
7143 7132  
7144 7133          return (0);
7145 7134  }
7146 7135  
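hat_pageunload() caches sfhme->hme_next before each visit because sfmmu_pageunload() may unlink the current hment. A minimal stand-alone sketch of that traversal idiom, using a hypothetical node type and unlink test:

    #include <stddef.h>

    struct node {
            struct node *next;
            int unload_me;
    };

    static void
    walk_and_unload(struct node **headp)
    {
            struct node **linkp = headp;
            struct node *n, *next;

            for (n = *headp; n != NULL; n = next) {
                    next = n->next;         /* save before n may be unlinked */
                    if (n->unload_me)
                            *linkp = next;  /* unlink n, keep walking */
                    else
                            linkp = &n->next;
            }
    }
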
7147 7136  cpuset_t
7148 7137  sfmmu_pageunload(page_t *pp, struct sf_hment *sfhme, int cons)
7149 7138  {
7150 7139          struct hme_blk *hmeblkp;
7151 7140          sfmmu_t *sfmmup;
7152 7141          tte_t tte, ttemod;
7153 7142  #ifdef DEBUG
7154 7143          tte_t orig_old;
7155 7144  #endif /* DEBUG */
7156 7145          caddr_t addr;
7157 7146          int ttesz;
7158 7147          int ret;
7159 7148          cpuset_t cpuset;
7160 7149  
7161 7150          ASSERT(pp != NULL);
7162 7151          ASSERT(sfmmu_mlist_held(pp));
7163 7152          ASSERT(!PP_ISKAS(pp));
7164 7153  
7165 7154          CPUSET_ZERO(cpuset);
7166 7155  
7167 7156          hmeblkp = sfmmu_hmetohblk(sfhme);
7168 7157  
7169 7158  readtte:
7170 7159          sfmmu_copytte(&sfhme->hme_tte, &tte);
7171 7160          if (TTE_IS_VALID(&tte)) {
7172 7161                  sfmmup = hblktosfmmu(hmeblkp);
7173 7162                  ttesz = get_hblk_ttesz(hmeblkp);
7174 7163                  /*
7175 7164                   * Only unload mappings of 'cons' size.
7176 7165                   */
7177 7166                  if (ttesz != cons)
7178 7167                          return (cpuset);
7179 7168  
7180 7169                  /*
7181 7170                   * Note that we have p_mapping lock, but no hash lock here.
7182 7171                   * hblk_unload() has to have both hash lock AND p_mapping
7183 7172                   * lock before it tries to modify tte. So, the tte could
7184 7173                   * not become invalid in the sfmmu_modifytte_try() below.
7185 7174                   */
7186 7175                  ttemod = tte;
7187 7176  #ifdef DEBUG
7188 7177                  orig_old = tte;
7189 7178  #endif /* DEBUG */
7190 7179  
7191 7180                  TTE_SET_INVALID(&ttemod);
7192 7181                  ret = sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte);
7193 7182                  if (ret < 0) {
7194 7183  #ifdef DEBUG
7195 7184                          /* only R/M bits can change. */
7196 7185                          chk_tte(&orig_old, &tte, &ttemod, hmeblkp);
7197 7186  #endif /* DEBUG */
7198 7187                          goto readtte;
7199 7188                  }
7200 7189  
7201 7190                  if (ret == 0) {
7202 7191                          panic("pageunload: cas failed?");
7203 7192                  }
7204 7193  
7205 7194                  addr = tte_to_vaddr(hmeblkp, tte);
7206 7195  
7207 7196                  if (hmeblkp->hblk_shared) {
7208 7197                          sf_srd_t *srdp = (sf_srd_t *)sfmmup;
7209 7198                          uint_t rid = hmeblkp->hblk_tag.htag_rid;
7210 7199                          sf_region_t *rgnp;
7211 7200                          ASSERT(SFMMU_IS_SHMERID_VALID(rid));
7212 7201                          ASSERT(rid < SFMMU_MAX_HME_REGIONS);
7213 7202                          ASSERT(srdp != NULL);
7214 7203                          rgnp = srdp->srd_hmergnp[rid];
7215 7204                          SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid);
7216 7205                          cpuset = sfmmu_rgntlb_demap(addr, rgnp, hmeblkp, 1);
7217 7206                          sfmmu_ttesync(NULL, addr, &tte, pp);
7218 7207                          ASSERT(rgnp->rgn_ttecnt[ttesz] > 0);
7219 7208                          atomic_dec_ulong(&rgnp->rgn_ttecnt[ttesz]);
7220 7209                  } else {
7221 7210                          sfmmu_ttesync(sfmmup, addr, &tte, pp);
7222 7211                          atomic_dec_ulong(&sfmmup->sfmmu_ttecnt[ttesz]);
7223 7212  
7224 7213                          /*
7225 7214                           * We need to flush the page from the virtual cache
7226 7215                           * in order to prevent a virtual cache alias
7227 7216                           * inconsistency. The particular scenario we need
7228 7217                           * to worry about is:
7229 7218                           * Given:  va1 and va2 are two virtual address that
7230 7219                           * alias and will map the same physical address.
7231 7220                           * 1.   mapping exists from va1 to pa and data has
7232 7221                           *      been read into the cache.
7233 7222                           * 2.   unload va1.
7234 7223                           * 3.   load va2 and modify data using va2.
7235 7224                           * 4.   unload va2.
7236 7225                           * 5.   load va1 and reference data.  Unless we flush
7237 7226                           *      the data cache when we unload we will get
7238 7227                           *      stale data.
7239 7228                           * This scenario is taken care of by using virtual
7240 7229                           * page coloring.
7241 7230                           */
7242 7231                          if (sfmmup->sfmmu_ismhat) {
7243 7232                                  /*
7244 7233                                   * Flush TSBs, TLBs and caches
7245 7234                                   * of every process
7246 7235                                   * sharing this ism segment.
7247 7236                                   */
7248 7237                                  sfmmu_hat_lock_all();
7249 7238                                  mutex_enter(&ism_mlist_lock);
7250 7239                                  kpreempt_disable();
7251 7240                                  sfmmu_ismtlbcache_demap(addr, sfmmup, hmeblkp,
7252 7241                                      pp->p_pagenum, CACHE_NO_FLUSH);
7253 7242                                  kpreempt_enable();
7254 7243                                  mutex_exit(&ism_mlist_lock);
7255 7244                                  sfmmu_hat_unlock_all();
7256 7245                                  cpuset = cpu_ready_set;
7257 7246                          } else {
7258 7247                                  sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0);
7259 7248                                  cpuset = sfmmup->sfmmu_cpusran;
7260 7249                          }
7261 7250                  }
7262 7251  
7263 7252                  /*
7264 7253                   * Hme_sub has to run after ttesync() and a_rss update.
7265 7254                   * See hblk_unload().
7266 7255                   */
7267 7256                  HME_SUB(sfhme, pp);
7268 7257                  membar_stst();
7269 7258  
7270 7259                  /*
7271 7260                   * We can not make ASSERT(hmeblkp->hblk_hmecnt <= NHMENTS)
7272 7261                   * since pteload may have done a HME_ADD() right after
7273 7262                   * we did the HME_SUB() above. Hmecnt is now maintained
7274 7263                   * by cas only; no lock guarantees its value. The only
7275 7264                   * guarantee we have is that the hmecnt should not be less
7276 7265                   * than what it should be, so the hblk will not be taken away.
7277 7266                   * It's also important that we decremented the hmecnt after
7278 7267                   * we are done with hmeblkp so that this hmeblk won't be
7279 7268                   * stolen.
7280 7269                   */
7281 7270                  ASSERT(hmeblkp->hblk_hmecnt > 0);
7282 7271                  ASSERT(hmeblkp->hblk_vcnt > 0);
7283 7272                  atomic_dec_16(&hmeblkp->hblk_vcnt);
7284 7273                  atomic_dec_16(&hmeblkp->hblk_hmecnt);
7285 7274                  /*
7286 7275                   * This is bug 4063182.
7287 7276                   * XXX: fixme
7288 7277                   * ASSERT(hmeblkp->hblk_hmecnt || hmeblkp->hblk_vcnt ||
7289 7278                   *      !hmeblkp->hblk_lckcnt);
7290 7279                   */
7291 7280          } else {
7292 7281                  panic("invalid tte? pp %p &tte %p",
7293 7282                      (void *)pp, (void *)&tte);
7294 7283          }
7295 7284  
7296 7285          return (cpuset);
7297 7286  }
7298 7287  
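The alias scenario described in the comment above is avoided on virtually indexed caches by virtual page coloring: two virtual addresses may map the same physical page cacheably only if they index the same cache bin. A stand-alone sketch of the color computation, with hypothetical constants (the real values come from the cpu module):

    #include <stdint.h>

    #define PAGESIZE        8192                    /* assumed base page size */
    #define VAC_SIZE        (64 * 1024)             /* assumed VAC span */
    #define VAC_COLORS      (VAC_SIZE / PAGESIZE)   /* 8 colors */

    static unsigned int
    vac_color(uintptr_t va)
    {
            return ((va / PAGESIZE) & (VAC_COLORS - 1));
    }

    /*
     * va1 and va2 can both stay cacheable only when vac_color(va1) ==
     * vac_color(va2); otherwise the page is made temporarily
     * non-cacheable (see conv_tnc() in this file).
     */
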
7299 7288  /*
7300 7289   * While relocating a kernel page, this function will move the mappings
7301 7290   * from tpp to dpp and modify any associated data with these mappings.
7302 7291   * It also unsuspends the suspended kernel mapping.
7303 7292   */
7304 7293  static void
7305 7294  hat_pagereload(struct page *tpp, struct page *dpp)
7306 7295  {
7307 7296          struct sf_hment *sfhme;
7308 7297          tte_t tte, ttemod;
7309 7298          int index, cons;
7310 7299  
7311 7300          ASSERT(getpil() == PIL_MAX);
7312 7301          ASSERT(sfmmu_mlist_held(tpp));
7313 7302          ASSERT(sfmmu_mlist_held(dpp));
7314 7303  
7315 7304          index = PP_MAPINDEX(tpp);
7316 7305          cons = TTE8K;
7317 7306  
7318 7307          /* Update real mappings to the page */
7319 7308  retry:
7320 7309          for (sfhme = tpp->p_mapping; sfhme != NULL; sfhme = sfhme->hme_next) {
7321 7310                  if (IS_PAHME(sfhme))
7322 7311                          continue;
7323 7312                  sfmmu_copytte(&sfhme->hme_tte, &tte);
7324 7313                  ttemod = tte;
7325 7314  
7326 7315                  /*
7327 7316                   * replace old pfn with new pfn in TTE
7328 7317                   */
7329 7318                  PFN_TO_TTE(ttemod, dpp->p_pagenum);
7330 7319  
7331 7320                  /*
7332 7321                   * clear suspend bit
7333 7322                   */
7334 7323                  ASSERT(TTE_IS_SUSPEND(&ttemod));
7335 7324                  TTE_CLR_SUSPEND(&ttemod);
7336 7325  
7337 7326                  if (sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte) < 0)
7338 7327                          panic("hat_pagereload(): sfmmu_modifytte_try() failed");
7339 7328  
7340 7329                  /*
7341 7330                   * set hme_page to point to the new page
7342 7331                   */
7343 7332                  sfhme->hme_page = dpp;
7344 7333          }
7345 7334  
7346 7335          /*
7347 7336           * move p_mapping list from old page to new page
7348 7337           */
7349 7338          dpp->p_mapping = tpp->p_mapping;
7350 7339          tpp->p_mapping = NULL;
7351 7340          dpp->p_share = tpp->p_share;
7352 7341          tpp->p_share = 0;
7353 7342  
7354 7343          while (index != 0) {
7355 7344                  index = index >> 1;
7356 7345                  if (index != 0)
7357 7346                          cons++;
7358 7347                  if (index & 0x1) {
7359 7348                          tpp = PP_GROUPLEADER(tpp, cons);
7360 7349                          dpp = PP_GROUPLEADER(dpp, cons);
7361 7350                          goto retry;
7362 7351                  }
7363 7352          }
7364 7353  
7365 7354          curthread->t_flag &= ~T_DONTDTRACE;
7366 7355          mutex_exit(&kpr_suspendlock);
7367 7356  }
7368 7357  
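The index/cons loop that appears in hat_pageunload(), hat_pagereload(), hat_pagesync() and elsewhere walks the PP_MAPINDEX bit vector: each set bit above bit 0 records a larger mapping size that must also be processed via its group leader. A stand-alone sketch of that walk, with a hypothetical visit callback:

    static void
    walk_large_mappings(unsigned int index, void (*visit)(int szc))
    {
            int szc = 0;                    /* 0 plays the role of TTE8K */

            while (index != 0) {
                    index >>= 1;
                    if (index != 0)
                            szc++;
                    if (index & 0x1)
                            visit(szc);     /* e.g. PP_GROUPLEADER(pp, szc) */
            }
    }
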
7369 7358  uint_t
7370 7359  hat_pagesync(struct page *pp, uint_t clearflag)
7371 7360  {
7372 7361          struct sf_hment *sfhme, *tmphme = NULL;
7373 7362          struct hme_blk *hmeblkp;
7374 7363          kmutex_t *pml;
7375 7364          cpuset_t cpuset, tset;
7376 7365          int     index, cons;
7377 7366          extern  ulong_t po_share;
7378 7367          page_t  *save_pp = pp;
7379 7368          int     stop_on_sh = 0;
7380 7369          uint_t  shcnt;
7381 7370  
7382 7371          CPUSET_ZERO(cpuset);
7383 7372  
7384 7373          if (PP_ISRO(pp) && (clearflag & HAT_SYNC_STOPON_MOD)) {
7385 7374                  return (PP_GENERIC_ATTR(pp));
7386 7375          }
7387 7376  
7388 7377          if ((clearflag & HAT_SYNC_ZERORM) == 0) {
7389 7378                  if ((clearflag & HAT_SYNC_STOPON_REF) && PP_ISREF(pp)) {
7390 7379                          return (PP_GENERIC_ATTR(pp));
7391 7380                  }
7392 7381                  if ((clearflag & HAT_SYNC_STOPON_MOD) && PP_ISMOD(pp)) {
7393 7382                          return (PP_GENERIC_ATTR(pp));
7394 7383                  }
7395 7384                  if (clearflag & HAT_SYNC_STOPON_SHARED) {
7396 7385                          if (pp->p_share > po_share) {
7397 7386                                  hat_page_setattr(pp, P_REF);
7398 7387                                  return (PP_GENERIC_ATTR(pp));
7399 7388                          }
7400 7389                          stop_on_sh = 1;
7401 7390                          shcnt = 0;
7402 7391                  }
7403 7392          }
7404 7393  
7405 7394          clearflag &= ~HAT_SYNC_STOPON_SHARED;
7406 7395          pml = sfmmu_mlist_enter(pp);
7407 7396          index = PP_MAPINDEX(pp);
7408 7397          cons = TTE8K;
7409 7398  retry:
7410 7399          for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) {
7411 7400                  /*
7412 7401                   * We need to save the next hment on the list since
7413 7402                   * it is possible for pagesync to remove an invalid hment
7414 7403                   * from the list.
7415 7404                   */
7416 7405                  tmphme = sfhme->hme_next;
7417 7406                  if (IS_PAHME(sfhme))
7418 7407                          continue;
7419 7408                  /*
7420 7409                   * If we are looking for large mappings and this hme doesn't
7421 7410                   * reach the range we are seeking, just ignore it.
7422 7411                   */
7423 7412                  hmeblkp = sfmmu_hmetohblk(sfhme);
7424 7413  
7425 7414                  if (hme_size(sfhme) < cons)
7426 7415                          continue;
7427 7416  
7428 7417                  if (stop_on_sh) {
7429 7418                          if (hmeblkp->hblk_shared) {
7430 7419                                  sf_srd_t *srdp = hblktosrd(hmeblkp);
7431 7420                                  uint_t rid = hmeblkp->hblk_tag.htag_rid;
7432 7421                                  sf_region_t *rgnp;
7433 7422                                  ASSERT(SFMMU_IS_SHMERID_VALID(rid));
7434 7423                                  ASSERT(rid < SFMMU_MAX_HME_REGIONS);
7435 7424                                  ASSERT(srdp != NULL);
7436 7425                                  rgnp = srdp->srd_hmergnp[rid];
7437 7426                                  SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp,
7438 7427                                      rgnp, rid);
7439 7428                                  shcnt += rgnp->rgn_refcnt;
7440 7429                          } else {
7441 7430                                  shcnt++;
7442 7431                          }
7443 7432                          if (shcnt > po_share) {
7444 7433                                  /*
7445 7434                                   * tell the pager to spare the page this time
7446 7435                                   * around.
7447 7436                                   */
7448 7437                                  hat_page_setattr(save_pp, P_REF);
7449 7438                                  index = 0;
7450 7439                                  break;
7451 7440                          }
7452 7441                  }
7453 7442                  tset = sfmmu_pagesync(pp, sfhme,
7454 7443                      clearflag & ~HAT_SYNC_STOPON_RM);
7455 7444                  CPUSET_OR(cpuset, tset);
7456 7445  
7457 7446                  /*
7458 7447                   * If clearflag is HAT_SYNC_DONTZERO, break out as soon
7459 7448                   * as the "ref" or "mod" is set or share cnt exceeds po_share.
7460 7449                   */
7461 7450                  if ((clearflag & ~HAT_SYNC_STOPON_RM) == HAT_SYNC_DONTZERO &&
7462 7451                      (((clearflag & HAT_SYNC_STOPON_MOD) && PP_ISMOD(save_pp)) ||
7463 7452                      ((clearflag & HAT_SYNC_STOPON_REF) && PP_ISREF(save_pp)))) {
7464 7453                          index = 0;
7465 7454                          break;
7466 7455                  }
7467 7456          }
7468 7457  
7469 7458          while (index) {
7470 7459                  index = index >> 1;
7471 7460                  cons++;
7472 7461                  if (index & 0x1) {
7473 7462                          /* Go to leading page */
7474 7463                          pp = PP_GROUPLEADER(pp, cons);
7475 7464                          goto retry;
7476 7465                  }
7477 7466          }
7478 7467  
7479 7468          xt_sync(cpuset);
7480 7469          sfmmu_mlist_exit(pml);
7481 7470          return (PP_GENERIC_ATTR(save_pp));
7482 7471  }
7483 7472  
7484 7473  /*
7485 7474   * Get all the hardware dependent attributes for a page struct
7486 7475   */
7487 7476  static cpuset_t
7488 7477  sfmmu_pagesync(struct page *pp, struct sf_hment *sfhme,
7489 7478          uint_t clearflag)
7490 7479  {
7491 7480          caddr_t addr;
7492 7481          tte_t tte, ttemod;
7493 7482          struct hme_blk *hmeblkp;
7494 7483          int ret;
7495 7484          sfmmu_t *sfmmup;
7496 7485          cpuset_t cpuset;
7497 7486  
7498 7487          ASSERT(pp != NULL);
7499 7488          ASSERT(sfmmu_mlist_held(pp));
7500 7489          ASSERT((clearflag == HAT_SYNC_DONTZERO) ||
7501 7490              (clearflag == HAT_SYNC_ZERORM));
7502 7491  
7503 7492          SFMMU_STAT(sf_pagesync);
7504 7493  
7505 7494          CPUSET_ZERO(cpuset);
7506 7495  
7507 7496  sfmmu_pagesync_retry:
7508 7497  
7509 7498          sfmmu_copytte(&sfhme->hme_tte, &tte);
7510 7499          if (TTE_IS_VALID(&tte)) {
7511 7500                  hmeblkp = sfmmu_hmetohblk(sfhme);
7512 7501                  sfmmup = hblktosfmmu(hmeblkp);
7513 7502                  addr = tte_to_vaddr(hmeblkp, tte);
7514 7503                  if (clearflag == HAT_SYNC_ZERORM) {
7515 7504                          ttemod = tte;
7516 7505                          TTE_CLR_RM(&ttemod);
7517 7506                          ret = sfmmu_modifytte_try(&tte, &ttemod,
7518 7507                              &sfhme->hme_tte);
7519 7508                          if (ret < 0) {
7520 7509                                  /*
7521 7510                                   * cas failed and the new value is not what
7522 7511                                   * we want.
7523 7512                                   */
7524 7513                                  goto sfmmu_pagesync_retry;
7525 7514                          }
7526 7515  
7527 7516                          if (ret > 0) {
7528 7517                                  /* we win the cas */
7529 7518                                  if (hmeblkp->hblk_shared) {
7530 7519                                          sf_srd_t *srdp = (sf_srd_t *)sfmmup;
7531 7520                                          uint_t rid =
7532 7521                                              hmeblkp->hblk_tag.htag_rid;
7533 7522                                          sf_region_t *rgnp;
7534 7523                                          ASSERT(SFMMU_IS_SHMERID_VALID(rid));
7535 7524                                          ASSERT(rid < SFMMU_MAX_HME_REGIONS);
7536 7525                                          ASSERT(srdp != NULL);
7537 7526                                          rgnp = srdp->srd_hmergnp[rid];
7538 7527                                          SFMMU_VALIDATE_SHAREDHBLK(hmeblkp,
7539 7528                                              srdp, rgnp, rid);
7540 7529                                          cpuset = sfmmu_rgntlb_demap(addr,
7541 7530                                              rgnp, hmeblkp, 1);
7542 7531                                  } else {
7543 7532                                          sfmmu_tlb_demap(addr, sfmmup, hmeblkp,
7544 7533                                              0, 0);
7545 7534                                          cpuset = sfmmup->sfmmu_cpusran;
7546 7535                                  }
7547 7536                          }
7548 7537                  }
7549 7538                  sfmmu_ttesync(hmeblkp->hblk_shared ? NULL : sfmmup, addr,
7550 7539                      &tte, pp);
7551 7540          }
7552 7541          return (cpuset);
7553 7542  }
7554 7543  
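sfmmu_modifytte_try() follows the usual compare-and-swap protocol: a negative return means the TTE changed underneath us and the caller re-reads and retries, while a positive return means the update landed. A stand-alone C11 sketch of the same retry loop; the TTE layout and the clear_rm_bits() transform are hypothetical:

    #include <stdatomic.h>
    #include <stdint.h>

    #define TTE_RM_MASK     0x3ULL          /* hypothetical ref/mod bits */

    static uint64_t
    clear_rm_bits(uint64_t tte)
    {
            return (tte & ~TTE_RM_MASK);
    }

    static uint64_t
    update_tte(_Atomic uint64_t *ttep)
    {
            uint64_t old = atomic_load(ttep);
            uint64_t new;

            do {
                    new = clear_rm_bits(old);
                    /*
                     * On failure 'old' is reloaded with the current value,
                     * mirroring the "goto readtte" / retry labels above.
                     */
            } while (!atomic_compare_exchange_weak(ttep, &old, new));

            return (old);                   /* the value we replaced */
    }
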
7555 7544  /*
7556 7545   * Remove write permission from all mappings to a page, so that
7557 7546   * we can detect the next modification of it. This requires modifying
7558 7547   * the TTE then invalidating (demap) any TLB entry using that TTE.
7559 7548   * This code is similar to sfmmu_pagesync().
7560 7549   */
7561 7550  static cpuset_t
7562 7551  sfmmu_pageclrwrt(struct page *pp, struct sf_hment *sfhme)
7563 7552  {
7564 7553          caddr_t addr;
7565 7554          tte_t tte;
7566 7555          tte_t ttemod;
7567 7556          struct hme_blk *hmeblkp;
7568 7557          int ret;
7569 7558          sfmmu_t *sfmmup;
7570 7559          cpuset_t cpuset;
7571 7560  
7572 7561          ASSERT(pp != NULL);
7573 7562          ASSERT(sfmmu_mlist_held(pp));
7574 7563  
7575 7564          CPUSET_ZERO(cpuset);
7576 7565          SFMMU_STAT(sf_clrwrt);
7577 7566  
7578 7567  retry:
7579 7568  
7580 7569          sfmmu_copytte(&sfhme->hme_tte, &tte);
7581 7570          if (TTE_IS_VALID(&tte) && TTE_IS_WRITABLE(&tte)) {
7582 7571                  hmeblkp = sfmmu_hmetohblk(sfhme);
7583 7572                  sfmmup = hblktosfmmu(hmeblkp);
7584 7573                  addr = tte_to_vaddr(hmeblkp, tte);
7585 7574  
7586 7575                  ttemod = tte;
7587 7576                  TTE_CLR_WRT(&ttemod);
7588 7577                  TTE_CLR_MOD(&ttemod);
7589 7578                  ret = sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte);
7590 7579  
7591 7580                  /*
7592 7581                   * if cas failed and the new value is not what
7593 7582                   * we want, retry
7594 7583                   */
7595 7584                  if (ret < 0)
7596 7585                          goto retry;
7597 7586  
7598 7587                  /* we win the cas */
7599 7588                  if (ret > 0) {
7600 7589                          if (hmeblkp->hblk_shared) {
7601 7590                                  sf_srd_t *srdp = (sf_srd_t *)sfmmup;
7602 7591                                  uint_t rid = hmeblkp->hblk_tag.htag_rid;
7603 7592                                  sf_region_t *rgnp;
7604 7593                                  ASSERT(SFMMU_IS_SHMERID_VALID(rid));
7605 7594                                  ASSERT(rid < SFMMU_MAX_HME_REGIONS);
7606 7595                                  ASSERT(srdp != NULL);
7607 7596                                  rgnp = srdp->srd_hmergnp[rid];
7608 7597                                  SFMMU_VALIDATE_SHAREDHBLK(hmeblkp,
7609 7598                                      srdp, rgnp, rid);
7610 7599                                  cpuset = sfmmu_rgntlb_demap(addr,
7611 7600                                      rgnp, hmeblkp, 1);
7612 7601                          } else {
7613 7602                                  sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0);
7614 7603                                  cpuset = sfmmup->sfmmu_cpusran;
7615 7604                          }
7616 7605                  }
7617 7606          }
7618 7607  
7619 7608          return (cpuset);
7620 7609  }
7621 7610  
7622 7611  /*
7623 7612   * Walk all mappings of a page, removing write permission and clearing the
7624 7613   * ref/mod bits. This code is similar to hat_pagesync()
7625 7614   */
7626 7615  static void
7627 7616  hat_page_clrwrt(page_t *pp)
7628 7617  {
7629 7618          struct sf_hment *sfhme;
7630 7619          struct sf_hment *tmphme = NULL;
7631 7620          kmutex_t *pml;
7632 7621          cpuset_t cpuset;
7633 7622          cpuset_t tset;
7634 7623          int     index;
7635 7624          int      cons;
7636 7625  
7637 7626          CPUSET_ZERO(cpuset);
7638 7627  
7639 7628          pml = sfmmu_mlist_enter(pp);
7640 7629          index = PP_MAPINDEX(pp);
7641 7630          cons = TTE8K;
7642 7631  retry:
7643 7632          for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) {
7644 7633                  tmphme = sfhme->hme_next;
7645 7634  
7646 7635                  /*
7647 7636                   * If we are looking for large mappings and this hme doesn't
7648 7637                   * reach the range we are seeking, just ignore it.
7649 7638                   */
7650 7639  
7651 7640                  if (hme_size(sfhme) < cons)
7652 7641                          continue;
7653 7642  
7654 7643                  tset = sfmmu_pageclrwrt(pp, sfhme);
7655 7644                  CPUSET_OR(cpuset, tset);
7656 7645          }
7657 7646  
7658 7647          while (index) {
7659 7648                  index = index >> 1;
7660 7649                  cons++;
7661 7650                  if (index & 0x1) {
7662 7651                          /* Go to leading page */
7663 7652                          pp = PP_GROUPLEADER(pp, cons);
7664 7653                          goto retry;
7665 7654                  }
7666 7655          }
7667 7656  
7668 7657          xt_sync(cpuset);
7669 7658          sfmmu_mlist_exit(pml);
7670 7659  }
7671 7660  
7672 7661  /*
7673 7662   * Set the given REF/MOD/RO bits for the given page.
7674 7663   * For a vnode with a sorted v_pages list, we need to change
7675 7664   * the attributes and the v_pages list together under page_vnode_mutex.
7676 7665   */
7677 7666  void
7678 7667  hat_page_setattr(page_t *pp, uint_t flag)
7679 7668  {
7680 7669          vnode_t         *vp = pp->p_vnode;
7681 7670          page_t          **listp;
7682 7671          kmutex_t        *pmtx;
7683 7672          kmutex_t        *vphm = NULL;
7684 7673          int             noshuffle;
7685 7674  
7686 7675          noshuffle = flag & P_NSH;
7687 7676          flag &= ~P_NSH;
7688 7677  
7689 7678          ASSERT(!(flag & ~(P_MOD | P_REF | P_RO)));
7690 7679  
7691 7680          /*
7692 7681           * nothing to do if attribute already set
7693 7682           */
7694 7683          if ((pp->p_nrm & flag) == flag)
7695 7684                  return;
7696 7685  
7697 7686          if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp) &&
7698 7687              !noshuffle) {
7699 7688                  vphm = page_vnode_mutex(vp);
7700 7689                  mutex_enter(vphm);
7701 7690          }
7702 7691  
7703 7692          pmtx = sfmmu_page_enter(pp);
7704 7693          pp->p_nrm |= flag;
7705 7694          sfmmu_page_exit(pmtx);
7706 7695  
7707 7696          if (vphm != NULL) {
7708 7697                  /*
7709 7698                   * Some File Systems examine v_pages for NULL w/o
7710 7699                   * grabbing the vphm mutex. Must not let it become NULL when
7711 7700                   * pp is the only page on the list.
7712 7701                   */
7713 7702                  if (pp->p_vpnext != pp) {
7714 7703                          page_vpsub(&vp->v_pages, pp);
7715 7704                          if (vp->v_pages != NULL)
7716 7705                                  listp = &vp->v_pages->p_vpprev->p_vpnext;
7717 7706                          else
7718 7707                                  listp = &vp->v_pages;
7719 7708                          page_vpadd(listp, pp);
7720 7709                  }
7721 7710                  mutex_exit(vphm);
7722 7711          }
7723 7712  }
7724 7713  
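For VMODSORT vnodes the code above moves a newly dirtied page to the tail of the circular v_pages list, skipping the sole-element case so that unlocked readers of the head pointer never observe NULL. A stand-alone sketch of that reshuffle with a hypothetical list type:

    struct vpage {
            struct vpage *vpnext;
            struct vpage *vpprev;
    };

    static void
    move_to_tail(struct vpage **headp, struct vpage *pp)
    {
            struct vpage *head = *headp;

            if (pp->vpnext == pp)           /* sole element: leave head alone */
                    return;

            /* unlink pp */
            pp->vpprev->vpnext = pp->vpnext;
            pp->vpnext->vpprev = pp->vpprev;
            if (head == pp)
                    head = *headp = pp->vpnext;

            /* reinsert pp just before the head, i.e. at the tail */
            pp->vpnext = head;
            pp->vpprev = head->vpprev;
            head->vpprev->vpnext = pp;
            head->vpprev = pp;
    }
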
7725 7714  void
7726 7715  hat_page_clrattr(page_t *pp, uint_t flag)
7727 7716  {
7728 7717          vnode_t         *vp = pp->p_vnode;
7729 7718          kmutex_t        *pmtx;
7730 7719  
7731 7720          ASSERT(!(flag & ~(P_MOD | P_REF | P_RO)));
7732 7721  
7733 7722          pmtx = sfmmu_page_enter(pp);
7734 7723  
7735 7724          /*
7736 7725           * Caller is expected to hold page's io lock for VMODSORT to work
7737 7726           * correctly with pvn_vplist_dirty() and pvn_getdirty() when mod
7738 7727           * bit is cleared.
7739 7728           * We don't assert this, to avoid tripping some existing third-party
7740 7729           * code. The dirty page is moved back to the top of the v_pages list
7741 7730           * after IO is done in pvn_write_done().
7742 7731           */
7743 7732          pp->p_nrm &= ~flag;
7744 7733          sfmmu_page_exit(pmtx);
7745 7734  
7746 7735          if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp)) {
7747 7736  
7748 7737                  /*
7749 7738                   * VMODSORT works by removing write permissions and getting
7750 7739                   * a fault when a page is made dirty. At this point
7751 7740                   * we need to remove write permission from all mappings
7752 7741                   * to this page.
7753 7742                   */
7754 7743                  hat_page_clrwrt(pp);
7755 7744          }
7756 7745  }
7757 7746  
7758 7747  uint_t
7759 7748  hat_page_getattr(page_t *pp, uint_t flag)
7760 7749  {
7761 7750          ASSERT(!(flag & ~(P_MOD | P_REF | P_RO)));
7762 7751          return ((uint_t)(pp->p_nrm & flag));
7763 7752  }
7764 7753  
7765 7754  /*
7766 7755   * DEBUG kernels: verify that a kernel va<->pa translation
7767 7756   * is safe by checking the underlying page_t is in a page
7768 7757   * relocation-safe state.
7769 7758   */
7770 7759  #ifdef  DEBUG
7771 7760  void
7772 7761  sfmmu_check_kpfn(pfn_t pfn)
7773 7762  {
7774 7763          page_t *pp;
7775 7764          int index, cons;
7776 7765  
7777 7766          if (hat_check_vtop == 0)
7778 7767                  return;
7779 7768  
7780 7769          if (kvseg.s_base == NULL || panicstr)
7781 7770                  return;
7782 7771  
7783 7772          pp = page_numtopp_nolock(pfn);
7784 7773          if (!pp)
7785 7774                  return;
7786 7775  
7787 7776          if (PAGE_LOCKED(pp) || PP_ISNORELOC(pp))
7788 7777                  return;
7789 7778  
7790 7779          /*
7791 7780           * Handed a large kernel page, we dig up the root page since we
7792 7781           * know the root page might have the lock also.
7793 7782           */
7794 7783          if (pp->p_szc != 0) {
7795 7784                  index = PP_MAPINDEX(pp);
7796 7785                  cons = TTE8K;
7797 7786  again:
7798 7787                  while (index != 0) {
7799 7788                          index >>= 1;
7800 7789                          if (index != 0)
7801 7790                                  cons++;
7802 7791                          if (index & 0x1) {
7803 7792                                  pp = PP_GROUPLEADER(pp, cons);
7804 7793                                  goto again;
7805 7794                          }
7806 7795                  }
7807 7796          }
7808 7797  
7809 7798          if (PAGE_LOCKED(pp) || PP_ISNORELOC(pp))
7810 7799                  return;
7811 7800  
7812 7801          /*
7813 7802           * Pages need to be locked or allocated "permanent" (either from
7814 7803           * static_arena arena or explicitly setting PG_NORELOC when calling
7815 7804           * page_create_va()) for VA->PA translations to be valid.
7816 7805           */
7817 7806          if (!PP_ISNORELOC(pp))
7818 7807                  panic("Illegal VA->PA translation, pp 0x%p not permanent",
7819 7808                      (void *)pp);
7820 7809          else
7821 7810                  panic("Illegal VA->PA translation, pp 0x%p not locked",
7822 7811                      (void *)pp);
7823 7812  }
7824 7813  #endif  /* DEBUG */
7825 7814  
7826 7815  /*
7827 7816   * Returns a page frame number for a given virtual address.
7828 7817   * Returns PFN_INVALID to indicate an invalid mapping
7829 7818   */
7830 7819  pfn_t
7831 7820  hat_getpfnum(struct hat *hat, caddr_t addr)
7832 7821  {
7833 7822          pfn_t pfn;
7834 7823          tte_t tte;
7835 7824  
7836 7825          /*
7837 7826           * We would like to
7838 7827           * ASSERT(AS_LOCK_HELD(as));
7839 7828           * but we can't because the iommu driver will call this
7840 7829           * routine at interrupt time and it can't grab the as lock
7841 7830           * or it will deadlock: A thread could have the as lock
7842 7831           * and be waiting for io.  The io can't complete
7843 7832           * because the interrupt thread is blocked trying to grab
7844 7833           * the as lock.
7845 7834           */
7846 7835  
7847 7836          if (hat == ksfmmup) {
7848 7837                  if (IS_KMEM_VA_LARGEPAGE(addr)) {
7849 7838                          ASSERT(segkmem_lpszc > 0);
7850 7839                          pfn = sfmmu_kvaszc2pfn(addr, segkmem_lpszc);
7851 7840                          if (pfn != PFN_INVALID) {
7852 7841                                  sfmmu_check_kpfn(pfn);
7853 7842                                  return (pfn);
7854 7843                          }
7855 7844                  } else if (segkpm && IS_KPM_ADDR(addr)) {
7856 7845                          return (sfmmu_kpm_vatopfn(addr));
7857 7846                  }
7858 7847                  while ((pfn = sfmmu_vatopfn(addr, ksfmmup, &tte))
7859 7848                      == PFN_SUSPENDED) {
7860 7849                          sfmmu_vatopfn_suspended(addr, ksfmmup, &tte);
7861 7850                  }
7862 7851                  sfmmu_check_kpfn(pfn);
7863 7852                  return (pfn);
7864 7853          } else {
7865 7854                  return (sfmmu_uvatopfn(addr, hat, NULL));
7866 7855          }
7867 7856  }
7868 7857  
7869 7858  /*
7870 7859   * This routine will return both pfn and tte for the vaddr.
7871 7860   */
7872 7861  static pfn_t
7873 7862  sfmmu_uvatopfn(caddr_t vaddr, struct hat *sfmmup, tte_t *ttep)
7874 7863  {
7875 7864          struct hmehash_bucket *hmebp;
7876 7865          hmeblk_tag hblktag;
7877 7866          int hmeshift, hashno = 1;
7878 7867          struct hme_blk *hmeblkp = NULL;
7879 7868          tte_t tte;
7880 7869  
7881 7870          struct sf_hment *sfhmep;
7882 7871          pfn_t pfn;
7883 7872  
7884 7873          /* support for ISM */
7885 7874          ism_map_t       *ism_map;
7886 7875          ism_blk_t       *ism_blkp;
7887 7876          int             i;
7888 7877          sfmmu_t *ism_hatid = NULL;
7889 7878          sfmmu_t *locked_hatid = NULL;
7890 7879          sfmmu_t *sv_sfmmup = sfmmup;
7891 7880          caddr_t sv_vaddr = vaddr;
7892 7881          sf_srd_t *srdp;
7893 7882  
7894 7883          if (ttep == NULL) {
7895 7884                  ttep = &tte;
7896 7885          } else {
7897 7886                  ttep->ll = 0;
7898 7887          }
7899 7888  
7900 7889          ASSERT(sfmmup != ksfmmup);
7901 7890          SFMMU_STAT(sf_user_vtop);
7902 7891          /*
7903 7892           * Set ism_hatid if vaddr falls in a ISM segment.
7904 7893           */
7905 7894          ism_blkp = sfmmup->sfmmu_iblk;
7906 7895          if (ism_blkp != NULL) {
7907 7896                  sfmmu_ismhat_enter(sfmmup, 0);
7908 7897                  locked_hatid = sfmmup;
7909 7898          }
7910 7899          while (ism_blkp != NULL && ism_hatid == NULL) {
7911 7900                  ism_map = ism_blkp->iblk_maps;
7912 7901                  for (i = 0; ism_map[i].imap_ismhat && i < ISM_MAP_SLOTS; i++) {
7913 7902                          if (vaddr >= ism_start(ism_map[i]) &&
7914 7903                              vaddr < ism_end(ism_map[i])) {
7915 7904                                  sfmmup = ism_hatid = ism_map[i].imap_ismhat;
7916 7905                                  vaddr = (caddr_t)(vaddr -
7917 7906                                      ism_start(ism_map[i]));
7918 7907                                  break;
7919 7908                          }
7920 7909                  }
7921 7910                  ism_blkp = ism_blkp->iblk_next;
7922 7911          }
7923 7912          if (locked_hatid) {
7924 7913                  sfmmu_ismhat_exit(locked_hatid, 0);
7925 7914          }
7926 7915  
7927 7916          hblktag.htag_id = sfmmup;
7928 7917          hblktag.htag_rid = SFMMU_INVALID_SHMERID;
7929 7918          do {
7930 7919                  hmeshift = HME_HASH_SHIFT(hashno);
7931 7920                  hblktag.htag_bspage = HME_HASH_BSPAGE(vaddr, hmeshift);
7932 7921                  hblktag.htag_rehash = hashno;
7933 7922                  hmebp = HME_HASH_FUNCTION(sfmmup, vaddr, hmeshift);
7934 7923  
7935 7924                  SFMMU_HASH_LOCK(hmebp);
7936 7925  
7937 7926                  HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp);
7938 7927                  if (hmeblkp != NULL) {
7939 7928                          ASSERT(!hmeblkp->hblk_shared);
7940 7929                          HBLKTOHME(sfhmep, hmeblkp, vaddr);
7941 7930                          sfmmu_copytte(&sfhmep->hme_tte, ttep);
7942 7931                          SFMMU_HASH_UNLOCK(hmebp);
7943 7932                          if (TTE_IS_VALID(ttep)) {
7944 7933                                  pfn = TTE_TO_PFN(vaddr, ttep);
7945 7934                                  return (pfn);
7946 7935                          }
7947 7936                          break;
7948 7937                  }
7949 7938                  SFMMU_HASH_UNLOCK(hmebp);
7950 7939                  hashno++;
7951 7940          } while (HME_REHASH(sfmmup) && (hashno <= mmu_hashcnt));
7952 7941  
7953 7942          if (SF_HMERGNMAP_ISNULL(sv_sfmmup)) {
7954 7943                  return (PFN_INVALID);
7955 7944          }
7956 7945          srdp = sv_sfmmup->sfmmu_srdp;
7957 7946          ASSERT(srdp != NULL);
7958 7947          ASSERT(srdp->srd_refcnt != 0);
7959 7948          hblktag.htag_id = srdp;
7960 7949          hashno = 1;
7961 7950          do {
7962 7951                  hmeshift = HME_HASH_SHIFT(hashno);
7963 7952                  hblktag.htag_bspage = HME_HASH_BSPAGE(sv_vaddr, hmeshift);
7964 7953                  hblktag.htag_rehash = hashno;
7965 7954                  hmebp = HME_HASH_FUNCTION(srdp, sv_vaddr, hmeshift);
7966 7955  
7967 7956                  SFMMU_HASH_LOCK(hmebp);
7968 7957                  for (hmeblkp = hmebp->hmeblkp; hmeblkp != NULL;
7969 7958                      hmeblkp = hmeblkp->hblk_next) {
7970 7959                          uint_t rid;
7971 7960                          sf_region_t *rgnp;
7972 7961                          caddr_t rsaddr;
7973 7962                          caddr_t readdr;
7974 7963  
7975 7964                          if (!HTAGS_EQ_SHME(hmeblkp->hblk_tag, hblktag,
7976 7965                              sv_sfmmup->sfmmu_hmeregion_map)) {
7977 7966                                  continue;
7978 7967                          }
7979 7968                          ASSERT(hmeblkp->hblk_shared);
7980 7969                          rid = hmeblkp->hblk_tag.htag_rid;
7981 7970                          ASSERT(SFMMU_IS_SHMERID_VALID(rid));
7982 7971                          ASSERT(rid < SFMMU_MAX_HME_REGIONS);
7983 7972                          rgnp = srdp->srd_hmergnp[rid];
7984 7973                          SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid);
7985 7974                          HBLKTOHME(sfhmep, hmeblkp, sv_vaddr);
7986 7975                          sfmmu_copytte(&sfhmep->hme_tte, ttep);
7987 7976                          rsaddr = rgnp->rgn_saddr;
7988 7977                          readdr = rsaddr + rgnp->rgn_size;
7989 7978  #ifdef DEBUG
7990 7979                          if (TTE_IS_VALID(ttep) ||
7991 7980                              get_hblk_ttesz(hmeblkp) > TTE8K) {
7992 7981                                  caddr_t eva = tte_to_evaddr(hmeblkp, ttep);
7993 7982                                  ASSERT(eva > sv_vaddr);
7994 7983                                  ASSERT(sv_vaddr >= rsaddr);
7995 7984                                  ASSERT(sv_vaddr < readdr);
7996 7985                                  ASSERT(eva <= readdr);
7997 7986                          }
7998 7987  #endif /* DEBUG */
7999 7988                          /*
8000 7989                           * Continue the search if we
8001 7990                           * found an invalid 8K tte outside of the area
8002 7991                           * covered by this hmeblk's region.
8003 7992                           */
8004 7993                          if (TTE_IS_VALID(ttep)) {
8005 7994                                  SFMMU_HASH_UNLOCK(hmebp);
8006 7995                                  pfn = TTE_TO_PFN(sv_vaddr, ttep);
8007 7996                                  return (pfn);
8008 7997                          } else if (get_hblk_ttesz(hmeblkp) > TTE8K ||
8009 7998                              (sv_vaddr >= rsaddr && sv_vaddr < readdr)) {
8010 7999                                  SFMMU_HASH_UNLOCK(hmebp);
8011 8000                                  pfn = PFN_INVALID;
8012 8001                                  return (pfn);
8013 8002                          }
8014 8003                  }
8015 8004                  SFMMU_HASH_UNLOCK(hmebp);
8016 8005                  hashno++;
8017 8006          } while (hashno <= mmu_hashcnt);
8018 8007          return (PFN_INVALID);
8019 8008  }
8020 8009  
8021 8010  
8022 8011  /*
8023 8012   * For compatibility with AT&T and later optimizations
8024 8013   */
8025 8014  /* ARGSUSED */
8026 8015  void
8027 8016  hat_map(struct hat *hat, caddr_t addr, size_t len, uint_t flags)
8028 8017  {
8029 8018          ASSERT(hat != NULL);
8030 8019  }
8031 8020  
8032 8021  /*
8033 8022   * Return the number of mappings to a particular page.  This number is an
8034 8023   * approximation of the number of people sharing the page.
8035 8024   *
8036 8025   * Shared hmeblks and ISM hmeblks are counted as 1 mapping here.
8037 8026   * hat_page_checkshare() can be used to compare a threshold against a share
8038 8027   * count that reflects the number of region sharers, albeit at higher cost.
8039 8028   */
8040 8029  ulong_t
8041 8030  hat_page_getshare(page_t *pp)
8042 8031  {
8043 8032          page_t *spp = pp;       /* start page */
8044 8033          kmutex_t *pml;
8045 8034          ulong_t cnt;
8046 8035          int index, sz = TTE64K;
8047 8036  
8048 8037          /*
8049 8038           * We need to grab the mlist lock to make sure any outstanding
8050 8039           * load/unloads complete.  Otherwise we could return zero
8051 8040           * even though the unload(s) haven't finished yet.
8052 8041           */
8053 8042          pml = sfmmu_mlist_enter(spp);
8054 8043          cnt = spp->p_share;
8055 8044  
8056 8045  #ifdef VAC
8057 8046          if (kpm_enable)
8058 8047                  cnt += spp->p_kpmref;
8059 8048  #endif
8060 8049          if (vpm_enable && pp->p_vpmref) {
8061 8050                  cnt += 1;
8062 8051          }
8063 8052  
8064 8053          /*
8065 8054           * If we have any large mappings, we count the number of
8066 8055           * mappings that this large page is part of.
8067 8056           */
8068 8057          index = PP_MAPINDEX(spp);
8069 8058          index >>= 1;
8070 8059          while (index) {
8071 8060                  pp = PP_GROUPLEADER(spp, sz);
8072 8061                  if ((index & 0x1) && pp != spp) {
8073 8062                          cnt += pp->p_share;
8074 8063                          spp = pp;
8075 8064                  }
8076 8065                  index >>= 1;
8077 8066                  sz++;
8078 8067          }
8079 8068          sfmmu_mlist_exit(pml);
8080 8069          return (cnt);
8081 8070  }
8082 8071  
8083 8072  /*
8084 8073   * Return 1 if the number of mappings exceeds sh_thresh. Return 0
8085 8074   * otherwise. Count shared hmeblks by region's refcnt.
8086 8075   */
8087 8076  int
8088 8077  hat_page_checkshare(page_t *pp, ulong_t sh_thresh)
8089 8078  {
8090 8079          kmutex_t *pml;
8091 8080          ulong_t cnt = 0;
8092 8081          int index, sz = TTE8K;
8093 8082          struct sf_hment *sfhme, *tmphme = NULL;
8094 8083          struct hme_blk *hmeblkp;
8095 8084  
8096 8085          pml = sfmmu_mlist_enter(pp);
8097 8086  
8098 8087  #ifdef VAC
8099 8088          if (kpm_enable)
8100 8089                  cnt = pp->p_kpmref;
8101 8090  #endif
8102 8091  
8103 8092          if (vpm_enable && pp->p_vpmref) {
8104 8093                  cnt += 1;
8105 8094          }
8106 8095  
8107 8096          if (pp->p_share + cnt > sh_thresh) {
8108 8097                  sfmmu_mlist_exit(pml);
8109 8098                  return (1);
8110 8099          }
8111 8100  
8112 8101          index = PP_MAPINDEX(pp);
8113 8102  
8114 8103  again:
8115 8104          for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) {
8116 8105                  tmphme = sfhme->hme_next;
8117 8106                  if (IS_PAHME(sfhme)) {
8118 8107                          continue;
8119 8108                  }
8120 8109  
8121 8110                  hmeblkp = sfmmu_hmetohblk(sfhme);
8122 8111                  if (hme_size(sfhme) != sz) {
8123 8112                          continue;
8124 8113                  }
8125 8114  
8126 8115                  if (hmeblkp->hblk_shared) {
8127 8116                          sf_srd_t *srdp = hblktosrd(hmeblkp);
8128 8117                          uint_t rid = hmeblkp->hblk_tag.htag_rid;
8129 8118                          sf_region_t *rgnp;
8130 8119                          ASSERT(SFMMU_IS_SHMERID_VALID(rid));
8131 8120                          ASSERT(rid < SFMMU_MAX_HME_REGIONS);
8132 8121                          ASSERT(srdp != NULL);
8133 8122                          rgnp = srdp->srd_hmergnp[rid];
8134 8123                          SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp,
8135 8124                              rgnp, rid);
8136 8125                          cnt += rgnp->rgn_refcnt;
8137 8126                  } else {
8138 8127                          cnt++;
8139 8128                  }
8140 8129                  if (cnt > sh_thresh) {
8141 8130                          sfmmu_mlist_exit(pml);
8142 8131                          return (1);
8143 8132                  }
8144 8133          }
8145 8134  
8146 8135          index >>= 1;
8147 8136          sz++;
8148 8137          while (index) {
8149 8138                  pp = PP_GROUPLEADER(pp, sz);
8150 8139                  ASSERT(sfmmu_mlist_held(pp));
8151 8140                  if (index & 0x1) {
8152 8141                          goto again;
8153 8142                  }
8154 8143                  index >>= 1;
8155 8144                  sz++;
8156 8145          }
8157 8146          sfmmu_mlist_exit(pml);
8158 8147          return (0);
8159 8148  }
8160 8149  
8161 8150  /*
8162 8151   * Unload all large mappings to the pp and reset the p_szc field of every
8163 8152   * constituent page according to the remaining mappings.
8164 8153   *
8165 8154   * pp must be locked SE_EXCL. Even though no other constituent pages are
8166 8155   * locked it's legal to unload the large mappings to the pp because all
8167 8156   * constituent pages of large locked mappings have to be locked SE_SHARED.
8168 8157   * This means if we have SE_EXCL lock on one of constituent pages none of the
8169 8158   * large mappings to pp are locked.
8170 8159   *
8171 8160   * Decrease p_szc field starting from the last constituent page and ending
8172 8161   * with the root page. This method is used because other threads rely on the
8173 8162   * root's p_szc to find the lock to synchronize on. After a root page_t's p_szc
8174 8163   * is demoted, other threads will succeed in sfmmu_mlspl_enter(). This
8175 8164   * ensures that p_szc changes of the constituent pages appear atomic for all
8176 8165   * threads that use sfmmu_mlspl_enter() to examine p_szc field.
8177 8166   *
8178 8167   * This mechanism is only used for file system pages where it's not always
8179 8168   * possible to get SE_EXCL locks on all constituent pages to demote the size
8180 8169   * code (as is done for anonymous or kernel large pages).
8181 8170   *
8182 8171   * See more comments in front of sfmmu_mlspl_enter().
8183 8172   */
8184 8173  void
8185 8174  hat_page_demote(page_t *pp)
8186 8175  {
8187 8176          int index;
8188 8177          int sz;
8189 8178          cpuset_t cpuset;
8190 8179          int sync = 0;
8191 8180          page_t *rootpp;
8192 8181          struct sf_hment *sfhme;
8193 8182          struct sf_hment *tmphme = NULL;
8194 8183          uint_t pszc;
8195 8184          page_t *lastpp;
8196 8185          cpuset_t tset;
8197 8186          pgcnt_t npgs;
8198 8187          kmutex_t *pml;
8199 8188          kmutex_t *pmtx = NULL;
8200 8189  
8201 8190          ASSERT(PAGE_EXCL(pp));
8202 8191          ASSERT(!PP_ISFREE(pp));
8203 8192          ASSERT(!PP_ISKAS(pp));
8204 8193          ASSERT(page_szc_lock_assert(pp));
8205 8194          pml = sfmmu_mlist_enter(pp);
8206 8195  
8207 8196          pszc = pp->p_szc;
8208 8197          if (pszc == 0) {
8209 8198                  goto out;
8210 8199          }
8211 8200  
8212 8201          index = PP_MAPINDEX(pp) >> 1;
8213 8202  
8214 8203          if (index) {
8215 8204                  CPUSET_ZERO(cpuset);
8216 8205                  sz = TTE64K;
8217 8206                  sync = 1;
8218 8207          }
8219 8208  
8220 8209          while (index) {
8221 8210                  if (!(index & 0x1)) {
8222 8211                          index >>= 1;
8223 8212                          sz++;
8224 8213                          continue;
8225 8214                  }
8226 8215                  ASSERT(sz <= pszc);
8227 8216                  rootpp = PP_GROUPLEADER(pp, sz);
8228 8217                  for (sfhme = rootpp->p_mapping; sfhme; sfhme = tmphme) {
8229 8218                          tmphme = sfhme->hme_next;
8230 8219                          ASSERT(!IS_PAHME(sfhme));
8231 8220                          if (hme_size(sfhme) != sz) {
8232 8221                                  continue;
8233 8222                          }
8234 8223                          tset = sfmmu_pageunload(rootpp, sfhme, sz);
8235 8224                          CPUSET_OR(cpuset, tset);
8236 8225                  }
8237 8226                  if (index >>= 1) {
8238 8227                          sz++;
8239 8228                  }
8240 8229          }
8241 8230  
8242 8231          ASSERT(!PP_ISMAPPED_LARGE(pp));
8243 8232  
8244 8233          if (sync) {
8245 8234                  xt_sync(cpuset);
8246 8235  #ifdef VAC
8247 8236                  if (PP_ISTNC(pp)) {
8248 8237                          conv_tnc(rootpp, sz);
8249 8238                  }
8250 8239  #endif  /* VAC */
8251 8240          }
8252 8241  
8253 8242          pmtx = sfmmu_page_enter(pp);
8254 8243  
8255 8244          ASSERT(pp->p_szc == pszc);
8256 8245          rootpp = PP_PAGEROOT(pp);
8257 8246          ASSERT(rootpp->p_szc == pszc);
8258 8247          lastpp = PP_PAGENEXT_N(rootpp, TTEPAGES(pszc) - 1);
8259 8248  
8260 8249          while (lastpp != rootpp) {
8261 8250                  sz = PP_MAPINDEX(lastpp) ? fnd_mapping_sz(lastpp) : 0;
8262 8251                  ASSERT(sz < pszc);
8263 8252                  npgs = (sz == 0) ? 1 : TTEPAGES(sz);
8264 8253                  ASSERT(P2PHASE(lastpp->p_pagenum, npgs) == npgs - 1);
8265 8254                  while (--npgs > 0) {
8266 8255                          lastpp->p_szc = (uchar_t)sz;
8267 8256                          lastpp = PP_PAGEPREV(lastpp);
8268 8257                  }
8269 8258                  if (sz) {
8270 8259                          /*
8271 8260                           * make sure that, before the current root's p_szc
8272 8261                           * is updated, all updates to the constituent pages'
8273 8262                           * p_szc fields are globally visible.
8274 8263                           */
8275 8264                          membar_producer();
8276 8265                  }
8277 8266                  lastpp->p_szc = sz;
8278 8267                  ASSERT(IS_P2ALIGNED(lastpp->p_pagenum, TTEPAGES(sz)));
8279 8268                  if (lastpp != rootpp) {
8280 8269                          lastpp = PP_PAGEPREV(lastpp);
8281 8270                  }
8282 8271          }
8283 8272          if (sz == 0) {
8284 8273                  /* the loop above doesn't cover this case */
8285 8274                  rootpp->p_szc = 0;
8286 8275          }
8287 8276  out:
8288 8277          ASSERT(pp->p_szc == 0);
8289 8278          if (pmtx != NULL) {
8290 8279                  sfmmu_page_exit(pmtx);
8291 8280          }
8292 8281          sfmmu_mlist_exit(pml);
8293 8282  }
8294 8283  
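hat_page_demote() lowers p_szc of the non-root constituents first and issues membar_producer() before touching the root, so any thread that observes the new root size also observes consistent constituent sizes. A stand-alone C11 sketch of that publish order; the page array layout is hypothetical:

    #include <stdatomic.h>

    struct cpage {
            _Atomic unsigned char szc;
    };

    static void
    demote_group(struct cpage *root, int npgs, unsigned char new_szc)
    {
            int i;

            /* last constituent down to (but not including) the root */
            for (i = npgs - 1; i > 0; i--)
                    atomic_store_explicit(&root[i].szc, new_szc,
                        memory_order_relaxed);

            /* analogue of membar_producer() */
            atomic_thread_fence(memory_order_release);

            /* the root is published last; readers key off this field */
            atomic_store_explicit(&root[0].szc, new_szc, memory_order_relaxed);
    }
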
8295 8284  /*
8296 8285   * Refresh the HAT ismttecnt[] element for size szc.
8297 8286   * Caller must have set ISM busy flag to prevent mapping
8298 8287   * lists from changing while we're traversing them.
8299 8288   */
8300 8289  pgcnt_t
8301 8290  ism_tsb_entries(sfmmu_t *sfmmup, int szc)
8302 8291  {
8303 8292          ism_blk_t       *ism_blkp = sfmmup->sfmmu_iblk;
8304 8293          ism_map_t       *ism_map;
8305 8294          pgcnt_t         npgs = 0;
8306 8295          pgcnt_t         npgs_scd = 0;
8307 8296          int             j;
8308 8297          sf_scd_t        *scdp;
8309 8298          uchar_t         rid;
8310 8299  
8311 8300          ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY));
8312 8301          scdp = sfmmup->sfmmu_scdp;
8313 8302  
8314 8303          for (; ism_blkp != NULL; ism_blkp = ism_blkp->iblk_next) {
8315 8304                  ism_map = ism_blkp->iblk_maps;
8316 8305                  for (j = 0; ism_map[j].imap_ismhat && j < ISM_MAP_SLOTS; j++) {
8317 8306                          rid = ism_map[j].imap_rid;
8318 8307                          ASSERT(rid == SFMMU_INVALID_ISMRID ||
8319 8308                              rid < sfmmup->sfmmu_srdp->srd_next_ismrid);
8320 8309  
8321 8310                          if (scdp != NULL && rid != SFMMU_INVALID_ISMRID &&
8322 8311                              SF_RGNMAP_TEST(scdp->scd_ismregion_map, rid)) {
8323 8312                                  /* ISM is in sfmmup's SCD */
8324 8313                                  npgs_scd +=
8325 8314                                      ism_map[j].imap_ismhat->sfmmu_ttecnt[szc];
8326 8315                          } else {
8327 8316                          /* ISM is not in SCD */
8328 8317                                  npgs +=
8329 8318                                      ism_map[j].imap_ismhat->sfmmu_ttecnt[szc];
8330 8319                          }
8331 8320                  }
8332 8321          }
8333 8322          sfmmup->sfmmu_ismttecnt[szc] = npgs;
8334 8323          sfmmup->sfmmu_scdismttecnt[szc] = npgs_scd;
8335 8324          return (npgs);
8336 8325  }
8337 8326  
8338 8327  /*
8339 8328   * Yield the memory claim requirement for an address space.
8340 8329   *
8341 8330   * This is currently implemented as the number of bytes that have active
8342 8331   * hardware translations that have page structures.  Therefore, it can
8343 8332   * underestimate the traditional resident set size, e.g., if the
8344 8333   * physical page is present and the hardware translation is missing;
8345 8334   * and it can overestimate the rss, e.g., if there are active
8346 8335   * translations to a frame buffer with page structs.
8347 8336   * Also, it does not take sharing into account.
8348 8337   *
8349 8338   * Note that we don't acquire locks here since this function is most often
8350 8339   * called from the clock thread.
8351 8340   */
8352 8341  size_t
8353 8342  hat_get_mapped_size(struct hat *hat)
8354 8343  {
8355 8344          size_t          assize = 0;
8356 8345          int             i;
8357 8346  
8358 8347          if (hat == NULL)
8359 8348                  return (0);
8360 8349  
8361 8350          for (i = 0; i < mmu_page_sizes; i++)
8362 8351                  assize += ((pgcnt_t)hat->sfmmu_ttecnt[i] +
8363 8352                      (pgcnt_t)hat->sfmmu_scdrttecnt[i]) * TTEBYTES(i);
8364 8353  
8365 8354          if (hat->sfmmu_iblk == NULL)
8366 8355                  return (assize);
8367 8356  
8368 8357          for (i = 0; i < mmu_page_sizes; i++)
8369 8358                  assize += ((pgcnt_t)hat->sfmmu_ismttecnt[i] +
8370 8359                      (pgcnt_t)hat->sfmmu_scdismttecnt[i]) * TTEBYTES(i);
8371 8360  
8372 8361          return (assize);
8373 8362  }
8374 8363  
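To make the accounting above concrete, here is a minimal user-space sketch of the same summation: per-pagesize translation counts multiplied by the page size and accumulated. The page-size table and the names below (N_PGSZ, pgsz_bytes, mapped_size) are illustrative stand-ins for mmu_page_sizes, sfmmu_ttecnt[]/sfmmu_scdrttecnt[] and TTEBYTES(); they are not the kernel's definitions.

#include <stdio.h>
#include <stddef.h>

/* Hypothetical stand-ins for mmu_page_sizes and TTEBYTES(i). */
#define N_PGSZ  4
static const size_t pgsz_bytes[N_PGSZ] = {
        8 * 1024,               /* TTE8K */
        64 * 1024,              /* TTE64K */
        512 * 1024,             /* TTE512K */
        4 * 1024 * 1024         /* TTE4M */
};

/* Sum (count[i] * pagesize[i]) the way hat_get_mapped_size() does. */
static size_t
mapped_size(const size_t ttecnt[N_PGSZ], const size_t scd_ttecnt[N_PGSZ])
{
        size_t assize = 0;
        int i;

        for (i = 0; i < N_PGSZ; i++)
                assize += (ttecnt[i] + scd_ttecnt[i]) * pgsz_bytes[i];
        return (assize);
}

int
main(void)
{
        size_t ttecnt[N_PGSZ] = { 100, 4, 0, 2 };       /* private counts */
        size_t scd[N_PGSZ] = { 10, 0, 0, 1 };           /* shared-context counts */

        printf("mapped: %zu bytes\n", mapped_size(ttecnt, scd));
        return (0);
}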
8375 8364  int
8376 8365  hat_stats_enable(struct hat *hat)
8377 8366  {
8378 8367          hatlock_t       *hatlockp;
8379 8368  
8380 8369          hatlockp = sfmmu_hat_enter(hat);
8381 8370          hat->sfmmu_rmstat++;
8382 8371          sfmmu_hat_exit(hatlockp);
8383 8372          return (1);
8384 8373  }
8385 8374  
8386 8375  void
8387 8376  hat_stats_disable(struct hat *hat)
8388 8377  {
8389 8378          hatlock_t       *hatlockp;
8390 8379  
8391 8380          hatlockp = sfmmu_hat_enter(hat);
8392 8381          hat->sfmmu_rmstat--;
8393 8382          sfmmu_hat_exit(hatlockp);
8394 8383  }
8395 8384  
8396 8385  /*
8397 8386   * Routines for entering or removing ourselves from the
8398 8387   * ism_hat's mapping list. This is used for both private and
8399 8388   * SCD hats.
8400 8389   */
8401 8390  static void
8402 8391  iment_add(struct ism_ment *iment,  struct hat *ism_hat)
8403 8392  {
8404 8393          ASSERT(MUTEX_HELD(&ism_mlist_lock));
8405 8394  
8406 8395          iment->iment_prev = NULL;
8407 8396          iment->iment_next = ism_hat->sfmmu_iment;
8408 8397          if (ism_hat->sfmmu_iment) {
8409 8398                  ism_hat->sfmmu_iment->iment_prev = iment;
8410 8399          }
8411 8400          ism_hat->sfmmu_iment = iment;
8412 8401  }
8413 8402  
8414 8403  static void
8415 8404  iment_sub(struct ism_ment *iment, struct hat *ism_hat)
8416 8405  {
8417 8406          ASSERT(MUTEX_HELD(&ism_mlist_lock));
8418 8407  
8419 8408          if (ism_hat->sfmmu_iment == NULL) {
8420 8409                  panic("ism map entry remove - no entries");
8421 8410          }
8422 8411  
8423 8412          if (iment->iment_prev) {
8424 8413                  ASSERT(ism_hat->sfmmu_iment != iment);
8425 8414                  iment->iment_prev->iment_next = iment->iment_next;
8426 8415          } else {
8427 8416                  ASSERT(ism_hat->sfmmu_iment == iment);
8428 8417                  ism_hat->sfmmu_iment = iment->iment_next;
8429 8418          }
8430 8419  
8431 8420          if (iment->iment_next) {
8432 8421                  iment->iment_next->iment_prev = iment->iment_prev;
8433 8422          }
8434 8423  
8435 8424          /*
8436 8425           * zero out the entry
8437 8426           */
8438 8427          iment->iment_next = NULL;
8439 8428          iment->iment_prev = NULL;
8440 8429          iment->iment_hat =  NULL;
8441 8430          iment->iment_base_va = 0;
8442 8431  }
8443 8432  
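The two routines above are a plain doubly linked list head insert and unlink, performed under ism_mlist_lock. Below is a self-contained sketch of the same pattern with a generic node type in place of ism_ment_t; the names (struct node, node_add, node_sub) are illustrative only.

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* Simplified stand-in for ism_ment_t: a doubly linked node. */
struct node {
        struct node *prev;
        struct node *next;
};

/* Head insert, as iment_add() does. */
static void
node_add(struct node **headp, struct node *n)
{
        n->prev = NULL;
        n->next = *headp;
        if (*headp != NULL)
                (*headp)->prev = n;
        *headp = n;
}

/* Unlink and clear, as iment_sub() does. */
static void
node_sub(struct node **headp, struct node *n)
{
        assert(*headp != NULL);
        if (n->prev != NULL)
                n->prev->next = n->next;
        else
                *headp = n->next;
        if (n->next != NULL)
                n->next->prev = n->prev;
        n->next = n->prev = NULL;
}

int
main(void)
{
        struct node a = { 0 }, b = { 0 };
        struct node *head = NULL;

        node_add(&head, &a);
        node_add(&head, &b);    /* list: b -> a */
        node_sub(&head, &a);    /* list: b */
        printf("head is %s\n", head == &b ? "b" : "?");
        return (0);
}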
8444 8433  /*
8445 8434   * Hat_share()/unshare() return a (non-zero) error
8446 8435   * when saddr and daddr are not properly aligned.
8447 8436   *
8448 8437   * The top level mapping element determines the alignment
8449 8438   * requirement for saddr and daddr, depending on different
8450 8439   * architectures.
8451 8440   *
8452 8441   * When hat_share()/unshare() are not supported,
8453 8442   * HATOP_SHARE()/UNSHARE() return 0
8454 8443   */
8455 8444  int
8456 8445  hat_share(struct hat *sfmmup, caddr_t addr,
8457 8446          struct hat *ism_hatid, caddr_t sptaddr, size_t len, uint_t ismszc)
8458 8447  {
8459 8448          ism_blk_t       *ism_blkp;
8460 8449          ism_blk_t       *new_iblk;
8461 8450          ism_map_t       *ism_map;
8462 8451          ism_ment_t      *ism_ment;
8463 8452          int             i, added;
8464 8453          hatlock_t       *hatlockp;
8465 8454          int             reload_mmu = 0;
8466 8455          uint_t          ismshift = page_get_shift(ismszc);
8467 8456          size_t          ismpgsz = page_get_pagesize(ismszc);
8468 8457          uint_t          ismmask = (uint_t)ismpgsz - 1;
8469 8458          size_t          sh_size = ISM_SHIFT(ismshift, len);
8470 8459          ushort_t        ismhatflag;
8471 8460          hat_region_cookie_t rcookie;
8472 8461          sf_scd_t        *old_scdp;
8473 8462  
8474 8463  #ifdef DEBUG
8475 8464          caddr_t         eaddr = addr + len;
8476 8465  #endif /* DEBUG */
8477 8466  
8478 8467          ASSERT(ism_hatid != NULL && sfmmup != NULL);
8479 8468          ASSERT(sptaddr == ISMID_STARTADDR);
8480 8469          /*
8481 8470           * Check the alignment.
8482 8471           */
8483 8472          if (!ISM_ALIGNED(ismshift, addr) || !ISM_ALIGNED(ismshift, sptaddr))
8484 8473                  return (EINVAL);
8485 8474  
8486 8475          /*
8487 8476           * Check size alignment.
8488 8477           */
8489 8478          if (!ISM_ALIGNED(ismshift, len))
8490 8479                  return (EINVAL);
8491 8480  
8492 8481          /*
8493 8482           * Allocate ism_ment for the ism_hat's mapping list, and an
8494 8483           * ism map blk in case we need one.  We must do our
8495 8484           * allocations before acquiring locks to prevent a deadlock
8496 8485           * in the kmem allocator on the mapping list lock.
8497 8486           */
8498 8487          new_iblk = kmem_cache_alloc(ism_blk_cache, KM_SLEEP);
8499 8488          ism_ment = kmem_cache_alloc(ism_ment_cache, KM_SLEEP);
8500 8489  
8501 8490          /*
8502 8491           * Serialize ISM mappings with the ISM busy flag, and also the
8503 8492           * trap handlers.
8504 8493           */
8505 8494          sfmmu_ismhat_enter(sfmmup, 0);
8506 8495  
8507 8496          /*
8508 8497           * Allocate an ism map blk if necessary.
8509 8498           */
8510 8499          if (sfmmup->sfmmu_iblk == NULL) {
8511 8500                  sfmmup->sfmmu_iblk = new_iblk;
8512 8501                  bzero(new_iblk, sizeof (*new_iblk));
8513 8502                  new_iblk->iblk_nextpa = (uint64_t)-1;
8514 8503                  membar_stst();  /* make sure next ptr visible to all CPUs */
8515 8504                  sfmmup->sfmmu_ismblkpa = va_to_pa((caddr_t)new_iblk);
8516 8505                  reload_mmu = 1;
8517 8506                  new_iblk = NULL;
8518 8507          }
8519 8508  
8520 8509  #ifdef DEBUG
8521 8510          /*
8522 8511           * Make sure mapping does not already exist.
8523 8512           */
8524 8513          ism_blkp = sfmmup->sfmmu_iblk;
8525 8514          while (ism_blkp != NULL) {
8526 8515                  ism_map = ism_blkp->iblk_maps;
8527 8516                  for (i = 0; i < ISM_MAP_SLOTS && ism_map[i].imap_ismhat; i++) {
8528 8517                          if ((addr >= ism_start(ism_map[i]) &&
8529 8518                              addr < ism_end(ism_map[i])) ||
8530 8519                              eaddr > ism_start(ism_map[i]) &&
8531 8520                              eaddr <= ism_end(ism_map[i])) {
8532 8521                                  panic("sfmmu_share: Already mapped!");
8533 8522                          }
8534 8523                  }
8535 8524                  ism_blkp = ism_blkp->iblk_next;
8536 8525          }
8537 8526  #endif /* DEBUG */
8538 8527  
8539 8528          ASSERT(ismszc >= TTE4M);
8540 8529          if (ismszc == TTE4M) {
8541 8530                  ismhatflag = HAT_4M_FLAG;
8542 8531          } else if (ismszc == TTE32M) {
8543 8532                  ismhatflag = HAT_32M_FLAG;
8544 8533          } else if (ismszc == TTE256M) {
8545 8534                  ismhatflag = HAT_256M_FLAG;
8546 8535          }
8547 8536          /*
8548 8537           * Add mapping to first available mapping slot.
8549 8538           */
8550 8539          ism_blkp = sfmmup->sfmmu_iblk;
8551 8540          added = 0;
8552 8541          while (!added) {
8553 8542                  ism_map = ism_blkp->iblk_maps;
8554 8543                  for (i = 0; i < ISM_MAP_SLOTS; i++)  {
8555 8544                          if (ism_map[i].imap_ismhat == NULL) {
8556 8545  
8557 8546                                  ism_map[i].imap_ismhat = ism_hatid;
8558 8547                                  ism_map[i].imap_vb_shift = (uchar_t)ismshift;
8559 8548                                  ism_map[i].imap_rid = SFMMU_INVALID_ISMRID;
8560 8549                                  ism_map[i].imap_hatflags = ismhatflag;
8561 8550                                  ism_map[i].imap_sz_mask = ismmask;
8562 8551                                  /*
8563 8552                                   * imap_seg is checked in ISM_CHECK to see if it is
8564 8553                                   * non-NULL; if so, the other fields are assumed valid.
8565 8554                                   */
8566 8555                                  membar_stst();
8567 8556                                  ism_map[i].imap_seg = (uintptr_t)addr | sh_size;
8568 8557                                  ism_map[i].imap_ment = ism_ment;
8569 8558  
8570 8559                                  /*
8571 8560                                   * Now add ourselves to the ism_hat's
8572 8561                                   * mapping list.
8573 8562                                   */
8574 8563                                  ism_ment->iment_hat = sfmmup;
8575 8564                                  ism_ment->iment_base_va = addr;
8576 8565                                  ism_hatid->sfmmu_ismhat = 1;
8577 8566                                  mutex_enter(&ism_mlist_lock);
8578 8567                                  iment_add(ism_ment, ism_hatid);
8579 8568                                  mutex_exit(&ism_mlist_lock);
8580 8569                                  added = 1;
8581 8570                                  break;
8582 8571                          }
8583 8572                  }
8584 8573                  if (!added && ism_blkp->iblk_next == NULL) {
8585 8574                          ism_blkp->iblk_next = new_iblk;
8586 8575                          new_iblk = NULL;
8587 8576                          bzero(ism_blkp->iblk_next,
8588 8577                              sizeof (*ism_blkp->iblk_next));
8589 8578                          ism_blkp->iblk_next->iblk_nextpa = (uint64_t)-1;
8590 8579                          membar_stst();
8591 8580                          ism_blkp->iblk_nextpa =
8592 8581                              va_to_pa((caddr_t)ism_blkp->iblk_next);
8593 8582                  }
8594 8583                  ism_blkp = ism_blkp->iblk_next;
8595 8584          }
8596 8585  
8597 8586          /*
8598 8587           * After calling hat_join_region, sfmmup may join a new SCD or
8599 8588           * move from the old scd to a new scd, in which case, we want to
8600 8589           * shrink the sfmmup's private tsb size, i.e., pass shrink to
8601 8590           * sfmmu_check_page_sizes at the end of this routine.
8602 8591           */
8603 8592          old_scdp = sfmmup->sfmmu_scdp;
8604 8593  
8605 8594          rcookie = hat_join_region(sfmmup, addr, len, (void *)ism_hatid, 0,
8606 8595              PROT_ALL, ismszc, NULL, HAT_REGION_ISM);
8607 8596          if (rcookie != HAT_INVALID_REGION_COOKIE) {
8608 8597                  ism_map[i].imap_rid = (uchar_t)((uint64_t)rcookie);
8609 8598          }
8610 8599          /*
8611 8600           * Update our counters for this sfmmup's ism mappings.
8612 8601           */
8613 8602          for (i = 0; i <= ismszc; i++) {
8614 8603                  if (!(disable_ism_large_pages & (1 << i)))
8615 8604                          (void) ism_tsb_entries(sfmmup, i);
8616 8605          }
8617 8606  
8618 8607          /*
8619 8608           * For ISM and DISM we do not support 512K pages, so we only
8620 8609           * search the 4M and 8K/64K hashes for 4 pagesize cpus, and search the
8621 8610           * 256M or 32M, and 4M and 8K/64K hashes for 6 pagesize cpus.
8622 8611           *
8623 8612           * Need to set 32M/256M ISM flags to make sure
8624 8613           * sfmmu_check_page_sizes() enables them on Panther.
8625 8614           */
8626 8615          ASSERT((disable_ism_large_pages & (1 << TTE512K)) != 0);
8627 8616  
8628 8617          switch (ismszc) {
8629 8618          case TTE256M:
8630 8619                  if (!SFMMU_FLAGS_ISSET(sfmmup, HAT_256M_ISM)) {
8631 8620                          hatlockp = sfmmu_hat_enter(sfmmup);
8632 8621                          SFMMU_FLAGS_SET(sfmmup, HAT_256M_ISM);
8633 8622                          sfmmu_hat_exit(hatlockp);
8634 8623                  }
8635 8624                  break;
8636 8625          case TTE32M:
8637 8626                  if (!SFMMU_FLAGS_ISSET(sfmmup, HAT_32M_ISM)) {
8638 8627                          hatlockp = sfmmu_hat_enter(sfmmup);
8639 8628                          SFMMU_FLAGS_SET(sfmmup, HAT_32M_ISM);
8640 8629                          sfmmu_hat_exit(hatlockp);
8641 8630                  }
8642 8631                  break;
8643 8632          default:
8644 8633                  break;
8645 8634          }
8646 8635  
8647 8636          /*
8648 8637           * If we updated the ismblkpa for this HAT we must make
8649 8638           * sure all CPUs running this process reload their tsbmiss area.
8650 8639           * Otherwise they will fail to load the mappings in the tsbmiss
8651 8640           * handler and will loop calling pagefault().
8652 8641           */
8653 8642          if (reload_mmu) {
8654 8643                  hatlockp = sfmmu_hat_enter(sfmmup);
8655 8644                  sfmmu_sync_mmustate(sfmmup);
8656 8645                  sfmmu_hat_exit(hatlockp);
8657 8646          }
8658 8647  
8659 8648          sfmmu_ismhat_exit(sfmmup, 0);
8660 8649  
8661 8650          /*
8662 8651           * Free up ismblk if we didn't use it.
8663 8652           */
8664 8653          if (new_iblk != NULL)
8665 8654                  kmem_cache_free(ism_blk_cache, new_iblk);
8666 8655  
8667 8656          /*
8668 8657           * Check TSB and TLB page sizes.
8669 8658           */
8670 8659          if (sfmmup->sfmmu_scdp != NULL && old_scdp != sfmmup->sfmmu_scdp) {
8671 8660                  sfmmu_check_page_sizes(sfmmup, 0);
8672 8661          } else {
8673 8662                  sfmmu_check_page_sizes(sfmmup, 1);
8674 8663          }
8675 8664          return (0);
8676 8665  }
8677 8666  
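hat_share() above places the new mapping in the first free slot of a chain of fixed-size ism map blocks, appending a fresh block when the chain is full. Here is a simplified, self-contained model of that slot-allocation pattern; SLOTS, struct blk and add_mapping() are made-up names, allocation failures are not checked, and the kernel's trick of pre-allocating the new block before taking any locks is omitted.

#include <stdio.h>
#include <stdlib.h>

/* Each block holds a fixed number of slots plus a next pointer. */
#define SLOTS   4

struct blk {
        void            *slot[SLOTS];
        struct blk      *next;
};

/* Put the mapping into the first empty slot, growing the chain if needed. */
static void
add_mapping(struct blk **headp, void *mapping)
{
        struct blk *bp;
        int i;

        if (*headp == NULL)
                *headp = calloc(1, sizeof (struct blk));

        for (bp = *headp; ; bp = bp->next) {
                for (i = 0; i < SLOTS; i++) {
                        if (bp->slot[i] == NULL) {
                                bp->slot[i] = mapping;
                                return;
                        }
                }
                if (bp->next == NULL)
                        bp->next = calloc(1, sizeof (struct blk));
        }
}

int
main(void)
{
        struct blk *head = NULL;
        int vals[6] = { 1, 2, 3, 4, 5, 6 };
        int i;

        for (i = 0; i < 6; i++)
                add_mapping(&head, &vals[i]);
        printf("second block used: %s\n", head->next != NULL ? "yes" : "no");
        return (0);
}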
8678 8667  /*
8679 8668   * hat_unshare removes exactly one ism_map from
8680 8669   * this process's as.  It expects multiple calls
8681 8670   * to hat_unshare for multiple shm segments.
8682 8671   */
8683 8672  void
8684 8673  hat_unshare(struct hat *sfmmup, caddr_t addr, size_t len, uint_t ismszc)
8685 8674  {
8686 8675          ism_map_t       *ism_map;
8687 8676          ism_ment_t      *free_ment = NULL;
8688 8677          ism_blk_t       *ism_blkp;
8689 8678          struct hat      *ism_hatid;
8690 8679          int             found, i;
8691 8680          hatlock_t       *hatlockp;
8692 8681          struct tsb_info *tsbinfo;
8693 8682          uint_t          ismshift = page_get_shift(ismszc);
8694 8683          size_t          sh_size = ISM_SHIFT(ismshift, len);
8695 8684          uchar_t         ism_rid;
8696 8685          sf_scd_t        *old_scdp;
8697 8686  
8698 8687          ASSERT(ISM_ALIGNED(ismshift, addr));
8699 8688          ASSERT(ISM_ALIGNED(ismshift, len));
8700 8689          ASSERT(sfmmup != NULL);
8701 8690          ASSERT(sfmmup != ksfmmup);
8702 8691  
8703 8692          ASSERT(sfmmup->sfmmu_as != NULL);
8704 8693  
8705 8694          /*
8706 8695           * Make sure that during the entire time ISM mappings are removed,
8707 8696           * the trap handlers serialize behind us, and that no one else
8708 8697           * can be mucking with ISM mappings.  This also lets us get away
8709 8698           * with not doing expensive cross calls to flush the TLB -- we
8710 8699           * just discard the context, flush the entire TSB, and call it
8711 8700           * a day.
8712 8701           */
8713 8702          sfmmu_ismhat_enter(sfmmup, 0);
8714 8703  
8715 8704          /*
8716 8705           * Remove the mapping.
8717 8706           *
8718 8707           * We can't have any holes in the ism map.
8719 8708           * The tsb miss code while searching the ism map will
8720 8709           * stop on an empty map slot.  So we must move
8721 8710           * everyone past the hole up by one, if any.
8722 8711           *
8723 8712           * Also, empty ism map blks are not freed until the
8724 8713           * process exits. This is to prevent an MT race condition
8725 8714           * between sfmmu_unshare() and sfmmu_tsbmiss_exception().
8726 8715           */
8727 8716          found = 0;
8728 8717          ism_blkp = sfmmup->sfmmu_iblk;
8729 8718          while (!found && ism_blkp != NULL) {
8730 8719                  ism_map = ism_blkp->iblk_maps;
8731 8720                  for (i = 0; i < ISM_MAP_SLOTS; i++) {
8732 8721                          if (addr == ism_start(ism_map[i]) &&
8733 8722                              sh_size == (size_t)(ism_size(ism_map[i]))) {
8734 8723                                  found = 1;
8735 8724                                  break;
8736 8725                          }
8737 8726                  }
8738 8727                  if (!found)
8739 8728                          ism_blkp = ism_blkp->iblk_next;
8740 8729          }
8741 8730  
8742 8731          if (found) {
8743 8732                  ism_hatid = ism_map[i].imap_ismhat;
8744 8733                  ism_rid = ism_map[i].imap_rid;
8745 8734                  ASSERT(ism_hatid != NULL);
8746 8735                  ASSERT(ism_hatid->sfmmu_ismhat == 1);
8747 8736  
8748 8737                  /*
8749 8738                   * After hat_leave_region, the sfmmup may leave SCD,
8750 8739                   * in which case, we want to grow the private tsb size when
8751 8740                   * calling sfmmu_check_page_sizes at the end of the routine.
8752 8741                   */
8753 8742                  old_scdp = sfmmup->sfmmu_scdp;
8754 8743                  /*
8755 8744                   * Then remove ourselves from the region.
8756 8745                   */
8757 8746                  if (ism_rid != SFMMU_INVALID_ISMRID) {
8758 8747                          hat_leave_region(sfmmup, (void *)((uint64_t)ism_rid),
8759 8748                              HAT_REGION_ISM);
8760 8749                  }
8761 8750  
8762 8751                  /*
8763 8752                   * And now guarantee that any other cpu
8764 8753                   * that tries to process an ISM miss
8765 8754                   * will go to tl=0.
8766 8755                   */
8767 8756                  hatlockp = sfmmu_hat_enter(sfmmup);
8768 8757                  sfmmu_invalidate_ctx(sfmmup);
8769 8758                  sfmmu_hat_exit(hatlockp);
8770 8759  
8771 8760                  /*
8772 8761                   * Remove ourselves from the ism mapping list.
8773 8762                   */
8774 8763                  mutex_enter(&ism_mlist_lock);
8775 8764                  iment_sub(ism_map[i].imap_ment, ism_hatid);
8776 8765                  mutex_exit(&ism_mlist_lock);
8777 8766                  free_ment = ism_map[i].imap_ment;
8778 8767  
8779 8768                  /*
8780 8769                   * We delete the ism map by copying
8781 8770                   * the next map over the current one.
8782 8771                   * We will take the next one in the maps
8783 8772                   * array or from the next ism_blk.
8784 8773                   */
8785 8774                  while (ism_blkp != NULL) {
8786 8775                          ism_map = ism_blkp->iblk_maps;
8787 8776                          while (i < (ISM_MAP_SLOTS - 1)) {
8788 8777                                  ism_map[i] = ism_map[i + 1];
8789 8778                                  i++;
8790 8779                          }
8791 8780                          /* i == (ISM_MAP_SLOTS - 1) */
8792 8781                          ism_blkp = ism_blkp->iblk_next;
8793 8782                          if (ism_blkp != NULL) {
8794 8783                                  ism_map[i] = ism_blkp->iblk_maps[0];
8795 8784                                  i = 0;
8796 8785                          } else {
8797 8786                                  ism_map[i].imap_seg = 0;
8798 8787                                  ism_map[i].imap_vb_shift = 0;
8799 8788                                  ism_map[i].imap_rid = SFMMU_INVALID_ISMRID;
8800 8789                                  ism_map[i].imap_hatflags = 0;
8801 8790                                  ism_map[i].imap_sz_mask = 0;
8802 8791                                  ism_map[i].imap_ismhat = NULL;
8803 8792                                  ism_map[i].imap_ment = NULL;
8804 8793                          }
8805 8794                  }
8806 8795  
8807 8796                  /*
8808 8797                   * Now flush entire TSB for the process, since
8809 8798                   * demapping page by page can be too expensive.
8810 8799                   * We don't have to flush the TLB here anymore
8811 8800                   * since we switch to a new TLB ctx instead.
8812 8801                   * Also, there is no need to flush if the process
8813 8802                   * is exiting since the TSB will be freed later.
8814 8803                   */
8815 8804                  if (!sfmmup->sfmmu_free) {
8816 8805                          hatlockp = sfmmu_hat_enter(sfmmup);
8817 8806                          for (tsbinfo = sfmmup->sfmmu_tsb; tsbinfo != NULL;
8818 8807                              tsbinfo = tsbinfo->tsb_next) {
8819 8808                                  if (tsbinfo->tsb_flags & TSB_SWAPPED)
8820 8809                                          continue;
8821 8810                                  if (tsbinfo->tsb_flags & TSB_RELOC_FLAG) {
8822 8811                                          tsbinfo->tsb_flags |=
8823 8812                                              TSB_FLUSH_NEEDED;
8824 8813                                          continue;
8825 8814                                  }
8826 8815  
8827 8816                                  sfmmu_inv_tsb(tsbinfo->tsb_va,
8828 8817                                      TSB_BYTES(tsbinfo->tsb_szc));
8829 8818                          }
8830 8819                          sfmmu_hat_exit(hatlockp);
8831 8820                  }
8832 8821          }
8833 8822  
8834 8823          /*
8835 8824           * Update our counters for this sfmmup's ism mappings.
8836 8825           */
8837 8826          for (i = 0; i <= ismszc; i++) {
8838 8827                  if (!(disable_ism_large_pages & (1 << i)))
8839 8828                          (void) ism_tsb_entries(sfmmup, i);
8840 8829          }
8841 8830  
8842 8831          sfmmu_ismhat_exit(sfmmup, 0);
8843 8832  
8844 8833          /*
8845 8834           * We must do our freeing here after dropping locks
8846 8835           * to prevent a deadlock in the kmem allocator on the
8847 8836           * mapping list lock.
8848 8837           */
8849 8838          if (free_ment != NULL)
8850 8839                  kmem_cache_free(ism_ment_cache, free_ment);
8851 8840  
8852 8841          /*
8853 8842           * Check TSB and TLB page sizes if the process isn't exiting.
8854 8843           */
8855 8844          if (!sfmmup->sfmmu_free) {
8856 8845                  if (found && old_scdp != NULL && sfmmup->sfmmu_scdp == NULL) {
8857 8846                          sfmmu_check_page_sizes(sfmmup, 1);
8858 8847                  } else {
8859 8848                          sfmmu_check_page_sizes(sfmmup, 0);
8860 8849                  }
8861 8850          }
8862 8851  }
8863 8852  
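The removal path above keeps the ism map hole-free: the deleted slot is overwritten by shifting every later slot up by one, continuing into the next block in the chain, and the final slot is cleared. Below is a standalone sketch of that compaction using an int array as a stand-in for ism_map_t slots; SLOTS and the struct are illustrative, not the kernel's.

#include <stdio.h>

#define SLOTS   4

struct blk {
        int             slot[SLOTS];    /* 0 == empty */
        struct blk      *next;
};

/* Delete slot i of bp by shifting later slots (and chained blocks) up. */
static void
remove_slot(struct blk *bp, int i)
{
        while (bp != NULL) {
                while (i < SLOTS - 1) {
                        bp->slot[i] = bp->slot[i + 1];
                        i++;
                }
                /* i == SLOTS - 1: pull the first slot of the next block up */
                if (bp->next != NULL) {
                        bp->slot[i] = bp->next->slot[0];
                        bp = bp->next;
                        i = 0;
                } else {
                        bp->slot[i] = 0;
                        bp = NULL;
                }
        }
}

int
main(void)
{
        struct blk b2 = { { 5, 6, 0, 0 }, NULL };
        struct blk b1 = { { 1, 2, 3, 4 }, &b2 };
        int i;

        remove_slot(&b1, 1);            /* delete the '2' */
        for (i = 0; i < SLOTS; i++)
                printf("%d ", b1.slot[i]);
        printf("| ");
        for (i = 0; i < SLOTS; i++)
                printf("%d ", b2.slot[i]);
        printf("\n");                   /* expect: 1 3 4 5 | 6 0 0 0 */
        return (0);
}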
8864 8853  /* ARGSUSED */
8865 8854  static int
8866 8855  sfmmu_idcache_constructor(void *buf, void *cdrarg, int kmflags)
8867 8856  {
8868 8857          /* void *buf is sfmmu_t pointer */
8869 8858          bzero(buf, sizeof (sfmmu_t));
8870 8859  
8871 8860          return (0);
8872 8861  }
8873 8862  
8874 8863  /* ARGSUSED */
8875 8864  static void
8876 8865  sfmmu_idcache_destructor(void *buf, void *cdrarg)
8877 8866  {
8878 8867          /* void *buf is sfmmu_t pointer */
8879 8868  }
8880 8869  
8881 8870  /*
8882 8871   * setup kmem hmeblks by bzeroing all members and initializing the nextpa
8883 8872   * field to be the pa of this hmeblk
8884 8873   */
8885 8874  /* ARGSUSED */
8886 8875  static int
8887 8876  sfmmu_hblkcache_constructor(void *buf, void *cdrarg, int kmflags)
8888 8877  {
8889 8878          struct hme_blk *hmeblkp;
8890 8879  
8891 8880          bzero(buf, (size_t)cdrarg);
8892 8881          hmeblkp = (struct hme_blk *)buf;
8893 8882          hmeblkp->hblk_nextpa = va_to_pa((caddr_t)hmeblkp);
8894 8883  
8895 8884  #ifdef  HBLK_TRACE
8896 8885          mutex_init(&hmeblkp->hblk_audit_lock, NULL, MUTEX_DEFAULT, NULL);
8897 8886  #endif  /* HBLK_TRACE */
8898 8887  
8899 8888          return (0);
8900 8889  }
8901 8890  
8902 8891  /* ARGSUSED */
8903 8892  static void
8904 8893  sfmmu_hblkcache_destructor(void *buf, void *cdrarg)
8905 8894  {
8906 8895  
8907 8896  #ifdef  HBLK_TRACE
8908 8897  
8909 8898          struct hme_blk *hmeblkp;
8910 8899  
8911 8900          hmeblkp = (struct hme_blk *)buf;
8912 8901          mutex_destroy(&hmeblkp->hblk_audit_lock);
8913 8902  
8914 8903  #endif  /* HBLK_TRACE */
8915 8904  }
8916 8905  
8917 8906  #define SFMMU_CACHE_RECLAIM_SCAN_RATIO 8
8918 8907  static int sfmmu_cache_reclaim_scan_ratio = SFMMU_CACHE_RECLAIM_SCAN_RATIO;
8919 8908  /*
8920 8909   * The kmem allocator will callback into our reclaim routine when the system
8921 8910   * The kmem allocator will call back into our reclaim routine when the system
8922 8911   * is running low on memory.  We traverse the hash and free up all unused but
8923 8912   * as well.
8924 8913   */
8925 8914  /*ARGSUSED*/
8926 8915  static void
8927 8916  sfmmu_hblkcache_reclaim(void *cdrarg)
8928 8917  {
8929 8918          int i;
8930 8919          struct hmehash_bucket *hmebp;
8931 8920          struct hme_blk *hmeblkp, *nx_hblk, *pr_hblk = NULL;
8932 8921          static struct hmehash_bucket *uhmehash_reclaim_hand;
8933 8922          static struct hmehash_bucket *khmehash_reclaim_hand;
8934 8923          struct hme_blk *list = NULL, *last_hmeblkp;
8935 8924          cpuset_t cpuset = cpu_ready_set;
8936 8925          cpu_hme_pend_t *cpuhp;
8937 8926  
8938 8927          /* Free up hmeblks on the cpu pending lists */
8939 8928          for (i = 0; i < NCPU; i++) {
8940 8929                  cpuhp = &cpu_hme_pend[i];
8941 8930                  if (cpuhp->chp_listp != NULL)  {
8942 8931                          mutex_enter(&cpuhp->chp_mutex);
8943 8932                          if (cpuhp->chp_listp == NULL) {
8944 8933                                  mutex_exit(&cpuhp->chp_mutex);
8945 8934                                  continue;
8946 8935                          }
8947 8936                          for (last_hmeblkp = cpuhp->chp_listp;
8948 8937                              last_hmeblkp->hblk_next != NULL;
8949 8938                              last_hmeblkp = last_hmeblkp->hblk_next)
8950 8939                                  ;
8951 8940                          last_hmeblkp->hblk_next = list;
8952 8941                          list = cpuhp->chp_listp;
8953 8942                          cpuhp->chp_listp = NULL;
8954 8943                          cpuhp->chp_count = 0;
8955 8944                          mutex_exit(&cpuhp->chp_mutex);
8956 8945                  }
8957 8946  
8958 8947          }
8959 8948  
8960 8949          if (list != NULL) {
8961 8950                  kpreempt_disable();
8962 8951                  CPUSET_DEL(cpuset, CPU->cpu_id);
8963 8952                  xt_sync(cpuset);
8964 8953                  xt_sync(cpuset);
8965 8954                  kpreempt_enable();
8966 8955                  sfmmu_hblk_free(&list);
8967 8956                  list = NULL;
8968 8957          }
8969 8958  
8970 8959          hmebp = uhmehash_reclaim_hand;
8971 8960          if (hmebp == NULL || hmebp > &uhme_hash[UHMEHASH_SZ])
8972 8961                  uhmehash_reclaim_hand = hmebp = uhme_hash;
8973 8962          uhmehash_reclaim_hand += UHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio;
8974 8963  
8975 8964          for (i = UHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio; i; i--) {
8976 8965                  if (SFMMU_HASH_LOCK_TRYENTER(hmebp) != 0) {
8977 8966                          hmeblkp = hmebp->hmeblkp;
8978 8967                          pr_hblk = NULL;
8979 8968                          while (hmeblkp) {
8980 8969                                  nx_hblk = hmeblkp->hblk_next;
8981 8970                                  if (!hmeblkp->hblk_vcnt &&
8982 8971                                      !hmeblkp->hblk_hmecnt) {
8983 8972                                          sfmmu_hblk_hash_rm(hmebp, hmeblkp,
8984 8973                                              pr_hblk, &list, 0);
8985 8974                                  } else {
8986 8975                                          pr_hblk = hmeblkp;
8987 8976                                  }
8988 8977                                  hmeblkp = nx_hblk;
8989 8978                          }
8990 8979                          SFMMU_HASH_UNLOCK(hmebp);
8991 8980                  }
8992 8981                  if (hmebp++ == &uhme_hash[UHMEHASH_SZ])
8993 8982                          hmebp = uhme_hash;
8994 8983          }
8995 8984  
8996 8985          hmebp = khmehash_reclaim_hand;
8997 8986          if (hmebp == NULL || hmebp > &khme_hash[KHMEHASH_SZ])
8998 8987                  khmehash_reclaim_hand = hmebp = khme_hash;
8999 8988          khmehash_reclaim_hand += KHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio;
9000 8989  
9001 8990          for (i = KHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio; i; i--) {
9002 8991                  if (SFMMU_HASH_LOCK_TRYENTER(hmebp) != 0) {
9003 8992                          hmeblkp = hmebp->hmeblkp;
9004 8993                          pr_hblk = NULL;
9005 8994                          while (hmeblkp) {
9006 8995                                  nx_hblk = hmeblkp->hblk_next;
9007 8996                                  if (!hmeblkp->hblk_vcnt &&
9008 8997                                      !hmeblkp->hblk_hmecnt) {
9009 8998                                          sfmmu_hblk_hash_rm(hmebp, hmeblkp,
9010 8999                                              pr_hblk, &list, 0);
9011 9000                                  } else {
9012 9001                                          pr_hblk = hmeblkp;
9013 9002                                  }
9014 9003                                  hmeblkp = nx_hblk;
9015 9004                          }
9016 9005                          SFMMU_HASH_UNLOCK(hmebp);
9017 9006                  }
9018 9007                  if (hmebp++ == &khme_hash[KHMEHASH_SZ])
9019 9008                          hmebp = khme_hash;
9020 9009          }
9021 9010          sfmmu_hblks_list_purge(&list, 0);
9022 9011  }
9023 9012  
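The reclaim routine above uses a resumable "hand": each callback resumes where the previous one stopped and walks only 1/sfmmu_cache_reclaim_scan_ratio of the hash buckets, wrapping around at the end. A tiny sketch of that scan pattern follows; NBUCKETS, SCAN_RATIO and reclaim_pass() are hypothetical names with made-up sizes, not the kernel's.

#include <stdio.h>

#define NBUCKETS        64
#define SCAN_RATIO      8

static int reclaim_hand = 0;    /* persists across calls, like the statics above */

/* Visit the next NBUCKETS/SCAN_RATIO buckets, then remember where we stopped. */
static void
reclaim_pass(void)
{
        int i, bucket = reclaim_hand;

        for (i = 0; i < NBUCKETS / SCAN_RATIO; i++) {
                printf("scan bucket %d\n", bucket);
                if (++bucket == NBUCKETS)
                        bucket = 0;
        }
        reclaim_hand = bucket;
}

int
main(void)
{
        reclaim_pass();         /* buckets 0..7 */
        reclaim_pass();         /* buckets 8..15 */
        return (0);
}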
9024 9013  /*
9025 9014   * sfmmu_get_ppvcolor should become a vm_machdep or hatop interface.
9026 9015   * The same goes for sfmmu_get_addrvcolor().
9027 9016   *
9028 9017   * This function will return the virtual color for the specified page. The
9029 9018   * virtual color corresponds to this page's current or last mapping.
9030 9019   * It is used by memory allocators to choose addresses with the correct
9031 9020   * alignment so vac consistency is automatically maintained.  If the page
9032 9021   * has no color it returns -1.
9033 9022   */
9034 9023  /*ARGSUSED*/
9035 9024  int
9036 9025  sfmmu_get_ppvcolor(struct page *pp)
9037 9026  {
9038 9027  #ifdef VAC
9039 9028          int color;
9040 9029  
9041 9030          if (!(cache & CACHE_VAC) || PP_NEWPAGE(pp)) {
9042 9031                  return (-1);
9043 9032          }
9044 9033          color = PP_GET_VCOLOR(pp);
9045 9034          ASSERT(color < mmu_btop(shm_alignment));
9046 9035          return (color);
9047 9036  #else
9048 9037          return (-1);
9049 9038  #endif  /* VAC */
9050 9039  }
9051 9040  
9052 9041  /*
9053 9042   * This function will return the desired alignment for vac consistency
9054 9043   * (vac color) given a virtual address.  If no vac is present it returns -1.
9055 9044   */
9056 9045  /*ARGSUSED*/
9057 9046  int
9058 9047  sfmmu_get_addrvcolor(caddr_t vaddr)
9059 9048  {
9060 9049  #ifdef VAC
9061 9050          if (cache & CACHE_VAC) {
9062 9051                  return (addr_to_vcolor(vaddr));
9063 9052          } else {
9064 9053                  return (-1);
9065 9054          }
9066 9055  #else
9067 9056          return (-1);
9068 9057  #endif  /* VAC */
9069 9058  }
9070 9059  
9071 9060  #ifdef VAC
9072 9061  /*
9073 9062   * Check for conflicts.
9074 9063   * A conflict exists if the new and existing mappings do not match in
9075 9064   * their shm_alignment fields. If conflicts exist, the existing mappings
9076 9065   * are flushed unless one of them is locked. If one of them is locked, then
9077 9066   * the mappings are flushed and converted to non-cacheable mappings.
9078 9067   */
9079 9068  static void
9080 9069  sfmmu_vac_conflict(struct hat *hat, caddr_t addr, page_t *pp)
9081 9070  {
9082 9071          struct hat *tmphat;
9083 9072          struct sf_hment *sfhmep, *tmphme = NULL;
9084 9073          struct hme_blk *hmeblkp;
9085 9074          int vcolor;
9086 9075          tte_t tte;
9087 9076  
9088 9077          ASSERT(sfmmu_mlist_held(pp));
9089 9078          ASSERT(!PP_ISNC(pp));           /* page better be cacheable */
9090 9079  
9091 9080          vcolor = addr_to_vcolor(addr);
9092 9081          if (PP_NEWPAGE(pp)) {
9093 9082                  PP_SET_VCOLOR(pp, vcolor);
9094 9083                  return;
9095 9084          }
9096 9085  
9097 9086          if (PP_GET_VCOLOR(pp) == vcolor) {
9098 9087                  return;
9099 9088          }
9100 9089  
9101 9090          if (!PP_ISMAPPED(pp) && !PP_ISMAPPED_KPM(pp)) {
9102 9091                  /*
9103 9092                   * Previous user of page had a different color
9104 9093                   * but since there are no current users
9105 9094                   * we just flush the cache and change the color.
9106 9095                   */
9107 9096                  SFMMU_STAT(sf_pgcolor_conflict);
9108 9097                  sfmmu_cache_flush(pp->p_pagenum, PP_GET_VCOLOR(pp));
9109 9098                  PP_SET_VCOLOR(pp, vcolor);
9110 9099                  return;
9111 9100          }
9112 9101  
9113 9102          /*
9114 9103           * If we get here we have a vac conflict with a current
9115 9104           * mapping.  VAC conflict policy is as follows.
9116 9105           * - The default is to unload the other mappings unless:
9117 9106           * - If we have a large mapping we uncache the page.
9118 9107           * We need to uncache the rest of the large page too.
9119 9108           * - If any of the mappings are locked we uncache the page.
9120 9109           * - If the requested mapping is inconsistent
9121 9110           * with another mapping and that mapping
9122 9111           * is in the same address space we have to
9123 9112           * make it non-cached.  The default thing
9124 9113           * to do is unload the inconsistent mapping
9125 9114           * but if they are in the same address space
9126 9115           * we run the risk of unmapping the pc or the
9127 9116           * stack which we will use as we return to the user,
9128 9117           * in which case we can then fault on the thing
9129 9118           * we just unloaded and get into an infinite loop.
9130 9119           */
9131 9120          if (PP_ISMAPPED_LARGE(pp)) {
9132 9121                  int sz;
9133 9122  
9134 9123                  /*
9135 9124                   * Existing mapping is for big pages. We don't unload
9136 9125                   * existing big mappings to satisfy new mappings.
9137 9126                   * Always convert all mappings to TNC.
9138 9127                   */
9139 9128                  sz = fnd_mapping_sz(pp);
9140 9129                  pp = PP_GROUPLEADER(pp, sz);
9141 9130                  SFMMU_STAT_ADD(sf_uncache_conflict, TTEPAGES(sz));
9142 9131                  sfmmu_page_cache_array(pp, HAT_TMPNC, CACHE_FLUSH,
9143 9132                      TTEPAGES(sz));
9144 9133  
9145 9134                  return;
9146 9135          }
9147 9136  
9148 9137          /*
9149 9138           * check if any mapping is in the same address space or is locked,
9150 9139           * since in that case we need to uncache.
9151 9140           */
9152 9141          for (sfhmep = pp->p_mapping; sfhmep; sfhmep = tmphme) {
9153 9142                  tmphme = sfhmep->hme_next;
9154 9143                  if (IS_PAHME(sfhmep))
9155 9144                          continue;
9156 9145                  hmeblkp = sfmmu_hmetohblk(sfhmep);
9157 9146                  tmphat = hblktosfmmu(hmeblkp);
9158 9147                  sfmmu_copytte(&sfhmep->hme_tte, &tte);
9159 9148                  ASSERT(TTE_IS_VALID(&tte));
9160 9149                  if (hmeblkp->hblk_shared || tmphat == hat ||
9161 9150                      hmeblkp->hblk_lckcnt) {
9162 9151                          /*
9163 9152                           * We have an uncache conflict
9164 9153                           */
9165 9154                          SFMMU_STAT(sf_uncache_conflict);
9166 9155                          sfmmu_page_cache_array(pp, HAT_TMPNC, CACHE_FLUSH, 1);
9167 9156                          return;
9168 9157                  }
9169 9158          }
9170 9159  
9171 9160          /*
9172 9161           * We have an unload conflict.
9173 9162           * We have already checked for LARGE mappings, therefore
9174 9163           * the remaining mapping(s) must be TTE8K.
9175 9164           */
9176 9165          SFMMU_STAT(sf_unload_conflict);
9177 9166  
9178 9167          for (sfhmep = pp->p_mapping; sfhmep; sfhmep = tmphme) {
9179 9168                  tmphme = sfhmep->hme_next;
9180 9169                  if (IS_PAHME(sfhmep))
9181 9170                          continue;
9182 9171                  hmeblkp = sfmmu_hmetohblk(sfhmep);
9183 9172                  ASSERT(!hmeblkp->hblk_shared);
9184 9173                  (void) sfmmu_pageunload(pp, sfhmep, TTE8K);
9185 9174          }
9186 9175  
9187 9176          if (PP_ISMAPPED_KPM(pp))
9188 9177                  sfmmu_kpm_vac_unload(pp, addr);
9189 9178  
9190 9179          /*
9191 9180           * Unloads only do TLB flushes so we need to flush the
9192 9181           * cache here.
9193 9182           */
9194 9183          sfmmu_cache_flush(pp->p_pagenum, PP_GET_VCOLOR(pp));
9195 9184          PP_SET_VCOLOR(pp, vcolor);
9196 9185  }
9197 9186  
9198 9187  /*
9199 9188   * Whenever a mapping is unloaded and the page is in TNC state,
9200 9189   * we see if the page can be made cacheable again. 'pp' is
9201 9190   * the page that we just unloaded a mapping from; the size
9202 9191   * of the mapping that was unloaded is 'ottesz'.
9203 9192   * Remark:
9204 9193   * The recache policy for mpss pages can leave a performance problem
9205 9194   * under the following circumstances:
9206 9195   * . A large page in uncached mode has just been unmapped.
9207 9196   * . All constituent pages are TNC due to a conflicting small mapping.
9208 9197   * . There are many other, non conflicting, small mappings around for
9209 9198   *   a lot of the constituent pages.
9210 9199   * . We're called w/ the "old" groupleader page and the old ottesz,
9211 9200   *   but this is irrelevant, since we're no longer "PP_ISMAPPED_LARGE", so
9212 9201   *   we end up w/ TTE8K or npages == 1.
9213 9202   * . We call tst_tnc w/ the old groupleader only, and if there is no
9214 9203   *   conflict, we re-cache only this page.
9215 9204   * . All other small mappings are not checked and will be left in TNC mode.
9216 9205   * The problem is not very serious because:
9217 9206   * . mpss is actually only defined for heap and stack, so the probability
9218 9207   *   is not very high that a large page mapping exists in parallel to a small
9219 9208   *   one (this is possible, but seems to be bad programming style in the
9220 9209   *   appl).
9221 9210   * . The problem gets a little bit more serious, when those TNC pages
9222 9211   *   have to be mapped into kernel space, e.g. for networking.
9223 9212   * . When VAC alias conflicts occur in applications, this is regarded
9224 9213   *   as an application bug. So if kstat's show them, the appl should
9225 9214   *   be changed anyway.
9226 9215   */
9227 9216  void
9228 9217  conv_tnc(page_t *pp, int ottesz)
9229 9218  {
9230 9219          int cursz, dosz;
9231 9220          pgcnt_t curnpgs, dopgs;
9232 9221          pgcnt_t pg64k;
9233 9222          page_t *pp2;
9234 9223  
9235 9224          /*
9236 9225           * Determine how big a range we check for TNC and find
9237 9226           * leader page. cursz is the size of the biggest
9238 9227   * mapping that still exists on 'pp'.
9239 9228           */
9240 9229          if (PP_ISMAPPED_LARGE(pp)) {
9241 9230                  cursz = fnd_mapping_sz(pp);
9242 9231          } else {
9243 9232                  cursz = TTE8K;
9244 9233          }
9245 9234  
9246 9235          if (ottesz >= cursz) {
9247 9236                  dosz = ottesz;
9248 9237                  pp2 = pp;
9249 9238          } else {
9250 9239                  dosz = cursz;
9251 9240                  pp2 = PP_GROUPLEADER(pp, dosz);
9252 9241          }
9253 9242  
9254 9243          pg64k = TTEPAGES(TTE64K);
9255 9244          dopgs = TTEPAGES(dosz);
9256 9245  
9257 9246          ASSERT(dopgs == 1 || ((dopgs & (pg64k - 1)) == 0));
9258 9247  
9259 9248          while (dopgs != 0) {
9260 9249                  curnpgs = TTEPAGES(cursz);
9261 9250                  if (tst_tnc(pp2, curnpgs)) {
9262 9251                          SFMMU_STAT_ADD(sf_recache, curnpgs);
9263 9252                          sfmmu_page_cache_array(pp2, HAT_CACHE, CACHE_NO_FLUSH,
9264 9253                              curnpgs);
9265 9254                  }
9266 9255  
9267 9256                  ASSERT(dopgs >= curnpgs);
9268 9257                  dopgs -= curnpgs;
9269 9258  
9270 9259                  if (dopgs == 0) {
9271 9260                          break;
9272 9261                  }
9273 9262  
9274 9263                  pp2 = PP_PAGENEXT_N(pp2, curnpgs);
9275 9264                  if (((dopgs & (pg64k - 1)) == 0) && PP_ISMAPPED_LARGE(pp2)) {
9276 9265                          cursz = fnd_mapping_sz(pp2);
9277 9266                  } else {
9278 9267                          cursz = TTE8K;
9279 9268                  }
9280 9269          }
9281 9270  }
9282 9271  
9283 9272  /*
9284 9273   * Returns 1 if page(s) can be converted from TNC to cacheable setting,
9285 9274   * returns 0 otherwise. Note that oaddr argument is valid for only
9286 9275   * 8k pages.
9287 9276   */
9288 9277  int
9289 9278  tst_tnc(page_t *pp, pgcnt_t npages)
9290 9279  {
9291 9280          struct  sf_hment *sfhme;
9292 9281          struct  hme_blk *hmeblkp;
9293 9282          tte_t   tte;
9294 9283          caddr_t vaddr;
9295 9284          int     clr_valid = 0;
9296 9285          int     color, color1, bcolor;
9297 9286          int     i, ncolors;
9298 9287  
9299 9288          ASSERT(pp != NULL);
9300 9289          ASSERT(!(cache & CACHE_WRITEBACK));
9301 9290  
9302 9291          if (npages > 1) {
9303 9292                  ncolors = CACHE_NUM_COLOR;
9304 9293          }
9305 9294  
9306 9295          for (i = 0; i < npages; i++) {
9307 9296                  ASSERT(sfmmu_mlist_held(pp));
9308 9297                  ASSERT(PP_ISTNC(pp));
9309 9298                  ASSERT(PP_GET_VCOLOR(pp) == NO_VCOLOR);
9310 9299  
9311 9300                  if (PP_ISPNC(pp)) {
9312 9301                          return (0);
9313 9302                  }
9314 9303  
9315 9304                  clr_valid = 0;
9316 9305                  if (PP_ISMAPPED_KPM(pp)) {
9317 9306                          caddr_t kpmvaddr;
9318 9307  
9319 9308                          ASSERT(kpm_enable);
9320 9309                          kpmvaddr = hat_kpm_page2va(pp, 1);
9321 9310                          ASSERT(!(npages > 1 && IS_KPM_ALIAS_RANGE(kpmvaddr)));
9322 9311                          color1 = addr_to_vcolor(kpmvaddr);
9323 9312                          clr_valid = 1;
9324 9313                  }
9325 9314  
9326 9315                  for (sfhme = pp->p_mapping; sfhme; sfhme = sfhme->hme_next) {
9327 9316                          if (IS_PAHME(sfhme))
9328 9317                                  continue;
9329 9318                          hmeblkp = sfmmu_hmetohblk(sfhme);
9330 9319  
9331 9320                          sfmmu_copytte(&sfhme->hme_tte, &tte);
9332 9321                          ASSERT(TTE_IS_VALID(&tte));
9333 9322  
9334 9323                          vaddr = tte_to_vaddr(hmeblkp, tte);
9335 9324                          color = addr_to_vcolor(vaddr);
9336 9325  
9337 9326                          if (npages > 1) {
9338 9327                                  /*
9339 9328                                   * If there is a big mapping, make sure
9340 9329                                   * 8K mapping is consistent with the big
9341 9330                                   * mapping.
9342 9331                                   */
9343 9332                                  bcolor = i % ncolors;
9344 9333                                  if (color != bcolor) {
9345 9334                                          return (0);
9346 9335                                  }
9347 9336                          }
9348 9337                          if (!clr_valid) {
9349 9338                                  clr_valid = 1;
9350 9339                                  color1 = color;
9351 9340                          }
9352 9341  
9353 9342                          if (color1 != color) {
9354 9343                                  return (0);
9355 9344                          }
9356 9345                  }
9357 9346  
9358 9347                  pp = PP_PAGENEXT(pp);
9359 9348          }
9360 9349  
9361 9350          return (1);
9362 9351  }
9363 9352  
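tst_tnc() above makes an all-or-nothing decision: the page(s) can be recached only if every remaining mapping resolves to a single virtual color (and, for a big page, to the color implied by the constituent page index). The user-space sketch below shows that consistency check; addr_to_vcolor() is modeled as a simple shift-and-mask, which is only an assumption standing in for the real, platform-specific computation, and PAGESHIFT/NCOLORS are illustrative values.

#include <stdio.h>
#include <stdint.h>

#define PAGESHIFT       13              /* 8K base pages */
#define NCOLORS         4               /* hypothetical VAC color count */

/* Simplified model of addr_to_vcolor(): low page-number bits. */
static int
vcolor(uintptr_t vaddr)
{
        return ((vaddr >> PAGESHIFT) & (NCOLORS - 1));
}

/* Return 1 if every mapping address has the same color, 0 otherwise. */
static int
colors_consistent(const uintptr_t *vaddrs, int n)
{
        int i, c0;

        if (n == 0)
                return (1);
        c0 = vcolor(vaddrs[0]);
        for (i = 1; i < n; i++) {
                if (vcolor(vaddrs[i]) != c0)
                        return (0);
        }
        return (1);
}

int
main(void)
{
        uintptr_t ok[] = { 0x10000, 0x18000 };          /* same color */
        uintptr_t bad[] = { 0x10000, 0x12000 };         /* colors differ */

        printf("%d %d\n", colors_consistent(ok, 2), colors_consistent(bad, 2));
        return (0);
}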
9364 9353  void
9365 9354  sfmmu_page_cache_array(page_t *pp, int flags, int cache_flush_flag,
9366 9355          pgcnt_t npages)
9367 9356  {
9368 9357          kmutex_t *pmtx;
9369 9358          int i, ncolors, bcolor;
9370 9359          kpm_hlk_t *kpmp;
9371 9360          cpuset_t cpuset;
9372 9361  
9373 9362          ASSERT(pp != NULL);
9374 9363          ASSERT(!(cache & CACHE_WRITEBACK));
9375 9364  
9376 9365          kpmp = sfmmu_kpm_kpmp_enter(pp, npages);
9377 9366          pmtx = sfmmu_page_enter(pp);
9378 9367  
9379 9368          /*
9380 9369           * Fast path caching single unmapped page
9381 9370           */
9382 9371          if (npages == 1 && !PP_ISMAPPED(pp) && !PP_ISMAPPED_KPM(pp) &&
9383 9372              flags == HAT_CACHE) {
9384 9373                  PP_CLRTNC(pp);
9385 9374                  PP_CLRPNC(pp);
9386 9375                  sfmmu_page_exit(pmtx);
9387 9376                  sfmmu_kpm_kpmp_exit(kpmp);
9388 9377                  return;
9389 9378          }
9390 9379  
9391 9380          /*
9392 9381           * We need to capture all cpus in order to change cacheability
9393 9382           * because we can't allow one cpu to access the same physical
9394 9383           * page using a cacheable and a non-cacheable mapping at the same
9395 9384           * time. Since we may end up walking the ism mapping list, we
9396 9385           * have to grab its lock now, since we can't after all the
9397 9386           * cpus have been captured.
9398 9387           */
9399 9388          sfmmu_hat_lock_all();
9400 9389          mutex_enter(&ism_mlist_lock);
9401 9390          kpreempt_disable();
9402 9391          cpuset = cpu_ready_set;
9403 9392          xc_attention(cpuset);
9404 9393  
9405 9394          if (npages > 1) {
9406 9395                  /*
9407 9396                   * Make sure all colors are flushed since the
9408 9397                   * sfmmu_page_cache() only flushes one color;
9409 9398                   * it does not know about big pages.
9410 9399                   */
9411 9400                  ncolors = CACHE_NUM_COLOR;
9412 9401                  if (flags & HAT_TMPNC) {
9413 9402                          for (i = 0; i < ncolors; i++) {
9414 9403                                  sfmmu_cache_flushcolor(i, pp->p_pagenum);
9415 9404                          }
9416 9405                          cache_flush_flag = CACHE_NO_FLUSH;
9417 9406                  }
9418 9407          }
9419 9408  
9420 9409          for (i = 0; i < npages; i++) {
9421 9410  
9422 9411                  ASSERT(sfmmu_mlist_held(pp));
9423 9412  
9424 9413                  if (!(flags == HAT_TMPNC && PP_ISTNC(pp))) {
9425 9414  
9426 9415                          if (npages > 1) {
9427 9416                                  bcolor = i % ncolors;
9428 9417                          } else {
9429 9418                                  bcolor = NO_VCOLOR;
9430 9419                          }
9431 9420  
9432 9421                          sfmmu_page_cache(pp, flags, cache_flush_flag,
9433 9422                              bcolor);
9434 9423                  }
9435 9424  
9436 9425                  pp = PP_PAGENEXT(pp);
9437 9426          }
9438 9427  
9439 9428          xt_sync(cpuset);
9440 9429          xc_dismissed(cpuset);
9441 9430          mutex_exit(&ism_mlist_lock);
9442 9431          sfmmu_hat_unlock_all();
9443 9432          sfmmu_page_exit(pmtx);
9444 9433          sfmmu_kpm_kpmp_exit(kpmp);
9445 9434          kpreempt_enable();
9446 9435  }
9447 9436  
9448 9437  /*
9449 9438   * This function changes the virtual cacheability of all mappings to a
9450 9439   * particular page.  When changing from uncache to cacheable the mappings will
9451 9440   * only be changed if all of them have the same virtual color.
9452 9441   * We need to flush the cache on all cpus.  It is possible that
9453 9442   * a process referenced a page as cacheable but has since exited
9454 9443   * and cleared the mapping list.  We still need to flush it but have no
9455 9444   * state, so flushing on all cpus is the only alternative.
9456 9445   */
9457 9446  static void
9458 9447  sfmmu_page_cache(page_t *pp, int flags, int cache_flush_flag, int bcolor)
9459 9448  {
9460 9449          struct  sf_hment *sfhme;
9461 9450          struct  hme_blk *hmeblkp;
9462 9451          sfmmu_t *sfmmup;
9463 9452          tte_t   tte, ttemod;
9464 9453          caddr_t vaddr;
9465 9454          int     ret, color;
9466 9455          pfn_t   pfn;
9467 9456  
9468 9457          color = bcolor;
9469 9458          pfn = pp->p_pagenum;
9470 9459  
9471 9460          for (sfhme = pp->p_mapping; sfhme; sfhme = sfhme->hme_next) {
9472 9461  
9473 9462                  if (IS_PAHME(sfhme))
9474 9463                          continue;
9475 9464                  hmeblkp = sfmmu_hmetohblk(sfhme);
9476 9465  
9477 9466                  sfmmu_copytte(&sfhme->hme_tte, &tte);
9478 9467                  ASSERT(TTE_IS_VALID(&tte));
9479 9468                  vaddr = tte_to_vaddr(hmeblkp, tte);
9480 9469                  color = addr_to_vcolor(vaddr);
9481 9470  
9482 9471  #ifdef DEBUG
9483 9472                  if ((flags & HAT_CACHE) && bcolor != NO_VCOLOR) {
9484 9473                          ASSERT(color == bcolor);
9485 9474                  }
9486 9475  #endif
9487 9476  
9488 9477                  ASSERT(flags != HAT_TMPNC || color == PP_GET_VCOLOR(pp));
9489 9478  
9490 9479                  ttemod = tte;
9491 9480                  if (flags & (HAT_UNCACHE | HAT_TMPNC)) {
9492 9481                          TTE_CLR_VCACHEABLE(&ttemod);
9493 9482                  } else {        /* flags & HAT_CACHE */
9494 9483                          TTE_SET_VCACHEABLE(&ttemod);
9495 9484                  }
9496 9485                  ret = sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte);
9497 9486                  if (ret < 0) {
9498 9487                          /*
9499 9488                           * Since all cpus are captured modifytte should not
9500 9489                           * fail.
9501 9490                           */
9502 9491                          panic("sfmmu_page_cache: write to tte failed");
9503 9492                  }
9504 9493  
9505 9494                  sfmmup = hblktosfmmu(hmeblkp);
9506 9495                  if (cache_flush_flag == CACHE_FLUSH) {
9507 9496                          /*
9508 9497                           * Flush TSBs, TLBs and caches
9509 9498                           */
9510 9499                          if (hmeblkp->hblk_shared) {
9511 9500                                  sf_srd_t *srdp = (sf_srd_t *)sfmmup;
9512 9501                                  uint_t rid = hmeblkp->hblk_tag.htag_rid;
9513 9502                                  sf_region_t *rgnp;
9514 9503                                  ASSERT(SFMMU_IS_SHMERID_VALID(rid));
9515 9504                                  ASSERT(rid < SFMMU_MAX_HME_REGIONS);
9516 9505                                  ASSERT(srdp != NULL);
9517 9506                                  rgnp = srdp->srd_hmergnp[rid];
9518 9507                                  SFMMU_VALIDATE_SHAREDHBLK(hmeblkp,
9519 9508                                      srdp, rgnp, rid);
9520 9509                                  (void) sfmmu_rgntlb_demap(vaddr, rgnp,
9521 9510                                      hmeblkp, 0);
9522 9511                                  sfmmu_cache_flush(pfn, addr_to_vcolor(vaddr));
9523 9512                          } else if (sfmmup->sfmmu_ismhat) {
9524 9513                                  if (flags & HAT_CACHE) {
9525 9514                                          SFMMU_STAT(sf_ism_recache);
9526 9515                                  } else {
9527 9516                                          SFMMU_STAT(sf_ism_uncache);
9528 9517                                  }
9529 9518                                  sfmmu_ismtlbcache_demap(vaddr, sfmmup, hmeblkp,
9530 9519                                      pfn, CACHE_FLUSH);
9531 9520                          } else {
9532 9521                                  sfmmu_tlbcache_demap(vaddr, sfmmup, hmeblkp,
9533 9522                                      pfn, 0, FLUSH_ALL_CPUS, CACHE_FLUSH, 1);
9534 9523                          }
9535 9524  
9536 9525                          /*
9537 9526                           * all cache entries belonging to this pfn are
9538 9527                           * now flushed.
9539 9528                           */
9540 9529                          cache_flush_flag = CACHE_NO_FLUSH;
9541 9530                  } else {
9542 9531                          /*
9543 9532                           * Flush only TSBs and TLBs.
9544 9533                           */
9545 9534                          if (hmeblkp->hblk_shared) {
9546 9535                                  sf_srd_t *srdp = (sf_srd_t *)sfmmup;
9547 9536                                  uint_t rid = hmeblkp->hblk_tag.htag_rid;
9548 9537                                  sf_region_t *rgnp;
9549 9538                                  ASSERT(SFMMU_IS_SHMERID_VALID(rid));
9550 9539                                  ASSERT(rid < SFMMU_MAX_HME_REGIONS);
9551 9540                                  ASSERT(srdp != NULL);
9552 9541                                  rgnp = srdp->srd_hmergnp[rid];
9553 9542                                  SFMMU_VALIDATE_SHAREDHBLK(hmeblkp,
9554 9543                                      srdp, rgnp, rid);
9555 9544                                  (void) sfmmu_rgntlb_demap(vaddr, rgnp,
9556 9545                                      hmeblkp, 0);
9557 9546                          } else if (sfmmup->sfmmu_ismhat) {
9558 9547                                  if (flags & HAT_CACHE) {
9559 9548                                          SFMMU_STAT(sf_ism_recache);
9560 9549                                  } else {
9561 9550                                          SFMMU_STAT(sf_ism_uncache);
9562 9551                                  }
9563 9552                                  sfmmu_ismtlbcache_demap(vaddr, sfmmup, hmeblkp,
9564 9553                                      pfn, CACHE_NO_FLUSH);
9565 9554                          } else {
9566 9555                                  sfmmu_tlb_demap(vaddr, sfmmup, hmeblkp, 0, 1);
9567 9556                          }
9568 9557                  }
9569 9558          }
9570 9559  
9571 9560          if (PP_ISMAPPED_KPM(pp))
9572 9561                  sfmmu_kpm_page_cache(pp, flags, cache_flush_flag);
9573 9562  
9574 9563          switch (flags) {
9575 9564  
9576 9565                  default:
9577 9566                          panic("sfmmu_pagecache: unknown flags");
9578 9567                          break;
9579 9568  
9580 9569                  case HAT_CACHE:
9581 9570                          PP_CLRTNC(pp);
9582 9571                          PP_CLRPNC(pp);
9583 9572                          PP_SET_VCOLOR(pp, color);
9584 9573                          break;
9585 9574  
9586 9575                  case HAT_TMPNC:
9587 9576                          PP_SETTNC(pp);
9588 9577                          PP_SET_VCOLOR(pp, NO_VCOLOR);
9589 9578                          break;
9590 9579  
9591 9580                  case HAT_UNCACHE:
9592 9581                          PP_SETPNC(pp);
9593 9582                          PP_CLRTNC(pp);
9594 9583                          PP_SET_VCOLOR(pp, NO_VCOLOR);
9595 9584                          break;
9596 9585          }
9597 9586  }
9598 9587  #endif  /* VAC */
9599 9588  
9600 9589  
9601 9590  /*
9602 9591   * Wrapper routine used to return a context.
9603 9592   *
9604 9593   * It's the responsibility of the caller to guarantee that the
9605 9594   * process serializes on calls here by taking the HAT lock for
9606 9595   * the hat.
9607 9596   *
9608 9597   */
9609 9598  static void
9610 9599  sfmmu_get_ctx(sfmmu_t *sfmmup)
9611 9600  {
9612 9601          mmu_ctx_t *mmu_ctxp;
9613 9602          uint_t pstate_save;
9614 9603          int ret;
9615 9604  
9616 9605          ASSERT(sfmmu_hat_lock_held(sfmmup));
9617 9606          ASSERT(sfmmup != ksfmmup);
9618 9607  
9619 9608          if (SFMMU_FLAGS_ISSET(sfmmup, HAT_ALLCTX_INVALID)) {
9620 9609                  sfmmu_setup_tsbinfo(sfmmup);
9621 9610                  SFMMU_FLAGS_CLEAR(sfmmup, HAT_ALLCTX_INVALID);
9622 9611          }
9623 9612  
9624 9613          kpreempt_disable();
9625 9614  
9626 9615          mmu_ctxp = CPU_MMU_CTXP(CPU);
9627 9616          ASSERT(mmu_ctxp);
9628 9617          ASSERT(mmu_ctxp->mmu_idx < max_mmu_ctxdoms);
9629 9618          ASSERT(mmu_ctxp == mmu_ctxs_tbl[mmu_ctxp->mmu_idx]);
9630 9619  
9631 9620          /*
9632 9621           * Do a wrap-around if cnum reaches the max # cnum supported by a MMU.
9633 9622           */
9634 9623          if (mmu_ctxp->mmu_cnum == mmu_ctxp->mmu_nctxs)
9635 9624                  sfmmu_ctx_wrap_around(mmu_ctxp, B_TRUE);
9636 9625  
9637 9626          /*
9638 9627           * Let the MMU set up the page sizes to use for
9639 9628           * this context in the TLB. Don't program 2nd dtlb for ism hat.
9640 9629           */
9641 9630          if ((&mmu_set_ctx_page_sizes) && (sfmmup->sfmmu_ismhat == 0)) {
9642 9631                  mmu_set_ctx_page_sizes(sfmmup);
9643 9632          }
9644 9633  
9645 9634          /*
9646 9635           * sfmmu_alloc_ctx and sfmmu_load_mmustate will be performed with
9647 9636           * interrupts disabled to prevent race condition with wrap-around
9648 9637           * ctx invalidation. In sun4v, ctx invalidation also involves
9649 9638           * a HV call to set the number of TSBs to 0. If interrupts are not
9650 9639           * disabled until after sfmmu_load_mmustate is complete TSBs may
9651 9640           * become assigned to INVALID_CONTEXT. This is not allowed.
9652 9641           */
9653 9642          pstate_save = sfmmu_disable_intrs();
9654 9643  
9655 9644          if (sfmmu_alloc_ctx(sfmmup, 1, CPU, SFMMU_PRIVATE) &&
9656 9645              sfmmup->sfmmu_scdp != NULL) {
9657 9646                  sf_scd_t *scdp = sfmmup->sfmmu_scdp;
9658 9647                  sfmmu_t *scsfmmup = scdp->scd_sfmmup;
9659 9648                  ret = sfmmu_alloc_ctx(scsfmmup, 1, CPU, SFMMU_SHARED);
9660 9649                  /* debug purpose only */
9661 9650                  ASSERT(!ret || scsfmmup->sfmmu_ctxs[CPU_MMU_IDX(CPU)].cnum
9662 9651                      != INVALID_CONTEXT);
9663 9652          }
9664 9653          sfmmu_load_mmustate(sfmmup);
9665 9654  
9666 9655          sfmmu_enable_intrs(pstate_save);
9667 9656  
9668 9657          kpreempt_enable();
9669 9658  }
9670 9659  
9671 9660  /*
9672 9661   * When all cnums are used up in a MMU, cnum will wrap around to the
9673 9662   * next generation and start from 2.
9674 9663   */
9675 9664  static void
9676 9665  sfmmu_ctx_wrap_around(mmu_ctx_t *mmu_ctxp, boolean_t reset_cnum)
9677 9666  {
9678 9667  
9679 9668          /* caller must have disabled the preemption */
9680 9669          ASSERT(curthread->t_preempt >= 1);
9681 9670          ASSERT(mmu_ctxp != NULL);
9682 9671  
9683 9672          /* acquire Per-MMU (PM) spin lock */
9684 9673          mutex_enter(&mmu_ctxp->mmu_lock);
9685 9674  
9686 9675          /* re-check to see if wrap-around is needed */
9687 9676          if (mmu_ctxp->mmu_cnum < mmu_ctxp->mmu_nctxs)
9688 9677                  goto done;
9689 9678  
9690 9679          SFMMU_MMU_STAT(mmu_wrap_around);
9691 9680  
9692 9681          /* update gnum */
9693 9682          ASSERT(mmu_ctxp->mmu_gnum != 0);
9694 9683          mmu_ctxp->mmu_gnum++;
9695 9684          if (mmu_ctxp->mmu_gnum == 0 ||
9696 9685              mmu_ctxp->mmu_gnum > MAX_SFMMU_GNUM_VAL) {
9697 9686                  cmn_err(CE_PANIC, "mmu_gnum of mmu_ctx 0x%p is out of bound.",
9698 9687                      (void *)mmu_ctxp);
9699 9688          }
9700 9689  
9701 9690          if (mmu_ctxp->mmu_ncpus > 1) {
9702 9691                  cpuset_t cpuset;
9703 9692  
9704 9693                  membar_enter(); /* make sure updated gnum visible */
9705 9694  
9706 9695                  SFMMU_XCALL_STATS(NULL);
9707 9696  
9708 9697                  /* xcall to others on the same MMU to invalidate ctx */
9709 9698                  cpuset = mmu_ctxp->mmu_cpuset;
9710 9699                  ASSERT(CPU_IN_SET(cpuset, CPU->cpu_id) || !reset_cnum);
9711 9700                  CPUSET_DEL(cpuset, CPU->cpu_id);
9712 9701                  CPUSET_AND(cpuset, cpu_ready_set);
9713 9702  
9714 9703                  /*
9715 9704                   * Pass in INVALID_CONTEXT as the first parameter to
9716 9705                   * sfmmu_raise_tsb_exception, which invalidates the context
9717 9706                   * of any process running on the CPUs in the MMU.
9718 9707                   */
9719 9708                  xt_some(cpuset, sfmmu_raise_tsb_exception,
9720 9709                      INVALID_CONTEXT, INVALID_CONTEXT);
9721 9710                  xt_sync(cpuset);
9722 9711  
9723 9712                  SFMMU_MMU_STAT(mmu_tsb_raise_exception);
9724 9713          }
9725 9714  
9726 9715          if (sfmmu_getctx_sec() != INVALID_CONTEXT) {
9727 9716                  sfmmu_setctx_sec(INVALID_CONTEXT);
9728 9717                  sfmmu_clear_utsbinfo();
9729 9718          }
9730 9719  
9731 9720          /*
9732 9721           * No xcall is needed here. For sun4u systems all CPUs in context
9733 9722           * domain share a single physical MMU therefore it's enough to flush
9734 9723           * TLB on local CPU. On sun4v systems we use 1 global context
9735 9724           * domain and flush all remote TLBs in sfmmu_raise_tsb_exception
9736 9725           * handler. Note that vtag_flushall_uctxs() is called
9737 9726           * for Ultra II machines, where the equivalent flushall functionality
9738 9727           * is implemented in SW, and only user ctx TLB entries are flushed.
9739 9728           */
9740 9729          if (&vtag_flushall_uctxs != NULL) {
9741 9730                  vtag_flushall_uctxs();
9742 9731          } else {
9743 9732                  vtag_flushall();
9744 9733          }
9745 9734  
9746 9735          /* reset mmu cnum, skips cnum 0 and 1 */
9747 9736          if (reset_cnum == B_TRUE)
9748 9737                  mmu_ctxp->mmu_cnum = NUM_LOCKED_CTXS;
9749 9738  
9750 9739  done:
9751 9740          mutex_exit(&mmu_ctxp->mmu_lock);
9752 9741  }
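/*
 * An illustrative sketch, not taken from hat_sfmmu.c: the wrap-around scheme
 * used by sfmmu_ctx_wrap_around, reduced to plain C.  When the per-MMU
 * context-number space is exhausted, the generation number is bumped and
 * allocation restarts above the reserved (locked) context numbers, which
 * forces every holder of an old-generation context to re-fault and obtain a
 * fresh one.  The type and constant names below are hypothetical.
 */
#include <stdint.h>

#define NUM_LOCKED_CTXS 2       /* cnum 0 and 1 are reserved */

struct mmu_ctx_sketch {
        uint64_t gnum;          /* generation, bumped on every wrap */
        uint16_t cnum;          /* next context number to hand out */
        uint16_t nctxs;         /* total context numbers supported */
};

/* Hand out a (gnum, cnum) pair, wrapping when the cnum space is exhausted. */
static uint64_t
alloc_ctx_sketch(struct mmu_ctx_sketch *m)
{
        if (m->cnum == m->nctxs) {      /* wrap-around */
                m->gnum++;              /* invalidates all older contexts */
                m->cnum = NUM_LOCKED_CTXS;
        }
        return ((m->gnum << 16) | m->cnum++);
}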
9753 9742  
9754 9743  
9755 9744  /*
9756 9745   * For multi-threaded process, set the process context to INVALID_CONTEXT
9757 9746   * so that it faults and reloads the MMU state from TL=0. For single-threaded
9758 9747   * process, we can just load the MMU state directly without having to
9759 9748   * set context invalid. Caller must hold the hat lock since we don't
9760 9749   * acquire it here.
9761 9750   */
9762 9751  static void
9763 9752  sfmmu_sync_mmustate(sfmmu_t *sfmmup)
9764 9753  {
9765 9754          uint_t cnum;
9766 9755          uint_t pstate_save;
9767 9756  
9768 9757          ASSERT(sfmmup != ksfmmup);
9769 9758          ASSERT(sfmmu_hat_lock_held(sfmmup));
9770 9759  
9771 9760          kpreempt_disable();
9772 9761  
9773 9762          /*
9774 9763           * We check whether the passed-in sfmmup is the same as the
9775 9764           * current running proc. This is to make sure the current proc
9776 9765           * stays single-threaded if it already is.
9777 9766           */
9778 9767          if ((sfmmup == curthread->t_procp->p_as->a_hat) &&
9779 9768              (curthread->t_procp->p_lwpcnt == 1)) {
9780 9769                  /* single-thread */
9781 9770                  cnum = sfmmup->sfmmu_ctxs[CPU_MMU_IDX(CPU)].cnum;
9782 9771                  if (cnum != INVALID_CONTEXT) {
9783 9772                          uint_t curcnum;
9784 9773                          /*
9785 9774                           * Disable interrupts to prevent race condition
9786 9775                           * with sfmmu_ctx_wrap_around ctx invalidation.
9787 9776                           * In sun4v, ctx invalidation involves setting
9788 9777                           * TSB to NULL, hence, interrupts should be disabled
9789 9778                           * until after sfmmu_load_mmustate is completed.
9790 9779                           */
9791 9780                          pstate_save = sfmmu_disable_intrs();
9792 9781                          curcnum = sfmmu_getctx_sec();
9793 9782                          if (curcnum == cnum)
9794 9783                                  sfmmu_load_mmustate(sfmmup);
9795 9784                          sfmmu_enable_intrs(pstate_save);
9796 9785                          ASSERT(curcnum == cnum || curcnum == INVALID_CONTEXT);
9797 9786                  }
9798 9787          } else {
9799 9788                  /*
9800 9789                   * multi-thread
9801 9790                   * or when sfmmup is not the same as the curproc.
9802 9791                   */
9803 9792                  sfmmu_invalidate_ctx(sfmmup);
9804 9793          }
9805 9794  
9806 9795          kpreempt_enable();
9807 9796  }
9808 9797  
9809 9798  
9810 9799  /*
9811 9800   * Replace the specified TSB with a new TSB.  This function gets called when
9812 9801   * we grow, shrink or swapin a TSB.  When swapping in a TSB (TSB_SWAPIN), the
9813 9802   * TSB_FORCEALLOC flag may be used to force allocation of a minimum-sized TSB
9814 9803   * (8K).
9815 9804   *
9816 9805   * Caller must hold the HAT lock, but should assume any tsb_info
9817 9806   * pointers it has are no longer valid after calling this function.
9818 9807   *
9819 9808   * Return values:
9820 9809   *      TSB_ALLOCFAIL   Failed to allocate a TSB, due to memory constraints
9821 9810   *      TSB_LOSTRACE    HAT is busy, i.e. another thread is already doing
9822 9811   *                      something to this tsbinfo/TSB
9823 9812   *      TSB_SUCCESS     Operation succeeded
9824 9813   */
9825 9814  static tsb_replace_rc_t
9826 9815  sfmmu_replace_tsb(sfmmu_t *sfmmup, struct tsb_info *old_tsbinfo, uint_t szc,
9827 9816      hatlock_t *hatlockp, uint_t flags)
9828 9817  {
9829 9818          struct tsb_info *new_tsbinfo = NULL;
9830 9819          struct tsb_info *curtsb, *prevtsb;
9831 9820          uint_t tte_sz_mask;
9832 9821          int i;
9833 9822  
9834 9823          ASSERT(sfmmup != ksfmmup);
9835 9824          ASSERT(sfmmup->sfmmu_ismhat == 0);
9836 9825          ASSERT(sfmmu_hat_lock_held(sfmmup));
9837 9826          ASSERT(szc <= tsb_max_growsize);
9838 9827  
9839 9828          if (SFMMU_FLAGS_ISSET(sfmmup, HAT_BUSY))
9840 9829                  return (TSB_LOSTRACE);
9841 9830  
9842 9831          /*
9843 9832           * Find the tsb_info ahead of this one in the list, and
9844 9833           * also make sure that the tsb_info passed in really
9845 9834           * exists!
9846 9835           */
9847 9836          for (prevtsb = NULL, curtsb = sfmmup->sfmmu_tsb;
9848 9837              curtsb != old_tsbinfo && curtsb != NULL;
9849 9838              prevtsb = curtsb, curtsb = curtsb->tsb_next)
9850 9839                  ;
9851 9840          ASSERT(curtsb != NULL);
9852 9841  
9853 9842          if (!(flags & TSB_SWAPIN) && SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) {
9854 9843                  /*
9855 9844                   * The process is swapped out, so just set the new size
9856 9845                   * code.  When it swaps back in, we'll allocate a new one
9857 9846                   * of the new chosen size.
9858 9847                   */
9859 9848                  curtsb->tsb_szc = szc;
9860 9849                  return (TSB_SUCCESS);
9861 9850          }
9862 9851          SFMMU_FLAGS_SET(sfmmup, HAT_BUSY);
9863 9852  
9864 9853          tte_sz_mask = old_tsbinfo->tsb_ttesz_mask;
9865 9854  
9866 9855          /*
9867 9856           * All initialization is done inside of sfmmu_tsbinfo_alloc().
9868 9857           * If we fail to allocate a TSB, exit.
9869 9858           *
9870 9859           * If tsb grows with new tsb size > 4M and old tsb size < 4M,
9871 9860           * then try 4M slab after the initial alloc fails.
9872 9861           *
9873 9862           * If tsb swapin with tsb size > 4M, then try 4M after the
9874 9863           * initial alloc fails.
9875 9864           */
9876 9865          sfmmu_hat_exit(hatlockp);
9877 9866          if (sfmmu_tsbinfo_alloc(&new_tsbinfo, szc,
9878 9867              tte_sz_mask, flags, sfmmup) &&
9879 9868              (!(flags & (TSB_GROW | TSB_SWAPIN)) || (szc <= TSB_4M_SZCODE) ||
9880 9869              (!(flags & TSB_SWAPIN) &&
9881 9870              (old_tsbinfo->tsb_szc >= TSB_4M_SZCODE)) ||
9882 9871              sfmmu_tsbinfo_alloc(&new_tsbinfo, TSB_4M_SZCODE,
9883 9872              tte_sz_mask, flags, sfmmup))) {
9884 9873                  (void) sfmmu_hat_enter(sfmmup);
9885 9874                  if (!(flags & TSB_SWAPIN))
9886 9875                          SFMMU_STAT(sf_tsb_resize_failures);
9887 9876                  SFMMU_FLAGS_CLEAR(sfmmup, HAT_BUSY);
9888 9877                  return (TSB_ALLOCFAIL);
9889 9878          }
9890 9879          (void) sfmmu_hat_enter(sfmmup);
9891 9880  
9892 9881          /*
9893 9882           * Re-check to make sure somebody else didn't muck with us while we
9894 9883           * didn't hold the HAT lock.  If the process swapped out, fine, just
9895 9884           * exit; this can happen if we try to shrink the TSB from the context
9896 9885           * of another process (such as on an ISM unmap), though it is rare.
9897 9886           */
9898 9887          if (!(flags & TSB_SWAPIN) && SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) {
9899 9888                  SFMMU_STAT(sf_tsb_resize_failures);
9900 9889                  SFMMU_FLAGS_CLEAR(sfmmup, HAT_BUSY);
9901 9890                  sfmmu_hat_exit(hatlockp);
9902 9891                  sfmmu_tsbinfo_free(new_tsbinfo);
9903 9892                  (void) sfmmu_hat_enter(sfmmup);
9904 9893                  return (TSB_LOSTRACE);
9905 9894          }
9906 9895  
9907 9896  #ifdef  DEBUG
9908 9897          /* Reverify that the tsb_info still exists.. for debugging only */
9909 9898          for (prevtsb = NULL, curtsb = sfmmup->sfmmu_tsb;
9910 9899              curtsb != old_tsbinfo && curtsb != NULL;
9911 9900              prevtsb = curtsb, curtsb = curtsb->tsb_next)
9912 9901                  ;
9913 9902          ASSERT(curtsb != NULL);
9914 9903  #endif  /* DEBUG */
9915 9904  
9916 9905          /*
9917 9906           * Quiesce any CPUs running this process on their next TLB miss
9918 9907           * so they atomically see the new tsb_info.  We temporarily set the
9919 9908           * context to invalid context so new threads that come on processor
9920 9909           * after we do the xcall to cpusran will also serialize behind the
9921 9910           * HAT lock on TLB miss and will see the new TSB.  Since this short
9922 9911           * race with a new thread coming on processor is relatively rare,
9923 9912           * this synchronization mechanism should be cheaper than always
9924 9913           * pausing all CPUs for the duration of the setup, which is what
9925 9914           * the old implementation did.  This is particularly true if we are
9926 9915           * copying a huge chunk of memory around during that window.
9927 9916           *
9928 9917           * The memory barriers are to make sure things stay consistent
9929 9918           * with resume() since it does not hold the HAT lock while
9930 9919           * walking the list of tsb_info structures.
9931 9920           */
9932 9921          if ((flags & TSB_SWAPIN) != TSB_SWAPIN) {
9933 9922                  /* The TSB is either growing or shrinking. */
9934 9923                  sfmmu_invalidate_ctx(sfmmup);
9935 9924          } else {
9936 9925                  /*
9937 9926                   * It is illegal to swap in TSBs from a process other
9938 9927                   * than a process being swapped in.  This in turn
9939 9928                   * implies we do not have a valid MMU context here
9940 9929                   * since a process needs one to resolve translation
9941 9930                   * misses.
9942 9931                   */
9943 9932                  ASSERT(curthread->t_procp->p_as->a_hat == sfmmup);
9944 9933          }
9945 9934  
9946 9935  #ifdef DEBUG
9947 9936          ASSERT(max_mmu_ctxdoms > 0);
9948 9937  
9949 9938          /*
9950 9939           * Process should have INVALID_CONTEXT on all MMUs
9951 9940           */
9952 9941          for (i = 0; i < max_mmu_ctxdoms; i++) {
9953 9942  
9954 9943                  ASSERT(sfmmup->sfmmu_ctxs[i].cnum == INVALID_CONTEXT);
9955 9944          }
9956 9945  #endif
9957 9946  
9958 9947          new_tsbinfo->tsb_next = old_tsbinfo->tsb_next;
9959 9948          membar_stst();  /* strict ordering required */
9960 9949          if (prevtsb)
9961 9950                  prevtsb->tsb_next = new_tsbinfo;
9962 9951          else
9963 9952                  sfmmup->sfmmu_tsb = new_tsbinfo;
9964 9953          membar_enter(); /* make sure new TSB globally visible */
9965 9954  
9966 9955          /*
9967 9956           * We need to migrate TSB entries from the old TSB to the new TSB
9968 9957           * if tsb_remap_ttes is set and the TSB is growing.
9969 9958           */
9970 9959          if (tsb_remap_ttes && ((flags & TSB_GROW) == TSB_GROW))
9971 9960                  sfmmu_copy_tsb(old_tsbinfo, new_tsbinfo);
9972 9961  
9973 9962          SFMMU_FLAGS_CLEAR(sfmmup, HAT_BUSY);
9974 9963  
9975 9964          /*
9976 9965           * Drop the HAT lock to free our old tsb_info.
9977 9966           */
9978 9967          sfmmu_hat_exit(hatlockp);
9979 9968  
9980 9969          if ((flags & TSB_GROW) == TSB_GROW) {
9981 9970                  SFMMU_STAT(sf_tsb_grow);
9982 9971          } else if ((flags & TSB_SHRINK) == TSB_SHRINK) {
9983 9972                  SFMMU_STAT(sf_tsb_shrink);
9984 9973          }
9985 9974  
9986 9975          sfmmu_tsbinfo_free(old_tsbinfo);
9987 9976  
9988 9977          (void) sfmmu_hat_enter(sfmmup);
9989 9978          return (TSB_SUCCESS);
9990 9979  }
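/*
 * An illustrative sketch, not taken from hat_sfmmu.c: the publish-then-link
 * ordering used when the new tsb_info is spliced into the list, expressed
 * with C11 atomics instead of membar_stst()/membar_enter().  The new node is
 * fully initialized first, and only then is the list pointer swung to it, so
 * a lock-free reader (resume() in the real code) never observes a
 * half-built node.  All names below are hypothetical.
 */
#include <stdatomic.h>
#include <stddef.h>

struct node_sketch {
        int payload;
        struct node_sketch *next;
};

/* Replace the node at *headp (oldn) with newn, preserving the forward link. */
static void
publish_node(struct node_sketch *_Atomic *headp, struct node_sketch *newn,
    struct node_sketch *oldn)
{
        newn->next = oldn->next;        /* copy forward link before publishing */
        /* release: all prior stores to *newn are visible before the swap */
        atomic_store_explicit(headp, newn, memory_order_release);
}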
9991 9980  
9992 9981  /*
9993 9982   * This function will re-program hat pgsz array, and invalidate the
9994 9983   * process' context, forcing the process to switch to another
9995 9984   * context on the next TLB miss, and therefore start using the
9996 9985   * TLB that is reprogrammed for the new page sizes.
9997 9986   */
9998 9987  void
9999 9988  sfmmu_reprog_pgsz_arr(sfmmu_t *sfmmup, uint8_t *tmp_pgsz)
10000 9989  {
10001 9990          int i;
10002 9991          hatlock_t *hatlockp = NULL;
10003 9992  
10004 9993          hatlockp = sfmmu_hat_enter(sfmmup);
10005 9994          /* USIII+-IV+ optimization, requires hat lock */
10006 9995          if (tmp_pgsz) {
10007 9996                  for (i = 0; i < mmu_page_sizes; i++)
10008 9997                          sfmmup->sfmmu_pgsz[i] = tmp_pgsz[i];
10009 9998          }
10010 9999          SFMMU_STAT(sf_tlb_reprog_pgsz);
10011 10000  
10012 10001          sfmmu_invalidate_ctx(sfmmup);
10013 10002  
10014 10003          sfmmu_hat_exit(hatlockp);
10015 10004  }
10016 10005  
10017 10006  /*
10018 10007   * The scd_rttecnt field in the SCD must be updated to take account of the
10019 10008   * regions which it contains.
10020 10009   */
10021 10010  static void
10022 10011  sfmmu_set_scd_rttecnt(sf_srd_t *srdp, sf_scd_t *scdp)
10023 10012  {
10024 10013          uint_t rid;
10025 10014          uint_t i, j;
10026 10015          ulong_t w;
10027 10016          sf_region_t *rgnp;
10028 10017  
10029 10018          ASSERT(srdp != NULL);
10030 10019  
10031 10020          for (i = 0; i < SFMMU_HMERGNMAP_WORDS; i++) {
10032 10021                  if ((w = scdp->scd_region_map.bitmap[i]) == 0) {
10033 10022                          continue;
10034 10023                  }
10035 10024  
10036 10025                  j = 0;
10037 10026                  while (w) {
10038 10027                          if (!(w & 0x1)) {
10039 10028                                  j++;
10040 10029                                  w >>= 1;
10041 10030                                  continue;
10042 10031                          }
10043 10032                          rid = (i << BT_ULSHIFT) | j;
10044 10033                          j++;
10045 10034                          w >>= 1;
10046 10035  
10047 10036                          ASSERT(SFMMU_IS_SHMERID_VALID(rid));
10048 10037                          ASSERT(rid < SFMMU_MAX_HME_REGIONS);
10049 10038                          rgnp = srdp->srd_hmergnp[rid];
10050 10039                          ASSERT(rgnp->rgn_refcnt > 0);
10051 10040                          ASSERT(rgnp->rgn_id == rid);
10052 10041  
10053 10042                          scdp->scd_rttecnt[rgnp->rgn_pgszc] +=
10054 10043                              rgnp->rgn_size >> TTE_PAGE_SHIFT(rgnp->rgn_pgszc);
10055 10044  
10056 10045                          /*
10057 10046                           * Maintain the tsb0 inflation cnt for the regions
10058 10047                           * in the SCD.
10059 10048                           */
10060 10049                          if (rgnp->rgn_pgszc >= TTE4M) {
10061 10050                                  scdp->scd_sfmmup->sfmmu_tsb0_4minflcnt +=
10062 10051                                      rgnp->rgn_size >>
10063 10052                                      (TTE_PAGE_SHIFT(TTE8K) + 2);
10064 10053                          }
10065 10054                  }
10066 10055          }
10067 10056  }
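/*
 * An illustrative sketch, not taken from hat_sfmmu.c: how the loop in
 * sfmmu_set_scd_rttecnt recovers region IDs from a word-array bitmap.  Each
 * set bit at word i, bit j corresponds to rid = (i << shift) | j, where
 * shift is log2 of the word size in bits (BT_ULSHIFT in the kernel).  The
 * names and sizes below are hypothetical.
 */
#include <stdio.h>
#include <stdint.h>

#define WORD_SHIFT      6               /* 64-bit words: log2(64) */
#define NWORDS          4

static void
walk_bitmap(const uint64_t bitmap[NWORDS])
{
        for (unsigned i = 0; i < NWORDS; i++) {
                uint64_t w = bitmap[i];
                unsigned j = 0;

                while (w != 0) {
                        if (w & 1)
                                printf("rid %u is set\n",
                                    (i << WORD_SHIFT) | j);
                        j++;
                        w >>= 1;
                }
        }
}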
10068 10057  
10069 10058  /*
10070 10059   * This function assumes that there are either four or six supported page
10071 10060   * sizes and at most two programmable TLBs, so we need to decide which
10072 10061   * page sizes are most important and then tell the MMU layer so it
10073 10062   * can adjust the TLB page sizes accordingly (if supported).
10074 10063   *
10075 10064   * If these assumptions change, this function will need to be
10076 10065   * updated to support whatever the new limits are.
10077 10066   *
10078 10067   * The growing flag is nonzero if we are growing the address space,
10079 10068   * and zero if it is shrinking.  This allows us to decide whether
10080 10069   * to grow or shrink our TSB, depending upon available memory
10081 10070   * conditions.
10082 10071   */
10083 10072  static void
10084 10073  sfmmu_check_page_sizes(sfmmu_t *sfmmup, int growing)
10085 10074  {
10086 10075          uint64_t ttecnt[MMU_PAGE_SIZES];
10087 10076          uint64_t tte8k_cnt, tte4m_cnt;
10088 10077          uint8_t i;
10089 10078          int sectsb_thresh;
10090 10079  
10091 10080          /*
10092 10081           * Kernel threads, processes with small address spaces not using
10093 10082           * large pages, and dummy ISM HATs need not apply.
10094 10083           */
10095 10084          if (sfmmup == ksfmmup || sfmmup->sfmmu_ismhat != NULL)
10096 10085                  return;
10097 10086  
10098 10087          if (!SFMMU_LGPGS_INUSE(sfmmup) &&
10099 10088              sfmmup->sfmmu_ttecnt[TTE8K] <= tsb_rss_factor)
10100 10089                  return;
10101 10090  
10102 10091          for (i = 0; i < mmu_page_sizes; i++) {
10103 10092                  ttecnt[i] = sfmmup->sfmmu_ttecnt[i] +
10104 10093                      sfmmup->sfmmu_ismttecnt[i];
10105 10094          }
10106 10095  
10107 10096          /* Check pagesizes in use, and possibly reprogram DTLB. */
10108 10097          if (&mmu_check_page_sizes)
10109 10098                  mmu_check_page_sizes(sfmmup, ttecnt);
10110 10099  
10111 10100          /*
10112 10101           * Calculate the number of 8k ttes to represent the span of these
10113 10102           * pages.
10114 10103           */
10115 10104          tte8k_cnt = ttecnt[TTE8K] +
10116 10105              (ttecnt[TTE64K] << (MMU_PAGESHIFT64K - MMU_PAGESHIFT)) +
10117 10106              (ttecnt[TTE512K] << (MMU_PAGESHIFT512K - MMU_PAGESHIFT));
10118 10107          if (mmu_page_sizes == max_mmu_page_sizes) {
10119 10108                  tte4m_cnt = ttecnt[TTE4M] +
10120 10109                      (ttecnt[TTE32M] << (MMU_PAGESHIFT32M - MMU_PAGESHIFT4M)) +
10121 10110                      (ttecnt[TTE256M] << (MMU_PAGESHIFT256M - MMU_PAGESHIFT4M));
10122 10111          } else {
10123 10112                  tte4m_cnt = ttecnt[TTE4M];
10124 10113          }
10125 10114  
10126 10115          /*
10127 10116           * Inflate tte8k_cnt to allow for region large page allocation failure.
10128 10117           */
10129 10118          tte8k_cnt += sfmmup->sfmmu_tsb0_4minflcnt;
10130 10119  
10131 10120          /*
10132 10121           * Inflate TSB sizes by a factor of 2 if this process
10133 10122           * uses 4M text pages to minimize extra conflict misses
10134 10123           * in the first TSB since without counting text pages
10135 10124           * 8K TSB may become too small.
10136 10125           *
10137 10126           * Also double the size of the second TSB to minimize
10138 10127           * extra conflict misses due to competition between 4M text pages
10139 10128           * and data pages.
10140 10129           *
10141 10130           * We need to adjust the second TSB allocation threshold by the
10142 10131           * inflation factor, since there is no point in creating a second
10143 10132           * TSB when we know all the mappings can fit in the I/D TLBs.
10144 10133           */
10145 10134          sectsb_thresh = tsb_sectsb_threshold;
10146 10135          if (sfmmup->sfmmu_flags & HAT_4MTEXT_FLAG) {
10147 10136                  tte8k_cnt <<= 1;
10148 10137                  tte4m_cnt <<= 1;
10149 10138                  sectsb_thresh <<= 1;
10150 10139          }
10151 10140  
10152 10141          /*
10153 10142           * Check to see if our TSB is the right size; we may need to
10154 10143           * grow or shrink it.  If the process is small, our work is
10155 10144           * finished at this point.
10156 10145           */
10157 10146          if (tte8k_cnt <= tsb_rss_factor && tte4m_cnt <= sectsb_thresh) {
10158 10147                  return;
10159 10148          }
10160 10149          sfmmu_size_tsb(sfmmup, growing, tte8k_cnt, tte4m_cnt, sectsb_thresh);
10161 10150  }
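/*
 * An illustrative sketch, not taken from hat_sfmmu.c: the arithmetic used
 * above to express larger-page mapping counts as 8K-page equivalents
 * (tte8k_cnt).  A 64K page spans 8 8K pages and a 512K page spans 64, so
 * each count is scaled by the difference in page shifts.  The constants
 * mirror the SPARC page shifts, but the function name is hypothetical.
 */
#include <stdint.h>

#define PAGESHIFT_8K    13
#define PAGESHIFT_64K   16
#define PAGESHIFT_512K  19

static uint64_t
tte8k_equiv(uint64_t cnt8k, uint64_t cnt64k, uint64_t cnt512k)
{
        return (cnt8k +
            (cnt64k << (PAGESHIFT_64K - PAGESHIFT_8K)) +
            (cnt512k << (PAGESHIFT_512K - PAGESHIFT_8K)));
}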
10162 10151  
10163 10152  static void
10164 10153  sfmmu_size_tsb(sfmmu_t *sfmmup, int growing, uint64_t tte8k_cnt,
10165 10154          uint64_t tte4m_cnt, int sectsb_thresh)
10166 10155  {
10167 10156          int tsb_bits;
10168 10157          uint_t tsb_szc;
10169 10158          struct tsb_info *tsbinfop;
10170 10159          hatlock_t *hatlockp = NULL;
10171 10160  
10172 10161          hatlockp = sfmmu_hat_enter(sfmmup);
10173 10162          ASSERT(hatlockp != NULL);
10174 10163          tsbinfop = sfmmup->sfmmu_tsb;
10175 10164          ASSERT(tsbinfop != NULL);
10176 10165  
10177 10166          /*
10178 10167           * If we're growing, select the size based on RSS.  If we're
10179 10168           * shrinking, leave some room so we don't have to turn around and
10180 10169           * grow again immediately.
10181 10170           */
10182 10171          if (growing)
10183 10172                  tsb_szc = SELECT_TSB_SIZECODE(tte8k_cnt);
10184 10173          else
10185 10174                  tsb_szc = SELECT_TSB_SIZECODE(tte8k_cnt << 1);
10186 10175  
10187 10176          if (!growing && (tsb_szc < tsbinfop->tsb_szc) &&
10188 10177              (tsb_szc >= default_tsb_size) && TSB_OK_SHRINK()) {
10189 10178                  (void) sfmmu_replace_tsb(sfmmup, tsbinfop, tsb_szc,
10190 10179                      hatlockp, TSB_SHRINK);
10191 10180          } else if (growing && tsb_szc > tsbinfop->tsb_szc && TSB_OK_GROW()) {
10192 10181                  (void) sfmmu_replace_tsb(sfmmup, tsbinfop, tsb_szc,
10193 10182                      hatlockp, TSB_GROW);
10194 10183          }
10195 10184          tsbinfop = sfmmup->sfmmu_tsb;
10196 10185  
10197 10186          /*
10198 10187           * With the TLB and first TSB out of the way, we need to see if
10199 10188           * we need a second TSB for 4M pages.  If we managed to reprogram
10200 10189           * the TLB page sizes above, the process will start using this new
10201 10190           * TSB right away; otherwise, it will start using it on the next
10202 10191           * context switch.  Either way, it's no big deal so there's no
10203 10192           * synchronization with the trap handlers here unless we grow the
10204 10193           * TSB (in which case it's required to prevent using the old one
10205 10194           * after it's freed). Note: second tsb is required for 32M/256M
10206 10195           * page sizes.
10207 10196           */
10208 10197          if (tte4m_cnt > sectsb_thresh) {
10209 10198                  /*
10210 10199                   * If we're growing, select the size based on RSS.  If we're
10211 10200                   * shrinking, leave some room so we don't have to turn
10212 10201                   * around and grow again immediately.
10213 10202                   */
10214 10203                  if (growing)
10215 10204                          tsb_szc = SELECT_TSB_SIZECODE(tte4m_cnt);
10216 10205                  else
10217 10206                          tsb_szc = SELECT_TSB_SIZECODE(tte4m_cnt << 1);
10218 10207                  if (tsbinfop->tsb_next == NULL) {
10219 10208                          struct tsb_info *newtsb;
10220 10209                          int allocflags = SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)?
10221 10210                              0 : TSB_ALLOC;
10222 10211  
10223 10212                          sfmmu_hat_exit(hatlockp);
10224 10213  
10225 10214                          /*
10226 10215                           * Try to allocate a TSB for 4[32|256]M pages.  If we
10227 10216                           * can't get the size we want, retry w/a minimum sized
10228 10217                           * TSB.  If that still didn't work, give up; we can
10229 10218                           * still run without one.
10230 10219                           */
10231 10220                          tsb_bits = (mmu_page_sizes == max_mmu_page_sizes)?
10232 10221                              TSB4M|TSB32M|TSB256M:TSB4M;
10233 10222                          if ((sfmmu_tsbinfo_alloc(&newtsb, tsb_szc, tsb_bits,
10234 10223                              allocflags, sfmmup)) &&
10235 10224                              (tsb_szc <= TSB_4M_SZCODE ||
10236 10225                              sfmmu_tsbinfo_alloc(&newtsb, TSB_4M_SZCODE,
10237 10226                              tsb_bits, allocflags, sfmmup)) &&
10238 10227                              sfmmu_tsbinfo_alloc(&newtsb, TSB_MIN_SZCODE,
10239 10228                              tsb_bits, allocflags, sfmmup)) {
10240 10229                                  return;
10241 10230                          }
10242 10231  
10243 10232                          hatlockp = sfmmu_hat_enter(sfmmup);
10244 10233  
10245 10234                          sfmmu_invalidate_ctx(sfmmup);
10246 10235  
10247 10236                          if (sfmmup->sfmmu_tsb->tsb_next == NULL) {
10248 10237                                  sfmmup->sfmmu_tsb->tsb_next = newtsb;
10249 10238                                  SFMMU_STAT(sf_tsb_sectsb_create);
10250 10239                                  sfmmu_hat_exit(hatlockp);
10251 10240                                  return;
10252 10241                          } else {
10253 10242                                  /*
10254 10243                                   * It's annoying, but possible for us
10255 10244                                   * to get here... we dropped the HAT lock
10256 10245                                   * because of locking order in the kmem
10257 10246                                   * allocator, and while we were off getting
10258 10247                                   * our memory, some other thread decided to
10259 10248                                   * do us a favor and won the race to get a
10260 10249                                   * second TSB for this process.  Sigh.
10261 10250                                   */
10262 10251                                  sfmmu_hat_exit(hatlockp);
10263 10252                                  sfmmu_tsbinfo_free(newtsb);
10264 10253                                  return;
10265 10254                          }
10266 10255                  }
10267 10256  
10268 10257                  /*
10269 10258                   * We have a second TSB, see if it's big enough.
10270 10259                   */
10271 10260                  tsbinfop = tsbinfop->tsb_next;
10272 10261  
10273 10262                  /*
10274 10263                   * Check to see if our second TSB is the right size;
10275 10264                   * we may need to grow or shrink it.
10276 10265                   * To prevent thrashing (e.g. growing the TSB on a
10277 10266                   * subsequent map operation), only try to shrink if
10278 10267                   * the TSB reach exceeds twice the virtual address
10279 10268                   * space size.
10280 10269                   */
10281 10270                  if (!growing && (tsb_szc < tsbinfop->tsb_szc) &&
10282 10271                      (tsb_szc >= default_tsb_size) && TSB_OK_SHRINK()) {
10283 10272                          (void) sfmmu_replace_tsb(sfmmup, tsbinfop,
10284 10273                              tsb_szc, hatlockp, TSB_SHRINK);
10285 10274                  } else if (growing && tsb_szc > tsbinfop->tsb_szc &&
10286 10275                      TSB_OK_GROW()) {
10287 10276                          (void) sfmmu_replace_tsb(sfmmup, tsbinfop,
10288 10277                              tsb_szc, hatlockp, TSB_GROW);
10289 10278                  }
10290 10279          }
10291 10280  
10292 10281          sfmmu_hat_exit(hatlockp);
10293 10282  }
10294 10283  
10295 10284  /*
10296 10285   * Free up a sfmmu
10297 10286   * Since the sfmmu is currently embedded in the hat struct we simply zero
10298 10287   * out our fields and free up the ism map blk list if any.
10299 10288   */
10300 10289  static void
10301 10290  sfmmu_free_sfmmu(sfmmu_t *sfmmup)
10302 10291  {
10303 10292          ism_blk_t       *blkp, *nx_blkp;
10304 10293  #ifdef  DEBUG
10305 10294          ism_map_t       *map;
10306 10295          int             i;
10307 10296  #endif
10308 10297  
10309 10298          ASSERT(sfmmup->sfmmu_ttecnt[TTE8K] == 0);
10310 10299          ASSERT(sfmmup->sfmmu_ttecnt[TTE64K] == 0);
10311 10300          ASSERT(sfmmup->sfmmu_ttecnt[TTE512K] == 0);
10312 10301          ASSERT(sfmmup->sfmmu_ttecnt[TTE4M] == 0);
10313 10302          ASSERT(sfmmup->sfmmu_ttecnt[TTE32M] == 0);
10314 10303          ASSERT(sfmmup->sfmmu_ttecnt[TTE256M] == 0);
10315 10304          ASSERT(SF_RGNMAP_ISNULL(sfmmup));
10316 10305  
10317 10306          sfmmup->sfmmu_free = 0;
10318 10307          sfmmup->sfmmu_ismhat = 0;
10319 10308  
10320 10309          blkp = sfmmup->sfmmu_iblk;
10321 10310          sfmmup->sfmmu_iblk = NULL;
10322 10311  
10323 10312          while (blkp) {
10324 10313  #ifdef  DEBUG
10325 10314                  map = blkp->iblk_maps;
10326 10315                  for (i = 0; i < ISM_MAP_SLOTS; i++) {
10327 10316                          ASSERT(map[i].imap_seg == 0);
10328 10317                          ASSERT(map[i].imap_ismhat == NULL);
10329 10318                          ASSERT(map[i].imap_ment == NULL);
10330 10319                  }
10331 10320  #endif
10332 10321                  nx_blkp = blkp->iblk_next;
10333 10322                  blkp->iblk_next = NULL;
10334 10323                  blkp->iblk_nextpa = (uint64_t)-1;
10335 10324                  kmem_cache_free(ism_blk_cache, blkp);
10336 10325                  blkp = nx_blkp;
10337 10326          }
10338 10327  }
10339 10328  
10340 10329  /*
10341 10330   * Locking primitives accessed by HATLOCK macros
10342 10331   */
10343 10332  
10344 10333  #define SFMMU_SPL_MTX   (0x0)
10345 10334  #define SFMMU_ML_MTX    (0x1)
10346 10335  
10347 10336  #define SFMMU_MLSPL_MTX(type, pg)       (((type) == SFMMU_SPL_MTX) ? \
10348 10337                                              SPL_HASH(pg) : MLIST_HASH(pg))
10349 10338  
10350 10339  kmutex_t *
10351 10340  sfmmu_page_enter(struct page *pp)
10352 10341  {
10353 10342          return (sfmmu_mlspl_enter(pp, SFMMU_SPL_MTX));
10354 10343  }
10355 10344  
10356 10345  void
10357 10346  sfmmu_page_exit(kmutex_t *spl)
10358 10347  {
10359 10348          mutex_exit(spl);
10360 10349  }
10361 10350  
10362 10351  int
10363 10352  sfmmu_page_spl_held(struct page *pp)
10364 10353  {
10365 10354          return (sfmmu_mlspl_held(pp, SFMMU_SPL_MTX));
10366 10355  }
10367 10356  
10368 10357  kmutex_t *
10369 10358  sfmmu_mlist_enter(struct page *pp)
10370 10359  {
10371 10360          return (sfmmu_mlspl_enter(pp, SFMMU_ML_MTX));
10372 10361  }
10373 10362  
10374 10363  void
10375 10364  sfmmu_mlist_exit(kmutex_t *mml)
10376 10365  {
10377 10366          mutex_exit(mml);
10378 10367  }
10379 10368  
10380 10369  int
10381 10370  sfmmu_mlist_held(struct page *pp)
10382 10371  {
10383 10372  
10384 10373          return (sfmmu_mlspl_held(pp, SFMMU_ML_MTX));
10385 10374  }
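/*
 * An illustrative sketch, not taken from hat_sfmmu.c: the hashed-lock-array
 * idea behind SPL_HASH/MLIST_HASH and SFMMU_MLSPL_MTX, written in portable C
 * with pthreads.  An object is mapped to one mutex out of a fixed array by
 * hashing its address, so unrelated objects rarely contend and no per-object
 * mutex storage is needed.  All names below are hypothetical.
 */
#include <pthread.h>
#include <stdint.h>

#define NLOCKS  128                     /* must be a power of two */

static pthread_mutex_t lock_table[NLOCKS];

static void
lock_table_init(void)
{
        for (int i = 0; i < NLOCKS; i++)
                (void) pthread_mutex_init(&lock_table[i], NULL);
}

static pthread_mutex_t *
obj_lock_enter(const void *obj)
{
        /* hash the address; shift off low bits that are always zero */
        pthread_mutex_t *mp =
            &lock_table[((uintptr_t)obj >> 6) & (NLOCKS - 1)];

        (void) pthread_mutex_lock(mp);
        return (mp);
}

static void
obj_lock_exit(pthread_mutex_t *mp)
{
        (void) pthread_mutex_unlock(mp);
}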
10386 10375  
10387 10376  /*
10388 10377   * Common code for sfmmu_mlist_enter() and sfmmu_page_enter().  For
10389 10378   * the sfmmu_mlist_enter() case the mml_table lock array is used, and for
10390 10379   * sfmmu_page_enter() the sfmmu_page_lock array is used.
10391 10380   *
10392 10381   * The lock is taken on a root page so that it protects an operation on all
10393 10382   * constituent pages of a large page pp belongs to.
10394 10383   *
10395 10384   * The routine takes a lock from the appropriate array. The lock is determined
10396 10385   * by hashing the root page. After taking the lock this routine checks if the
10397 10386   * root page has the same size code that was used to determine the root (i.e.
10398 10387   * that root hasn't changed).  If root page has the expected p_szc field we
10399 10388   * have the right lock and it's returned to the caller. If root's p_szc
10400 10389   * decreased we release the lock and retry from the beginning.  This case can
10401 10390   * happen due to hat_page_demote() decreasing p_szc between our load of p_szc
10402 10391   * value and taking the lock. The number of retries due to p_szc decrease is
10403 10392   * limited by the maximum p_szc value. If p_szc is 0 we return the lock
10404 10393   * determined by hashing pp itself.
10405 10394   *
10406 10395   * If our caller doesn't hold a SE_SHARED or SE_EXCL lock on pp it's also
10407 10396   * possible that p_szc can increase. To increase p_szc a thread has to lock
10408 10397   * all constituent pages EXCL and do hat_pageunload() on all of them. All the
10409 10398   * callers that don't hold a page locked recheck if hmeblk through which pp
10410 10399   * was found still maps this pp.  If it doesn't map it anymore, the returned lock
10411 10400   * is immediately dropped. Therefore if sfmmu_mlspl_enter() hits the case of
10412 10401   * p_szc increase after taking the lock it returns this lock without further
10413 10402   * retries because in this case the caller doesn't care about which lock was
10414 10403   * taken. The caller will drop it right away.
10415 10404   *
10416 10405   * After the routine returns it's guaranteed that hat_page_demote() can't
10417 10406   * change p_szc field of any of constituent pages of a large page pp belongs
10418 10407   * to as long as pp was either locked at least SHARED prior to this call or
10419 10408   * the caller finds that hment that pointed to this pp still references this
10420 10409   * pp (this also assumes that the caller holds hme hash bucket lock so that
10421 10410   * the same pp can't be remapped into the same hmeblk after it was unmapped by
10422 10411   * hat_pageunload()).
10423 10412   */
10424 10413  static kmutex_t *
10425 10414  sfmmu_mlspl_enter(struct page *pp, int type)
10426 10415  {
10427 10416          kmutex_t        *mtx;
10428 10417          uint_t          prev_rszc = UINT_MAX;
10429 10418          page_t          *rootpp;
10430 10419          uint_t          szc;
10431 10420          uint_t          rszc;
10432 10421          uint_t          pszc = pp->p_szc;
10433 10422  
10434 10423          ASSERT(pp != NULL);
10435 10424  
10436 10425  again:
10437 10426          if (pszc == 0) {
10438 10427                  mtx = SFMMU_MLSPL_MTX(type, pp);
10439 10428                  mutex_enter(mtx);
10440 10429                  return (mtx);
10441 10430          }
10442 10431  
10443 10432          /* The lock lives in the root page */
10444 10433          rootpp = PP_GROUPLEADER(pp, pszc);
10445 10434          mtx = SFMMU_MLSPL_MTX(type, rootpp);
10446 10435          mutex_enter(mtx);
10447 10436  
10448 10437          /*
10449 10438           * Return mml in the following 3 cases:
10450 10439           *
10451 10440           * 1) If pp itself is root since if its p_szc decreased before we took
10452 10441           * the lock pp is still the root of smaller szc page. And if its p_szc
10453 10442           * increased it doesn't matter what lock we return (see comment in
10454 10443           * front of this routine).
10455 10444           *
10456 10445           * 2) If pp's not root but rootpp is the root of a rootpp->p_szc size
10457 10446           * large page we have the right lock since any previous potential
10458 10447           * hat_page_demote() is done demoting from greater than current root's
10459 10448           * p_szc because hat_page_demote() changes root's p_szc last. No
10460 10449           * further hat_page_demote() can start or be in progress since it
10461 10450           * would need the same lock we currently hold.
10462 10451           *
10463 10452           * 3) If rootpp's p_szc increased since previous iteration it doesn't
10464 10453           * matter what lock we return (see comment in front of this routine).
10465 10454           */
10466 10455          if (pp == rootpp || (rszc = rootpp->p_szc) == pszc ||
10467 10456              rszc >= prev_rszc) {
10468 10457                  return (mtx);
10469 10458          }
10470 10459  
10471 10460          /*
10472 10461           * hat_page_demote() could have decreased root's p_szc.
10473 10462           * In this case pp's p_szc must also be smaller than pszc.
10474 10463           * Retry.
10475 10464           */
10476 10465          if (rszc < pszc) {
10477 10466                  szc = pp->p_szc;
10478 10467                  if (szc < pszc) {
10479 10468                          mutex_exit(mtx);
10480 10469                          pszc = szc;
10481 10470                          goto again;
10482 10471                  }
10483 10472                  /*
10484 10473                   * pp's p_szc increased after it was decreased.
10485 10474                   * page cannot be mapped. Return current lock. The caller
10486 10475                   * will drop it right away.
10487 10476                   */
10488 10477                  return (mtx);
10489 10478          }
10490 10479  
10491 10480          /*
10492 10481           * root's p_szc is greater than pp's p_szc.
10493 10482           * hat_page_demote() is not done with all pages
10494 10483           * yet. Wait for it to complete.
10495 10484           */
10496 10485          mutex_exit(mtx);
10497 10486          rootpp = PP_GROUPLEADER(rootpp, rszc);
10498 10487          mtx = SFMMU_MLSPL_MTX(type, rootpp);
10499 10488          mutex_enter(mtx);
10500 10489          mutex_exit(mtx);
10501 10490          prev_rszc = rszc;
10502 10491          goto again;
10503 10492  }
10504 10493  
10505 10494  static int
10506 10495  sfmmu_mlspl_held(struct page *pp, int type)
10507 10496  {
10508 10497          kmutex_t        *mtx;
10509 10498  
10510 10499          ASSERT(pp != NULL);
10511 10500          /* The lock lives in the root page */
10512 10501          pp = PP_PAGEROOT(pp);
10513 10502          ASSERT(pp != NULL);
10514 10503  
10515 10504          mtx = SFMMU_MLSPL_MTX(type, pp);
10516 10505          return (MUTEX_HELD(mtx));
10517 10506  }
10518 10507  
10519 10508  static uint_t
10520 10509  sfmmu_get_free_hblk(struct hme_blk **hmeblkpp, uint_t critical)
10521 10510  {
10522 10511          struct  hme_blk *hblkp;
10523 10512  
10524 10513  
10525 10514          if (freehblkp != NULL) {
10526 10515                  mutex_enter(&freehblkp_lock);
10527 10516                  if (freehblkp != NULL) {
10528 10517                          /*
10529 10518                           * If the current thread owns hblk_reserve, or this is a
10530 10519                           * critical request from sfmmu_hblk_steal(),
10531 10520                           * let it succeed even if freehblkcnt is really low.
10532 10521                           */
10533 10522                          if (freehblkcnt <= HBLK_RESERVE_MIN && !critical) {
10534 10523                                  SFMMU_STAT(sf_get_free_throttle);
10535 10524                                  mutex_exit(&freehblkp_lock);
10536 10525                                  return (0);
10537 10526                          }
10538 10527                          freehblkcnt--;
10539 10528                          *hmeblkpp = freehblkp;
10540 10529                          hblkp = *hmeblkpp;
10541 10530                          freehblkp = hblkp->hblk_next;
10542 10531                          mutex_exit(&freehblkp_lock);
10543 10532                          hblkp->hblk_next = NULL;
10544 10533                          SFMMU_STAT(sf_get_free_success);
10545 10534  
10546 10535                          ASSERT(hblkp->hblk_hmecnt == 0);
10547 10536                          ASSERT(hblkp->hblk_vcnt == 0);
10548 10537                          ASSERT(hblkp->hblk_nextpa == va_to_pa((caddr_t)hblkp));
10549 10538  
10550 10539                          return (1);
10551 10540                  }
10552 10541                  mutex_exit(&freehblkp_lock);
10553 10542          }
10554 10543  
10555 10544          /* Check cpu hblk pending queues */
10556 10545          if ((*hmeblkpp = sfmmu_check_pending_hblks(TTE8K)) != NULL) {
10557 10546                  hblkp = *hmeblkpp;
10558 10547                  hblkp->hblk_next = NULL;
10559 10548                  hblkp->hblk_nextpa = va_to_pa((caddr_t)hblkp);
10560 10549  
10561 10550                  ASSERT(hblkp->hblk_hmecnt == 0);
10562 10551                  ASSERT(hblkp->hblk_vcnt == 0);
10563 10552  
10564 10553                  return (1);
10565 10554          }
10566 10555  
10567 10556          SFMMU_STAT(sf_get_free_fail);
10568 10557          return (0);
10569 10558  }
10570 10559  
10571 10560  static uint_t
10572 10561  sfmmu_put_free_hblk(struct hme_blk *hmeblkp, uint_t critical)
10573 10562  {
10574 10563          struct  hme_blk *hblkp;
10575 10564  
10576 10565          ASSERT(hmeblkp->hblk_hmecnt == 0);
10577 10566          ASSERT(hmeblkp->hblk_vcnt == 0);
10578 10567          ASSERT(hmeblkp->hblk_nextpa == va_to_pa((caddr_t)hmeblkp));
10579 10568  
10580 10569          /*
10581 10570           * If the current thread is mapping into kernel space,
10582 10571           * let it succeed even if freehblkcnt is at its max
10583 10572           * so that it will avoid freeing it to kmem.
10584 10573           * This will prevent stack overflow due to
10585 10574           * possible recursion since kmem_cache_free()
10586 10575           * might require creation of a slab which
10587 10576           * in turn needs an hmeblk to map that slab;
10588 10577           * let's break this vicious chain at the first
10589 10578           * opportunity.
10590 10579           */
10591 10580          if (freehblkcnt < HBLK_RESERVE_CNT || critical) {
10592 10581                  mutex_enter(&freehblkp_lock);
10593 10582                  if (freehblkcnt < HBLK_RESERVE_CNT || critical) {
10594 10583                          SFMMU_STAT(sf_put_free_success);
10595 10584                          freehblkcnt++;
10596 10585                          hmeblkp->hblk_next = freehblkp;
10597 10586                          freehblkp = hmeblkp;
10598 10587                          mutex_exit(&freehblkp_lock);
10599 10588                          return (1);
10600 10589                  }
10601 10590                  mutex_exit(&freehblkp_lock);
10602 10591          }
10603 10592  
10604 10593          /*
10605 10594           * Bring down freehblkcnt to HBLK_RESERVE_CNT. We are here
10606 10595           * only if freehblkcnt is at least HBLK_RESERVE_CNT *and*
10607 10596           * we are not in the process of mapping into kernel space.
10608 10597           */
10609 10598          ASSERT(!critical);
10610 10599          while (freehblkcnt > HBLK_RESERVE_CNT) {
10611 10600                  mutex_enter(&freehblkp_lock);
10612 10601                  if (freehblkcnt > HBLK_RESERVE_CNT) {
10613 10602                          freehblkcnt--;
10614 10603                          hblkp = freehblkp;
10615 10604                          freehblkp = hblkp->hblk_next;
10616 10605                          mutex_exit(&freehblkp_lock);
10617 10606                          ASSERT(get_hblk_cache(hblkp) == sfmmu8_cache);
10618 10607                          kmem_cache_free(sfmmu8_cache, hblkp);
10619 10608                          continue;
10620 10609                  }
10621 10610                  mutex_exit(&freehblkp_lock);
10622 10611          }
10623 10612          SFMMU_STAT(sf_put_free_fail);
10624 10613          return (0);
10625 10614  }
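/*
 * An illustrative sketch, not taken from hat_sfmmu.c: the throttled
 * free-list pattern behind sfmmu_get_free_hblk/sfmmu_put_free_hblk.
 * Non-critical gets are refused once the cached count drops to a reserve
 * threshold, and puts above a high-water mark are declined so the caller
 * frees to the general allocator instead; a small emergency pool is thereby
 * kept for critical paths.  All names and thresholds below are hypothetical.
 */
#include <pthread.h>
#include <stddef.h>

#define RESERVE_MIN     8       /* keep at least this many for critical use */
#define HIGH_WATER      64      /* don't cache more than this */

struct blk { struct blk *next; };

static pthread_mutex_t  freelist_lock = PTHREAD_MUTEX_INITIALIZER;
static struct blk       *freelist;
static int              freecnt;

static struct blk *
get_free_blk(int critical)
{
        struct blk *bp = NULL;

        pthread_mutex_lock(&freelist_lock);
        if (freelist != NULL && (freecnt > RESERVE_MIN || critical)) {
                bp = freelist;
                freelist = bp->next;
                freecnt--;
        }
        pthread_mutex_unlock(&freelist_lock);
        return (bp);            /* NULL means: use the general allocator */
}

static int
put_free_blk(struct blk *bp)
{
        pthread_mutex_lock(&freelist_lock);
        if (freecnt >= HIGH_WATER) {
                pthread_mutex_unlock(&freelist_lock);
                return (0);     /* caller should free to the real allocator */
        }
        bp->next = freelist;
        freelist = bp;
        freecnt++;
        pthread_mutex_unlock(&freelist_lock);
        return (1);
}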
10626 10615  
10627 10616  static void
10628 10617  sfmmu_hblk_swap(struct hme_blk *new)
10629 10618  {
10630 10619          struct hme_blk *old, *hblkp, *prev;
10631 10620          uint64_t newpa;
10632 10621          caddr_t base, vaddr, endaddr;
10633 10622          struct hmehash_bucket *hmebp;
10634 10623          struct sf_hment *osfhme, *nsfhme;
10635 10624          page_t *pp;
10636 10625          kmutex_t *pml;
10637 10626          tte_t tte;
10638 10627          struct hme_blk *list = NULL;
10639 10628  
10640 10629  #ifdef  DEBUG
10641 10630          hmeblk_tag              hblktag;
10642 10631          struct hme_blk          *found;
10643 10632  #endif
10644 10633          old = HBLK_RESERVE;
10645 10634          ASSERT(!old->hblk_shared);
10646 10635  
10647 10636          /*
10648 10637           * save pa before bcopy clobbers it
10649 10638           */
10650 10639          newpa = new->hblk_nextpa;
10651 10640  
10652 10641          base = (caddr_t)get_hblk_base(old);
10653 10642          endaddr = base + get_hblk_span(old);
10654 10643  
10655 10644          /*
10656 10645           * acquire hash bucket lock.
10657 10646           */
10658 10647          hmebp = sfmmu_tteload_acquire_hashbucket(ksfmmup, base, TTE8K,
10659 10648              SFMMU_INVALID_SHMERID);
10660 10649  
10661 10650          /*
10662 10651           * copy contents from old to new
10663 10652           */
10664 10653          bcopy((void *)old, (void *)new, HME8BLK_SZ);
10665 10654  
10666 10655          /*
10667 10656           * add new to hash chain
10668 10657           */
10669 10658          sfmmu_hblk_hash_add(hmebp, new, newpa);
10670 10659  
10671 10660          /*
10672 10661           * search hash chain for hblk_reserve; this needs to be performed
10673 10662           * after adding new, otherwise prev won't correspond to the hblk which
10674 10663           * is prior to old in hash chain when we call sfmmu_hblk_hash_rm to
10675 10664           * remove old later.
10676 10665           */
10677 10666          for (prev = NULL,
10678 10667              hblkp = hmebp->hmeblkp; hblkp != NULL && hblkp != old;
10679 10668              prev = hblkp, hblkp = hblkp->hblk_next)
10680 10669                  ;
10681 10670  
10682 10671          if (hblkp != old)
10683 10672                  panic("sfmmu_hblk_swap: hblk_reserve not found");
10684 10673  
10685 10674          /*
10686 10675           * p_mapping list is still pointing to hments in hblk_reserve;
10687 10676           * fix up p_mapping list so that they point to hments in new.
10688 10677           *
10689 10678           * Since all these mappings are created by hblk_reserve_thread
10690 10679           * on the way and it's using at least one of the buffers from each of
10691 10680           * the newly minted slabs, there is no danger of any of these
10692 10681           * mappings getting unloaded by another thread.
10693 10682           *
10694 10683           * tsbmiss could only modify ref/mod bits of hments in old/new.
10695 10684           * Since all of these hments hold mappings established by segkmem
10696 10685           * and mappings in segkmem are setup with HAT_NOSYNC, ref/mod bits
10697 10686           * have no meaning for the mappings in hblk_reserve.  hments in
10698 10687           * old and new are identical except for ref/mod bits.
10699 10688           */
10700 10689          for (vaddr = base; vaddr < endaddr; vaddr += TTEBYTES(TTE8K)) {
10701 10690  
10702 10691                  HBLKTOHME(osfhme, old, vaddr);
10703 10692                  sfmmu_copytte(&osfhme->hme_tte, &tte);
10704 10693  
10705 10694                  if (TTE_IS_VALID(&tte)) {
10706 10695                          if ((pp = osfhme->hme_page) == NULL)
10707 10696                                  panic("sfmmu_hblk_swap: page not mapped");
10708 10697  
10709 10698                          pml = sfmmu_mlist_enter(pp);
10710 10699  
10711 10700                          if (pp != osfhme->hme_page)
10712 10701                                  panic("sfmmu_hblk_swap: mapping changed");
10713 10702  
10714 10703                          HBLKTOHME(nsfhme, new, vaddr);
10715 10704  
10716 10705                          HME_ADD(nsfhme, pp);
10717 10706                          HME_SUB(osfhme, pp);
10718 10707  
10719 10708                          sfmmu_mlist_exit(pml);
10720 10709                  }
10721 10710          }
10722 10711  
10723 10712          /*
10724 10713           * remove old from hash chain
10725 10714           */
10726 10715          sfmmu_hblk_hash_rm(hmebp, old, prev, &list, 1);
10727 10716  
10728 10717  #ifdef  DEBUG
10729 10718  
10730 10719          hblktag.htag_id = ksfmmup;
10731 10720          hblktag.htag_rid = SFMMU_INVALID_SHMERID;
10732 10721          hblktag.htag_bspage = HME_HASH_BSPAGE(base, HME_HASH_SHIFT(TTE8K));
10733 10722          hblktag.htag_rehash = HME_HASH_REHASH(TTE8K);
10734 10723          HME_HASH_FAST_SEARCH(hmebp, hblktag, found);
10735 10724  
10736 10725          if (found != new)
10737 10726                  panic("sfmmu_hblk_swap: new hblk not found");
10738 10727  #endif
10739 10728  
10740 10729          SFMMU_HASH_UNLOCK(hmebp);
10741 10730  
10742 10731          /*
10743 10732           * Reset hblk_reserve
10744 10733           */
10745 10734          bzero((void *)old, HME8BLK_SZ);
10746 10735          old->hblk_nextpa = va_to_pa((caddr_t)old);
10747 10736  }
10748 10737  
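The swap above copies the contents, links the new block into the hash chain, and only then walks the chain to find the predecessor of hblk_reserve so it can be unlinked. A minimal sketch of that add-then-remove ordering on a plain singly linked list (node_t, chain_swap and the head pointer are illustrative names, not symbols from this file):

    #include <stddef.h>

    typedef struct node {
        struct node *next;
    } node_t;

    /*
     * Replace `old' with `new' in the chain headed by *headp.  `new' is
     * linked in first; the predecessor of `old' is then located and `old'
     * is unlinked, mirroring the add-then-remove order used above.
     */
    static void
    chain_swap(node_t **headp, node_t *old, node_t *new)
    {
        node_t *prev, *np;

        new->next = *headp;                     /* add new at the head */
        *headp = new;

        /* find the predecessor of old, now that new is in the chain */
        for (prev = new, np = new->next; np != NULL && np != old;
            prev = np, np = np->next)
            ;
        if (np == old) {                        /* unlink old */
            prev->next = old->next;
            old->next = NULL;
        }
    }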
10749 10738  /*
10750 10739   * Grab the mlist mutex for both pages passed in.
10751 10740   *
10752 10741   * low and high will be returned as pointers to the mutexes for these pages.
10753 10742   * low refers to the mutex residing in the lower bin of the mlist hash, while
10754 10743   * high refers to the mutex residing in the higher bin of the mlist hash.  This
10755 10744   * is due to the locking order restrictions on the same thread grabbing
10756 10745   * multiple mlist mutexes.  The low lock must be acquired before the high lock.
10757 10746   *
10758 10747   * If both pages hash to the same mutex, only grab that single mutex, and
 10759 10748   * high will be returned as NULL.
10760 10749   * If the pages hash to different bins in the hash, grab the lower addressed
10761 10750   * lock first and then the higher addressed lock in order to follow the locking
10762 10751   * rules involved with the same thread grabbing multiple mlist mutexes.
10763 10752   * low and high will both have non-NULL values.
10764 10753   */
10765 10754  static void
10766 10755  sfmmu_mlist_reloc_enter(struct page *targ, struct page *repl,
10767 10756      kmutex_t **low, kmutex_t **high)
10768 10757  {
10769 10758          kmutex_t        *mml_targ, *mml_repl;
10770 10759  
10771 10760          /*
10772 10761           * no need to do the dance around szc as in sfmmu_mlist_enter()
10773 10762           * because this routine is only called by hat_page_relocate() and all
10774 10763           * targ and repl pages are already locked EXCL so szc can't change.
10775 10764           */
10776 10765  
10777 10766          mml_targ = MLIST_HASH(PP_PAGEROOT(targ));
10778 10767          mml_repl = MLIST_HASH(PP_PAGEROOT(repl));
10779 10768  
10780 10769          if (mml_targ == mml_repl) {
10781 10770                  *low = mml_targ;
10782 10771                  *high = NULL;
10783 10772          } else {
10784 10773                  if (mml_targ < mml_repl) {
10785 10774                          *low = mml_targ;
10786 10775                          *high = mml_repl;
10787 10776                  } else {
10788 10777                          *low = mml_repl;
10789 10778                          *high = mml_targ;
10790 10779                  }
10791 10780          }
10792 10781  
10793 10782          mutex_enter(*low);
10794 10783          if (*high)
10795 10784                  mutex_enter(*high);
10796 10785  }
10797 10786  
10798 10787  static void
10799 10788  sfmmu_mlist_reloc_exit(kmutex_t *low, kmutex_t *high)
10800 10789  {
10801 10790          if (high)
10802 10791                  mutex_exit(high);
10803 10792          mutex_exit(low);
10804 10793  }
10805 10794  
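Ordering the two mlist mutexes by address gives every thread the same global lock order, which is what rules out an ABBA deadlock here. The same idiom in a self-contained user-level form, with POSIX mutexes standing in for kmutex_t (lock_pair/unlock_pair are invented names for illustration only):

    #include <pthread.h>
    #include <stddef.h>

    /* Acquire two mutexes in address order; *high is NULL if they are the same. */
    static void
    lock_pair(pthread_mutex_t *a, pthread_mutex_t *b,
        pthread_mutex_t **low, pthread_mutex_t **high)
    {
        if (a == b) {
            *low = a;
            *high = NULL;
        } else if (a < b) {             /* pointer order = global lock order */
            *low = a;
            *high = b;
        } else {
            *low = b;
            *high = a;
        }
        pthread_mutex_lock(*low);
        if (*high != NULL)
            pthread_mutex_lock(*high);
    }

    static void
    unlock_pair(pthread_mutex_t *low, pthread_mutex_t *high)
    {
        if (high != NULL)
            pthread_mutex_unlock(high);
        pthread_mutex_unlock(low);
    }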
10806 10795  static hatlock_t *
10807 10796  sfmmu_hat_enter(sfmmu_t *sfmmup)
10808 10797  {
10809 10798          hatlock_t       *hatlockp;
10810 10799  
10811 10800          if (sfmmup != ksfmmup) {
10812 10801                  hatlockp = TSB_HASH(sfmmup);
10813 10802                  mutex_enter(HATLOCK_MUTEXP(hatlockp));
10814 10803                  return (hatlockp);
10815 10804          }
10816 10805          return (NULL);
10817 10806  }
10818 10807  
10819 10808  static hatlock_t *
10820 10809  sfmmu_hat_tryenter(sfmmu_t *sfmmup)
10821 10810  {
10822 10811          hatlock_t       *hatlockp;
10823 10812  
10824 10813          if (sfmmup != ksfmmup) {
10825 10814                  hatlockp = TSB_HASH(sfmmup);
10826 10815                  if (mutex_tryenter(HATLOCK_MUTEXP(hatlockp)) == 0)
10827 10816                          return (NULL);
10828 10817                  return (hatlockp);
10829 10818          }
10830 10819          return (NULL);
10831 10820  }
10832 10821  
10833 10822  static void
10834 10823  sfmmu_hat_exit(hatlock_t *hatlockp)
10835 10824  {
10836 10825          if (hatlockp != NULL)
10837 10826                  mutex_exit(HATLOCK_MUTEXP(hatlockp));
10838 10827  }
10839 10828  
10840 10829  static void
10841 10830  sfmmu_hat_lock_all(void)
10842 10831  {
10843 10832          int i;
10844 10833          for (i = 0; i < SFMMU_NUM_LOCK; i++)
10845 10834                  mutex_enter(HATLOCK_MUTEXP(&hat_lock[i]));
10846 10835  }
10847 10836  
10848 10837  static void
10849 10838  sfmmu_hat_unlock_all(void)
10850 10839  {
10851 10840          int i;
10852 10841          for (i = SFMMU_NUM_LOCK - 1; i >= 0; i--)
10853 10842                  mutex_exit(HATLOCK_MUTEXP(&hat_lock[i]));
10854 10843  }
10855 10844  
10856 10845  int
10857 10846  sfmmu_hat_lock_held(sfmmu_t *sfmmup)
10858 10847  {
10859 10848          ASSERT(sfmmup != ksfmmup);
10860 10849          return (MUTEX_HELD(HATLOCK_MUTEXP(TSB_HASH(sfmmup))));
10861 10850  }
10862 10851  
10863 10852  /*
10864 10853   * Locking primitives to provide consistency between ISM unmap
10865 10854   * and other operations.  Since ISM unmap can take a long time, we
10866 10855   * use HAT_ISMBUSY flag (protected by the hatlock) to avoid creating
10867 10856   * contention on the hatlock buckets while ISM segments are being
10868 10857   * unmapped.  The tradeoff is that the flags don't prevent priority
10869 10858   * inversion from occurring, so we must request kernel priority in
10870 10859   * case we have to sleep to keep from getting buried while holding
10871 10860   * the HAT_ISMBUSY flag set, which in turn could block other kernel
10872 10861   * threads from running (for example, in sfmmu_uvatopfn()).
10873 10862   */
10874 10863  static void
10875 10864  sfmmu_ismhat_enter(sfmmu_t *sfmmup, int hatlock_held)
10876 10865  {
10877 10866          hatlock_t *hatlockp;
10878 10867  
10879 10868          THREAD_KPRI_REQUEST();
10880 10869          if (!hatlock_held)
10881 10870                  hatlockp = sfmmu_hat_enter(sfmmup);
10882 10871          while (SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY))
10883 10872                  cv_wait(&sfmmup->sfmmu_tsb_cv, HATLOCK_MUTEXP(hatlockp));
10884 10873          SFMMU_FLAGS_SET(sfmmup, HAT_ISMBUSY);
10885 10874          if (!hatlock_held)
10886 10875                  sfmmu_hat_exit(hatlockp);
10887 10876  }
10888 10877  
10889 10878  static void
10890 10879  sfmmu_ismhat_exit(sfmmu_t *sfmmup, int hatlock_held)
10891 10880  {
10892 10881          hatlock_t *hatlockp;
10893 10882  
10894 10883          if (!hatlock_held)
10895 10884                  hatlockp = sfmmu_hat_enter(sfmmup);
10896 10885          ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY));
10897 10886          SFMMU_FLAGS_CLEAR(sfmmup, HAT_ISMBUSY);
10898 10887          cv_broadcast(&sfmmup->sfmmu_tsb_cv);
10899 10888          if (!hatlock_held)
10900 10889                  sfmmu_hat_exit(hatlockp);
10901 10890          THREAD_KPRI_RELEASE();
10902 10891  }
10903 10892  
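The HAT_ISMBUSY handshake above is the usual flag-under-a-lock pattern: a setter waits on a condition variable while the flag is held by someone else, and the clearer broadcasts. A stand-alone sketch with POSIX primitives (busy_enter/busy_exit are invented names; the THREAD_KPRI_REQUEST priority hint has no portable counterpart and is left out):

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  cv   = PTHREAD_COND_INITIALIZER;
    static bool busy;

    /* Mark the resource busy, waiting if another thread already owns it. */
    static void
    busy_enter(void)
    {
        pthread_mutex_lock(&lock);
        while (busy)
            pthread_cond_wait(&cv, &lock);
        busy = true;
        pthread_mutex_unlock(&lock);
    }

    /* Clear the busy flag and wake every waiter, mirroring cv_broadcast(). */
    static void
    busy_exit(void)
    {
        pthread_mutex_lock(&lock);
        busy = false;
        pthread_cond_broadcast(&cv);
        pthread_mutex_unlock(&lock);
    }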
10904 10893  /*
10905 10894   *
10906 10895   * Algorithm:
10907 10896   *
10908 10897   * (1) if segkmem is not ready, allocate hblk from an array of pre-alloc'ed
10909 10898   *      hblks.
10910 10899   *
10911 10900   * (2) if we are allocating an hblk for mapping a slab in sfmmu_cache,
10912 10901   *
10913 10902   *              (a) try to return an hblk from reserve pool of free hblks;
10914 10903   *              (b) if the reserve pool is empty, acquire hblk_reserve_lock
10915 10904   *                  and return hblk_reserve.
10916 10905   *
10917 10906   * (3) call kmem_cache_alloc() to allocate hblk;
10918 10907   *
10919 10908   *              (a) if hblk_reserve_lock is held by the current thread,
10920 10909   *                  atomically replace hblk_reserve by the hblk that is
10921 10910   *                  returned by kmem_cache_alloc; release hblk_reserve_lock
10922 10911   *                  and call kmem_cache_alloc() again.
10923 10912   *              (b) if reserve pool is not full, add the hblk that is
10924 10913   *                  returned by kmem_cache_alloc to reserve pool and
10925 10914   *                  call kmem_cache_alloc again.
10926 10915   *
10927 10916   */
10928 10917  static struct hme_blk *
10929 10918  sfmmu_hblk_alloc(sfmmu_t *sfmmup, caddr_t vaddr,
10930 10919          struct hmehash_bucket *hmebp, uint_t size, hmeblk_tag hblktag,
10931 10920          uint_t flags, uint_t rid)
10932 10921  {
10933 10922          struct hme_blk *hmeblkp = NULL;
10934 10923          struct hme_blk *newhblkp;
10935 10924          struct hme_blk *shw_hblkp = NULL;
10936 10925          struct kmem_cache *sfmmu_cache = NULL;
10937 10926          uint64_t hblkpa;
10938 10927          ulong_t index;
10939 10928          uint_t owner;           /* set to 1 if using hblk_reserve */
10940 10929          uint_t forcefree;
10941 10930          int sleep;
10942 10931          sf_srd_t *srdp;
10943 10932          sf_region_t *rgnp;
10944 10933  
10945 10934          ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
10946 10935          ASSERT(hblktag.htag_rid == rid);
10947 10936          SFMMU_VALIDATE_HMERID(sfmmup, rid, vaddr, TTEBYTES(size));
10948 10937          ASSERT(!SFMMU_IS_SHMERID_VALID(rid) ||
10949 10938              IS_P2ALIGNED(vaddr, TTEBYTES(size)));
10950 10939  
10951 10940          /*
10952 10941           * If segkmem is not created yet, allocate from static hmeblks
10953 10942           * created at the end of startup_modules().  See the block comment
10954 10943           * in startup_modules() describing how we estimate the number of
10955 10944           * static hmeblks that will be needed during re-map.
10956 10945           */
10957 10946          if (!hblk_alloc_dynamic) {
10958 10947  
10959 10948                  ASSERT(!SFMMU_IS_SHMERID_VALID(rid));
10960 10949  
10961 10950                  if (size == TTE8K) {
10962 10951                          index = nucleus_hblk8.index;
10963 10952                          if (index >= nucleus_hblk8.len) {
10964 10953                                  /*
10965 10954                                   * If we panic here, see startup_modules() to
10966 10955                                   * make sure that we are calculating the
10967 10956                                   * number of hblk8's that we need correctly.
10968 10957                                   */
10969 10958                                  prom_panic("no nucleus hblk8 to allocate");
10970 10959                          }
10971 10960                          hmeblkp =
10972 10961                              (struct hme_blk *)&nucleus_hblk8.list[index];
10973 10962                          nucleus_hblk8.index++;
10974 10963                          SFMMU_STAT(sf_hblk8_nalloc);
10975 10964                  } else {
10976 10965                          index = nucleus_hblk1.index;
10977 10966                          if (nucleus_hblk1.index >= nucleus_hblk1.len) {
10978 10967                                  /*
10979 10968                                   * If we panic here, see startup_modules().
10980 10969                                   * Most likely you need to update the
10981 10970                                   * calculation of the number of hblk1 elements
10982 10971                                   * that the kernel needs to boot.
10983 10972                                   */
10984 10973                                  prom_panic("no nucleus hblk1 to allocate");
10985 10974                          }
10986 10975                          hmeblkp =
10987 10976                              (struct hme_blk *)&nucleus_hblk1.list[index];
10988 10977                          nucleus_hblk1.index++;
10989 10978                          SFMMU_STAT(sf_hblk1_nalloc);
10990 10979                  }
10991 10980  
10992 10981                  goto hblk_init;
10993 10982          }
10994 10983  
10995 10984          SFMMU_HASH_UNLOCK(hmebp);
10996 10985  
10997 10986          if (sfmmup != KHATID && !SFMMU_IS_SHMERID_VALID(rid)) {
10998 10987                  if (mmu_page_sizes == max_mmu_page_sizes) {
10999 10988                          if (size < TTE256M)
11000 10989                                  shw_hblkp = sfmmu_shadow_hcreate(sfmmup, vaddr,
11001 10990                                      size, flags);
11002 10991                  } else {
11003 10992                          if (size < TTE4M)
11004 10993                                  shw_hblkp = sfmmu_shadow_hcreate(sfmmup, vaddr,
11005 10994                                      size, flags);
11006 10995                  }
11007 10996          } else if (SFMMU_IS_SHMERID_VALID(rid)) {
11008 10997                  /*
 11009 10998                   * Shared hmes use per-region bitmaps in rgn_hmeflags
 11010 10999                   * rather than shadow hmeblks to keep track of the
 11011 11000                   * mapping sizes which have been allocated for the region.
 11012 11001                   * Here we clean up old invalid hmeblks with this rid,
11013 11002                   * which may be left around by pageunload().
11014 11003                   */
11015 11004                  int ttesz;
11016 11005                  caddr_t va;
11017 11006                  caddr_t eva = vaddr + TTEBYTES(size);
11018 11007  
11019 11008                  ASSERT(sfmmup != KHATID);
11020 11009  
11021 11010                  srdp = sfmmup->sfmmu_srdp;
11022 11011                  ASSERT(srdp != NULL && srdp->srd_refcnt != 0);
11023 11012                  rgnp = srdp->srd_hmergnp[rid];
11024 11013                  ASSERT(rgnp != NULL && rgnp->rgn_id == rid);
11025 11014                  ASSERT(rgnp->rgn_refcnt != 0);
11026 11015                  ASSERT(size <= rgnp->rgn_pgszc);
11027 11016  
11028 11017                  ttesz = HBLK_MIN_TTESZ;
11029 11018                  do {
11030 11019                          if (!(rgnp->rgn_hmeflags & (0x1 << ttesz))) {
11031 11020                                  continue;
11032 11021                          }
11033 11022  
11034 11023                          if (ttesz > size && ttesz != HBLK_MIN_TTESZ) {
11035 11024                                  sfmmu_cleanup_rhblk(srdp, vaddr, rid, ttesz);
11036 11025                          } else if (ttesz < size) {
11037 11026                                  for (va = vaddr; va < eva;
11038 11027                                      va += TTEBYTES(ttesz)) {
11039 11028                                          sfmmu_cleanup_rhblk(srdp, va, rid,
11040 11029                                              ttesz);
11041 11030                                  }
11042 11031                          }
11043 11032                  } while (++ttesz <= rgnp->rgn_pgszc);
11044 11033          }
11045 11034  
11046 11035  fill_hblk:
11047 11036          owner = (hblk_reserve_thread == curthread) ? 1 : 0;
11048 11037  
11049 11038          if (owner && size == TTE8K) {
11050 11039  
11051 11040                  ASSERT(!SFMMU_IS_SHMERID_VALID(rid));
11052 11041                  /*
11053 11042                   * We are really in a tight spot. We already own
11054 11043                   * hblk_reserve and we need another hblk.  In anticipation
11055 11044                   * of this kind of scenario, we specifically set aside
11056 11045                   * HBLK_RESERVE_MIN number of hblks to be used exclusively
11057 11046                   * by owner of hblk_reserve.
11058 11047                   */
11059 11048                  SFMMU_STAT(sf_hblk_recurse_cnt);
11060 11049  
11061 11050                  if (!sfmmu_get_free_hblk(&hmeblkp, 1))
11062 11051                          panic("sfmmu_hblk_alloc: reserve list is empty");
11063 11052  
11064 11053                  goto hblk_verify;
11065 11054          }
11066 11055  
11067 11056          ASSERT(!owner);
11068 11057  
11069 11058          if ((flags & HAT_NO_KALLOC) == 0) {
11070 11059  
11071 11060                  sfmmu_cache = ((size == TTE8K) ? sfmmu8_cache : sfmmu1_cache);
11072 11061                  sleep = ((sfmmup == KHATID) ? KM_NOSLEEP : KM_SLEEP);
11073 11062  
11074 11063                  if ((hmeblkp = kmem_cache_alloc(sfmmu_cache, sleep)) == NULL) {
11075 11064                          hmeblkp = sfmmu_hblk_steal(size);
11076 11065                  } else {
11077 11066                          /*
11078 11067                           * if we are the owner of hblk_reserve,
11079 11068                           * swap hblk_reserve with hmeblkp and
11080 11069                           * start a fresh life.  Hope things go
11081 11070                           * better this time.
11082 11071                           */
11083 11072                          if (hblk_reserve_thread == curthread) {
11084 11073                                  ASSERT(sfmmu_cache == sfmmu8_cache);
11085 11074                                  sfmmu_hblk_swap(hmeblkp);
11086 11075                                  hblk_reserve_thread = NULL;
11087 11076                                  mutex_exit(&hblk_reserve_lock);
11088 11077                                  goto fill_hblk;
11089 11078                          }
11090 11079                          /*
11091 11080                           * let's donate this hblk to our reserve list if
11092 11081                           * we are not mapping kernel range
11093 11082                           */
11094 11083                          if (size == TTE8K && sfmmup != KHATID) {
11095 11084                                  if (sfmmu_put_free_hblk(hmeblkp, 0))
11096 11085                                          goto fill_hblk;
11097 11086                          }
11098 11087                  }
11099 11088          } else {
11100 11089                  /*
11101 11090                   * We are here to map the slab in sfmmu8_cache; let's
11102 11091                   * check if we could tap our reserve list; if successful,
 11103 11092                   * this will avoid the pain of going through sfmmu_hblk_swap
11104 11093                   */
11105 11094                  SFMMU_STAT(sf_hblk_slab_cnt);
11106 11095                  if (!sfmmu_get_free_hblk(&hmeblkp, 0)) {
11107 11096                          /*
11108 11097                           * let's start hblk_reserve dance
11109 11098                           */
11110 11099                          SFMMU_STAT(sf_hblk_reserve_cnt);
11111 11100                          owner = 1;
11112 11101                          mutex_enter(&hblk_reserve_lock);
11113 11102                          hmeblkp = HBLK_RESERVE;
11114 11103                          hblk_reserve_thread = curthread;
11115 11104                  }
11116 11105          }
11117 11106  
11118 11107  hblk_verify:
11119 11108          ASSERT(hmeblkp != NULL);
11120 11109          set_hblk_sz(hmeblkp, size);
11121 11110          ASSERT(hmeblkp->hblk_nextpa == va_to_pa((caddr_t)hmeblkp));
11122 11111          SFMMU_HASH_LOCK(hmebp);
11123 11112          HME_HASH_FAST_SEARCH(hmebp, hblktag, newhblkp);
11124 11113          if (newhblkp != NULL) {
11125 11114                  SFMMU_HASH_UNLOCK(hmebp);
11126 11115                  if (hmeblkp != HBLK_RESERVE) {
11127 11116                          /*
11128 11117                           * This is really tricky!
11129 11118                           *
11130 11119                           * vmem_alloc(vmem_seg_arena)
11131 11120                           *  vmem_alloc(vmem_internal_arena)
11132 11121                           *   segkmem_alloc(heap_arena)
11133 11122                           *    vmem_alloc(heap_arena)
11134 11123                           *    page_create()
11135 11124                           *    hat_memload()
11136 11125                           *      kmem_cache_free()
11137 11126                           *       kmem_cache_alloc()
11138 11127                           *        kmem_slab_create()
11139 11128                           *         vmem_alloc(kmem_internal_arena)
11140 11129                           *          segkmem_alloc(heap_arena)
11141 11130                           *              vmem_alloc(heap_arena)
11142 11131                           *              page_create()
11143 11132                           *              hat_memload()
11144 11133                           *                kmem_cache_free()
11145 11134                           *              ...
11146 11135                           *
11147 11136                           * Thus, hat_memload() could call kmem_cache_free
 11148 11137                           * enough times that we could easily hit the
 11149 11138                           * bottom of the stack or run out of the reserve
 11150 11139                           * list of vmem_seg structs.  So, we must donate
 11151 11140                           * this hblk to the reserve list if it's allocated
11152 11141                           * from sfmmu8_cache *and* mapping kernel range.
11153 11142                           * We don't need to worry about freeing hmeblk1's
11154 11143                           * to kmem since they don't map any kmem slabs.
11155 11144                           *
11156 11145                           * Note: When segkmem supports largepages, we must
11157 11146                           * free hmeblk1's to reserve list as well.
11158 11147                           */
11159 11148                          forcefree = (sfmmup == KHATID) ? 1 : 0;
11160 11149                          if (size == TTE8K &&
11161 11150                              sfmmu_put_free_hblk(hmeblkp, forcefree)) {
11162 11151                                  goto re_verify;
11163 11152                          }
11164 11153                          ASSERT(sfmmup != KHATID);
11165 11154                          kmem_cache_free(get_hblk_cache(hmeblkp), hmeblkp);
11166 11155                  } else {
11167 11156                          /*
11168 11157                           * Hey! we don't need hblk_reserve any more.
11169 11158                           */
11170 11159                          ASSERT(owner);
11171 11160                          hblk_reserve_thread = NULL;
11172 11161                          mutex_exit(&hblk_reserve_lock);
11173 11162                          owner = 0;
11174 11163                  }
11175 11164  re_verify:
11176 11165                  /*
11177 11166                   * let's check if the goodies are still present
11178 11167                   */
11179 11168                  SFMMU_HASH_LOCK(hmebp);
11180 11169                  HME_HASH_FAST_SEARCH(hmebp, hblktag, newhblkp);
11181 11170                  if (newhblkp != NULL) {
11182 11171                          /*
11183 11172                           * return newhblkp if it's not hblk_reserve;
11184 11173                           * if newhblkp is hblk_reserve, return it
11185 11174                           * _only if_ we are the owner of hblk_reserve.
11186 11175                           */
11187 11176                          if (newhblkp != HBLK_RESERVE || owner) {
11188 11177                                  ASSERT(!SFMMU_IS_SHMERID_VALID(rid) ||
11189 11178                                      newhblkp->hblk_shared);
11190 11179                                  ASSERT(SFMMU_IS_SHMERID_VALID(rid) ||
11191 11180                                      !newhblkp->hblk_shared);
11192 11181                                  return (newhblkp);
11193 11182                          } else {
11194 11183                                  /*
11195 11184                                   * we just hit hblk_reserve in the hash and
11196 11185                                   * we are not the owner of that;
11197 11186                                   *
11198 11187                                   * block until hblk_reserve_thread completes
11199 11188                                   * swapping hblk_reserve and try the dance
11200 11189                                   * once again.
11201 11190                                   */
11202 11191                                  SFMMU_HASH_UNLOCK(hmebp);
11203 11192                                  mutex_enter(&hblk_reserve_lock);
11204 11193                                  mutex_exit(&hblk_reserve_lock);
11205 11194                                  SFMMU_STAT(sf_hblk_reserve_hit);
11206 11195                                  goto fill_hblk;
11207 11196                          }
11208 11197                  } else {
11209 11198                          /*
11210 11199                           * it's no more! try the dance once again.
11211 11200                           */
11212 11201                          SFMMU_HASH_UNLOCK(hmebp);
11213 11202                          goto fill_hblk;
11214 11203                  }
11215 11204          }
11216 11205  
11217 11206  hblk_init:
11218 11207          if (SFMMU_IS_SHMERID_VALID(rid)) {
11219 11208                  uint16_t tteflag = 0x1 <<
11220 11209                      ((size < HBLK_MIN_TTESZ) ? HBLK_MIN_TTESZ : size);
11221 11210  
11222 11211                  if (!(rgnp->rgn_hmeflags & tteflag)) {
11223 11212                          atomic_or_16(&rgnp->rgn_hmeflags, tteflag);
11224 11213                  }
11225 11214                  hmeblkp->hblk_shared = 1;
11226 11215          } else {
11227 11216                  hmeblkp->hblk_shared = 0;
11228 11217          }
11229 11218          set_hblk_sz(hmeblkp, size);
11230 11219          ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
11231 11220          hmeblkp->hblk_next = (struct hme_blk *)NULL;
11232 11221          hmeblkp->hblk_tag = hblktag;
11233 11222          hmeblkp->hblk_shadow = shw_hblkp;
11234 11223          hblkpa = hmeblkp->hblk_nextpa;
11235 11224          hmeblkp->hblk_nextpa = HMEBLK_ENDPA;
11236 11225  
11237 11226          ASSERT(get_hblk_ttesz(hmeblkp) == size);
11238 11227          ASSERT(get_hblk_span(hmeblkp) == HMEBLK_SPAN(size));
11239 11228          ASSERT(hmeblkp->hblk_hmecnt == 0);
11240 11229          ASSERT(hmeblkp->hblk_vcnt == 0);
11241 11230          ASSERT(hmeblkp->hblk_lckcnt == 0);
11242 11231          ASSERT(hblkpa == va_to_pa((caddr_t)hmeblkp));
11243 11232          sfmmu_hblk_hash_add(hmebp, hmeblkp, hblkpa);
11244 11233          return (hmeblkp);
11245 11234  }
11246 11235  
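Stripped of the hash-chain re-verification, the allocation policy implemented above is: try the normal allocator, fall back to a small free pool, and only as a last resort hand out the single reserved object while recording the owning thread. A rough sketch of that fallback order under those assumptions (free_pool, reserve_obj and blk_alloc are placeholder names for freehblkp, hblk_reserve and this routine, and malloc stands in for kmem_cache_alloc):

    #include <pthread.h>
    #include <stdlib.h>

    #define POOL_MAX        16

    static void *free_pool[POOL_MAX];           /* stands in for freehblkp */
    static int free_cnt;
    static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;

    static char reserve_obj[64];                /* stands in for hblk_reserve */
    static pthread_mutex_t reserve_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_t reserve_owner;

    static void *
    blk_alloc(size_t sz)
    {
        void *p;

        if ((p = malloc(sz)) != NULL)           /* normal path: the allocator */
            return (p);

        pthread_mutex_lock(&pool_lock);         /* fall back to the free pool */
        if (free_cnt > 0) {
            p = free_pool[--free_cnt];
            pthread_mutex_unlock(&pool_lock);
            return (p);
        }
        pthread_mutex_unlock(&pool_lock);

        /*
         * Last resort: hand out the single reserved object and remember the
         * owner; the lock stays held until the owner later swaps in a real
         * block, as the code above does with hblk_reserve_lock.
         */
        pthread_mutex_lock(&reserve_lock);
        reserve_owner = pthread_self();
        return (reserve_obj);
    }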
11247 11236  /*
11248 11237   * This function cleans up the hme_blk and returns it to the free list.
11249 11238   */
11250 11239  /* ARGSUSED */
11251 11240  static void
11252 11241  sfmmu_hblk_free(struct hme_blk **listp)
11253 11242  {
11254 11243          struct hme_blk *hmeblkp, *next_hmeblkp;
11255 11244          int             size;
11256 11245          uint_t          critical;
11257 11246          uint64_t        hblkpa;
11258 11247  
11259 11248          ASSERT(*listp != NULL);
11260 11249  
11261 11250          hmeblkp = *listp;
11262 11251          while (hmeblkp != NULL) {
11263 11252                  next_hmeblkp = hmeblkp->hblk_next;
11264 11253                  ASSERT(!hmeblkp->hblk_hmecnt);
11265 11254                  ASSERT(!hmeblkp->hblk_vcnt);
11266 11255                  ASSERT(!hmeblkp->hblk_lckcnt);
11267 11256                  ASSERT(hmeblkp != (struct hme_blk *)hblk_reserve);
11268 11257                  ASSERT(hmeblkp->hblk_shared == 0);
11269 11258                  ASSERT(hmeblkp->hblk_shw_bit == 0);
11270 11259                  ASSERT(hmeblkp->hblk_shadow == NULL);
11271 11260  
11272 11261                  hblkpa = va_to_pa((caddr_t)hmeblkp);
11273 11262                  ASSERT(hblkpa != (uint64_t)-1);
11274 11263                  critical = (hblktosfmmu(hmeblkp) == KHATID) ? 1 : 0;
11275 11264  
11276 11265                  size = get_hblk_ttesz(hmeblkp);
11277 11266                  hmeblkp->hblk_next = NULL;
11278 11267                  hmeblkp->hblk_nextpa = hblkpa;
11279 11268  
11280 11269                  if (hmeblkp->hblk_nuc_bit == 0) {
11281 11270  
11282 11271                          if (size != TTE8K ||
11283 11272                              !sfmmu_put_free_hblk(hmeblkp, critical))
11284 11273                                  kmem_cache_free(get_hblk_cache(hmeblkp),
11285 11274                                      hmeblkp);
11286 11275                  }
11287 11276                  hmeblkp = next_hmeblkp;
11288 11277          }
11289 11278  }
11290 11279  
11291 11280  #define BUCKETS_TO_SEARCH_BEFORE_UNLOAD 30
11292 11281  #define SFMMU_HBLK_STEAL_THRESHOLD 5
11293 11282  
11294 11283  static uint_t sfmmu_hblk_steal_twice;
11295 11284  static uint_t sfmmu_hblk_steal_count, sfmmu_hblk_steal_unload_count;
11296 11285  
11297 11286  /*
11298 11287   * Steal a hmeblk from user or kernel hme hash lists.
 11299 11288   * For an 8K tte, grab one from the reserve pool (freehblkp) before proceeding
 11300 11289   * to steal; if we fail to steal after SFMMU_HBLK_STEAL_THRESHOLD attempts,
 11301 11290   * tap into the critical reserve of freehblkp.
 11302 11291   * Note: we keep looping in this routine until we find one.
11303 11292   */
11304 11293  static struct hme_blk *
11305 11294  sfmmu_hblk_steal(int size)
11306 11295  {
11307 11296          static struct hmehash_bucket *uhmehash_steal_hand = NULL;
11308 11297          struct hmehash_bucket *hmebp;
11309 11298          struct hme_blk *hmeblkp = NULL, *pr_hblk;
11310 11299          uint64_t hblkpa;
11311 11300          int i;
11312 11301          uint_t loop_cnt = 0, critical;
11313 11302  
11314 11303          for (;;) {
11315 11304                  /* Check cpu hblk pending queues */
11316 11305                  if ((hmeblkp = sfmmu_check_pending_hblks(size)) != NULL) {
11317 11306                          hmeblkp->hblk_nextpa = va_to_pa((caddr_t)hmeblkp);
11318 11307                          ASSERT(hmeblkp->hblk_hmecnt == 0);
11319 11308                          ASSERT(hmeblkp->hblk_vcnt == 0);
11320 11309                          return (hmeblkp);
11321 11310                  }
11322 11311  
11323 11312                  if (size == TTE8K) {
11324 11313                          critical =
11325 11314                              (++loop_cnt > SFMMU_HBLK_STEAL_THRESHOLD) ? 1 : 0;
11326 11315                          if (sfmmu_get_free_hblk(&hmeblkp, critical))
11327 11316                                  return (hmeblkp);
11328 11317                  }
11329 11318  
11330 11319                  hmebp = (uhmehash_steal_hand == NULL) ? uhme_hash :
11331 11320                      uhmehash_steal_hand;
11332 11321                  ASSERT(hmebp >= uhme_hash && hmebp <= &uhme_hash[UHMEHASH_SZ]);
11333 11322  
11334 11323                  for (i = 0; hmeblkp == NULL && i <= UHMEHASH_SZ +
11335 11324                      BUCKETS_TO_SEARCH_BEFORE_UNLOAD; i++) {
11336 11325                          SFMMU_HASH_LOCK(hmebp);
11337 11326                          hmeblkp = hmebp->hmeblkp;
11338 11327                          hblkpa = hmebp->hmeh_nextpa;
11339 11328                          pr_hblk = NULL;
11340 11329                          while (hmeblkp) {
11341 11330                                  /*
11342 11331                                   * check if it is a hmeblk that is not locked
11343 11332                                   * and not shared. skip shadow hmeblks with
11344 11333                                   * shadow_mask set i.e valid count non zero.
 11345 11334                                   * shadow_mask set, i.e. valid count non-zero.
11346 11335                                  if ((get_hblk_ttesz(hmeblkp) == size) &&
11347 11336                                      (hmeblkp->hblk_shw_bit == 0 ||
11348 11337                                      hmeblkp->hblk_vcnt == 0) &&
11349 11338                                      (hmeblkp->hblk_lckcnt == 0)) {
11350 11339                                          /*
11351 11340                                           * there is a high probability that we
11352 11341                                           * will find a free one. search some
11353 11342                                           * buckets for a free hmeblk initially
11354 11343                                           * before unloading a valid hmeblk.
11355 11344                                           */
11356 11345                                          if ((hmeblkp->hblk_vcnt == 0 &&
11357 11346                                              hmeblkp->hblk_hmecnt == 0) || (i >=
11358 11347                                              BUCKETS_TO_SEARCH_BEFORE_UNLOAD)) {
11359 11348                                                  if (sfmmu_steal_this_hblk(hmebp,
11360 11349                                                      hmeblkp, hblkpa, pr_hblk)) {
11361 11350                                                          /*
11362 11351                                                           * Hblk is unloaded
11363 11352                                                           * successfully
11364 11353                                                           */
11365 11354                                                          break;
11366 11355                                                  }
11367 11356                                          }
11368 11357                                  }
11369 11358                                  pr_hblk = hmeblkp;
11370 11359                                  hblkpa = hmeblkp->hblk_nextpa;
11371 11360                                  hmeblkp = hmeblkp->hblk_next;
11372 11361                          }
11373 11362  
11374 11363                          SFMMU_HASH_UNLOCK(hmebp);
11375 11364                          if (hmebp++ == &uhme_hash[UHMEHASH_SZ])
11376 11365                                  hmebp = uhme_hash;
11377 11366                  }
11378 11367                  uhmehash_steal_hand = hmebp;
11379 11368  
11380 11369                  if (hmeblkp != NULL)
11381 11370                          break;
11382 11371  
11383 11372                  /*
11384 11373                   * in the worst case, look for a free one in the kernel
11385 11374                   * hash table.
11386 11375                   */
11387 11376                  for (i = 0, hmebp = khme_hash; i <= KHMEHASH_SZ; i++) {
11388 11377                          SFMMU_HASH_LOCK(hmebp);
11389 11378                          hmeblkp = hmebp->hmeblkp;
11390 11379                          hblkpa = hmebp->hmeh_nextpa;
11391 11380                          pr_hblk = NULL;
11392 11381                          while (hmeblkp) {
11393 11382                                  /*
11394 11383                                   * check if it is free hmeblk
 11395 11384                                   * check if it is a free hmeblk
11396 11385                                  if ((get_hblk_ttesz(hmeblkp) == size) &&
11397 11386                                      (hmeblkp->hblk_lckcnt == 0) &&
11398 11387                                      (hmeblkp->hblk_vcnt == 0) &&
11399 11388                                      (hmeblkp->hblk_hmecnt == 0)) {
11400 11389                                          if (sfmmu_steal_this_hblk(hmebp,
11401 11390                                              hmeblkp, hblkpa, pr_hblk)) {
11402 11391                                                  break;
11403 11392                                          } else {
11404 11393                                                  /*
11405 11394                                                   * Cannot fail since we have
11406 11395                                                   * hash lock.
11407 11396                                                   */
11408 11397                                                  panic("fail to steal?");
11409 11398                                          }
11410 11399                                  }
11411 11400  
11412 11401                                  pr_hblk = hmeblkp;
11413 11402                                  hblkpa = hmeblkp->hblk_nextpa;
11414 11403                                  hmeblkp = hmeblkp->hblk_next;
11415 11404                          }
11416 11405  
11417 11406                          SFMMU_HASH_UNLOCK(hmebp);
11418 11407                          if (hmebp++ == &khme_hash[KHMEHASH_SZ])
11419 11408                                  hmebp = khme_hash;
11420 11409                  }
11421 11410  
11422 11411                  if (hmeblkp != NULL)
11423 11412                          break;
11424 11413                  sfmmu_hblk_steal_twice++;
11425 11414          }
11426 11415          return (hmeblkp);
11427 11416  }
11428 11417  
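The steal loop resumes where the previous caller stopped by saving its position in the static uhmehash_steal_hand, a clock-hand scan that spreads stealing across buckets instead of always hammering bucket zero. A simplified sketch of that wrap-around walk (bucket_t, try_reclaim and steal_scan are illustrative, not source symbols):

    #include <stddef.h>

    typedef struct bucket {
        int nfree;                      /* illustrative per-bucket state */
    } bucket_t;

    #define NBUCKETS        64

    static bucket_t buckets[NBUCKETS];
    static bucket_t *steal_hand;        /* persists across calls */

    /* Placeholder for the real work of unloading/freeing an entry. */
    static int
    try_reclaim(bucket_t *bp)
    {
        if (bp->nfree > 0) {
            bp->nfree--;                /* pretend we freed an entry */
            return (1);
        }
        return (0);
    }

    static bucket_t *
    steal_scan(void)
    {
        bucket_t *bp = (steal_hand != NULL) ? steal_hand : &buckets[0];
        int i;

        for (i = 0; i < NBUCKETS; i++) {
            if (try_reclaim(bp)) {
                steal_hand = bp;                /* next caller resumes here */
                return (bp);
            }
            if (++bp == &buckets[NBUCKETS])     /* wrap to the start */
                bp = &buckets[0];
        }
        steal_hand = bp;
        return (NULL);
    }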
11429 11418  /*
11430 11419   * This routine does real work to prepare a hblk to be "stolen" by
11431 11420   * unloading the mappings, updating shadow counts ....
 11432 11421   * It returns 1 if the block is ready to be reused (stolen), or 0 if
 11433 11422   * the block cannot be stolen yet because pageunload is still working
 11434 11423   * on this hblk.
11435 11424   */
11436 11425  static int
11437 11426  sfmmu_steal_this_hblk(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp,
11438 11427          uint64_t hblkpa, struct hme_blk *pr_hblk)
11439 11428  {
11440 11429          int shw_size, vshift;
11441 11430          struct hme_blk *shw_hblkp;
11442 11431          caddr_t vaddr;
11443 11432          uint_t shw_mask, newshw_mask;
11444 11433          struct hme_blk *list = NULL;
11445 11434  
11446 11435          ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
11447 11436  
11448 11437          /*
11449 11438           * check if the hmeblk is free, unload if necessary
11450 11439           */
11451 11440          if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) {
11452 11441                  sfmmu_t *sfmmup;
11453 11442                  demap_range_t dmr;
11454 11443  
11455 11444                  sfmmup = hblktosfmmu(hmeblkp);
11456 11445                  if (hmeblkp->hblk_shared || sfmmup->sfmmu_ismhat) {
11457 11446                          return (0);
11458 11447                  }
11459 11448                  DEMAP_RANGE_INIT(sfmmup, &dmr);
11460 11449                  (void) sfmmu_hblk_unload(sfmmup, hmeblkp,
11461 11450                      (caddr_t)get_hblk_base(hmeblkp),
11462 11451                      get_hblk_endaddr(hmeblkp), &dmr, HAT_UNLOAD);
11463 11452                  DEMAP_RANGE_FLUSH(&dmr);
11464 11453                  if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) {
11465 11454                          /*
11466 11455                           * Pageunload is working on the same hblk.
11467 11456                           */
11468 11457                          return (0);
11469 11458                  }
11470 11459  
11471 11460                  sfmmu_hblk_steal_unload_count++;
11472 11461          }
11473 11462  
11474 11463          ASSERT(hmeblkp->hblk_lckcnt == 0);
11475 11464          ASSERT(hmeblkp->hblk_vcnt == 0 && hmeblkp->hblk_hmecnt == 0);
11476 11465  
11477 11466          sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk, &list, 1);
11478 11467          hmeblkp->hblk_nextpa = hblkpa;
11479 11468  
11480 11469          shw_hblkp = hmeblkp->hblk_shadow;
11481 11470          if (shw_hblkp) {
11482 11471                  ASSERT(!hmeblkp->hblk_shared);
11483 11472                  shw_size = get_hblk_ttesz(shw_hblkp);
11484 11473                  vaddr = (caddr_t)get_hblk_base(hmeblkp);
11485 11474                  vshift = vaddr_to_vshift(shw_hblkp->hblk_tag, vaddr, shw_size);
11486 11475                  ASSERT(vshift < 8);
11487 11476                  /*
11488 11477                   * Atomically clear shadow mask bit
11489 11478                   */
11490 11479                  do {
11491 11480                          shw_mask = shw_hblkp->hblk_shw_mask;
11492 11481                          ASSERT(shw_mask & (1 << vshift));
11493 11482                          newshw_mask = shw_mask & ~(1 << vshift);
11494 11483                          newshw_mask = atomic_cas_32(&shw_hblkp->hblk_shw_mask,
11495 11484                              shw_mask, newshw_mask);
11496 11485                  } while (newshw_mask != shw_mask);
11497 11486                  hmeblkp->hblk_shadow = NULL;
11498 11487          }
11499 11488  
11500 11489          /*
11501 11490           * remove shadow bit if we are stealing an unused shadow hmeblk.
11502 11491           * sfmmu_hblk_alloc needs it that way, will set shadow bit later if
 11503 11492   * sfmmu_hblk_alloc needs it that way; it will set the shadow bit later if
11504 11493           */
11505 11494          hmeblkp->hblk_shw_bit = 0;
11506 11495  
11507 11496          if (hmeblkp->hblk_shared) {
11508 11497                  sf_srd_t        *srdp;
11509 11498                  sf_region_t     *rgnp;
11510 11499                  uint_t          rid;
11511 11500  
11512 11501                  srdp = hblktosrd(hmeblkp);
11513 11502                  ASSERT(srdp != NULL && srdp->srd_refcnt != 0);
11514 11503                  rid = hmeblkp->hblk_tag.htag_rid;
11515 11504                  ASSERT(SFMMU_IS_SHMERID_VALID(rid));
11516 11505                  ASSERT(rid < SFMMU_MAX_HME_REGIONS);
11517 11506                  rgnp = srdp->srd_hmergnp[rid];
11518 11507                  ASSERT(rgnp != NULL);
11519 11508                  SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid);
11520 11509                  hmeblkp->hblk_shared = 0;
11521 11510          }
11522 11511  
11523 11512          sfmmu_hblk_steal_count++;
11524 11513          SFMMU_STAT(sf_steal_count);
11525 11514  
11526 11515          return (1);
11527 11516  }
11528 11517  
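The shadow-mask update in sfmmu_steal_this_hblk() clears one bit with a compare-and-swap retry loop rather than taking a lock on the shadow hmeblk. The equivalent shape with C11 atomics, as a sketch only (the kernel itself uses atomic_cas_32 on a plain uint_t field):

    #include <stdatomic.h>
    #include <stdint.h>

    /* Atomically clear bit `bit' in *maskp; retry if another CPU races us. */
    static void
    clear_mask_bit(_Atomic uint32_t *maskp, int bit)
    {
        uint32_t old = atomic_load(maskp);
        uint32_t new;

        do {
            new = old & ~(UINT32_C(1) << bit);
            /* on failure, `old' is refreshed with the current value */
        } while (!atomic_compare_exchange_weak(maskp, &old, new));
    }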
11529 11518  struct hme_blk *
11530 11519  sfmmu_hmetohblk(struct sf_hment *sfhme)
11531 11520  {
11532 11521          struct hme_blk *hmeblkp;
11533 11522          struct sf_hment *sfhme0;
11534 11523          struct hme_blk *hblk_dummy = 0;
11535 11524  
11536 11525          /*
11537 11526           * No dummy sf_hments, please.
11538 11527           */
11539 11528          ASSERT(sfhme->hme_tte.ll != 0);
11540 11529  
11541 11530          sfhme0 = sfhme - sfhme->hme_tte.tte_hmenum;
11542 11531          hmeblkp = (struct hme_blk *)((uintptr_t)sfhme0 -
11543 11532              (uintptr_t)&hblk_dummy->hblk_hme[0]);
11544 11533  
11545 11534          return (hmeblkp);
11546 11535  }
11547 11536  
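sfmmu_hmetohblk() backs up from an embedded sf_hment to its enclosing hme_blk by subtracting the hment's index and the member offset, the familiar container-of trick. A generic sketch of the same arithmetic with offsetof (struct member and struct container are invented types for illustration):

    #include <stddef.h>

    struct member {
        int m;
    };

    struct container {
        int hdr;
        struct member items[8];
    };

    /* Given a pointer to items[idx], recover the enclosing container. */
    static struct container *
    member_to_container(struct member *mp, int idx)
    {
        struct member *first = mp - idx;        /* step back to items[0] */

        return ((struct container *)((char *)first -
            offsetof(struct container, items)));
    }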
11548 11537  /*
11549 11538   * On swapin, get appropriately sized TSB(s) and clear the HAT_SWAPPED flag.
11550 11539   * If we can't get appropriately sized TSB(s), try for 8K TSB(s) using
11551 11540   * KM_SLEEP allocation.
 11552 11541   *
 11553 11542   * This routine returns no value; on failure it falls back to a minimum-size (8K) TSB.
11554 11543   */
11555 11544  static void
11556 11545  sfmmu_tsb_swapin(sfmmu_t *sfmmup, hatlock_t *hatlockp)
11557 11546  {
11558 11547          struct tsb_info *tsbinfop, *next;
11559 11548          tsb_replace_rc_t rc;
11560 11549          boolean_t gotfirst = B_FALSE;
11561 11550  
11562 11551          ASSERT(sfmmup != ksfmmup);
11563 11552          ASSERT(sfmmu_hat_lock_held(sfmmup));
11564 11553  
11565 11554          while (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPIN)) {
11566 11555                  cv_wait(&sfmmup->sfmmu_tsb_cv, HATLOCK_MUTEXP(hatlockp));
11567 11556          }
11568 11557  
11569 11558          if (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) {
11570 11559                  SFMMU_FLAGS_SET(sfmmup, HAT_SWAPIN);
11571 11560          } else {
11572 11561                  return;
11573 11562          }
11574 11563  
11575 11564          ASSERT(sfmmup->sfmmu_tsb != NULL);
11576 11565  
11577 11566          /*
11578 11567           * Loop over all tsbinfo's replacing them with ones that actually have
11579 11568           * a TSB.  If any of the replacements ever fail, bail out of the loop.
11580 11569           */
11581 11570          for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL; tsbinfop = next) {
11582 11571                  ASSERT(tsbinfop->tsb_flags & TSB_SWAPPED);
11583 11572                  next = tsbinfop->tsb_next;
11584 11573                  rc = sfmmu_replace_tsb(sfmmup, tsbinfop, tsbinfop->tsb_szc,
11585 11574                      hatlockp, TSB_SWAPIN);
11586 11575                  if (rc != TSB_SUCCESS) {
11587 11576                          break;
11588 11577                  }
11589 11578                  gotfirst = B_TRUE;
11590 11579          }
11591 11580  
11592 11581          switch (rc) {
11593 11582          case TSB_SUCCESS:
11594 11583                  SFMMU_FLAGS_CLEAR(sfmmup, HAT_SWAPPED|HAT_SWAPIN);
11595 11584                  cv_broadcast(&sfmmup->sfmmu_tsb_cv);
11596 11585                  return;
11597 11586          case TSB_LOSTRACE:
11598 11587                  break;
11599 11588          case TSB_ALLOCFAIL:
11600 11589                  break;
11601 11590          default:
11602 11591                  panic("sfmmu_replace_tsb returned unrecognized failure code "
11603 11592                      "%d", rc);
11604 11593          }
11605 11594  
11606 11595          /*
11607 11596           * In this case, we failed to get one of our TSBs.  If we failed to
11608 11597           * get the first TSB, get one of minimum size (8KB).  Walk the list
11609 11598           * and throw away the tsbinfos, starting where the allocation failed;
11610 11599           * we can get by with just one TSB as long as we don't leave the
11611 11600           * SWAPPED tsbinfo structures lying around.
11612 11601           */
11613 11602          tsbinfop = sfmmup->sfmmu_tsb;
11614 11603          next = tsbinfop->tsb_next;
11615 11604          tsbinfop->tsb_next = NULL;
11616 11605  
11617 11606          sfmmu_hat_exit(hatlockp);
11618 11607          for (tsbinfop = next; tsbinfop != NULL; tsbinfop = next) {
11619 11608                  next = tsbinfop->tsb_next;
11620 11609                  sfmmu_tsbinfo_free(tsbinfop);
11621 11610          }
11622 11611          hatlockp = sfmmu_hat_enter(sfmmup);
11623 11612  
11624 11613          /*
11625 11614           * If we don't have any TSBs, get a single 8K TSB for 8K, 64K and 512K
11626 11615           * pages.
11627 11616           */
11628 11617          if (!gotfirst) {
11629 11618                  tsbinfop = sfmmup->sfmmu_tsb;
11630 11619                  rc = sfmmu_replace_tsb(sfmmup, tsbinfop, TSB_MIN_SZCODE,
11631 11620                      hatlockp, TSB_SWAPIN | TSB_FORCEALLOC);
11632 11621                  ASSERT(rc == TSB_SUCCESS);
11633 11622          }
11634 11623  
11635 11624          SFMMU_FLAGS_CLEAR(sfmmup, HAT_SWAPPED|HAT_SWAPIN);
11636 11625          cv_broadcast(&sfmmup->sfmmu_tsb_cv);
11637 11626  }
11638 11627  
11639 11628  static int
11640 11629  sfmmu_is_rgnva(sf_srd_t *srdp, caddr_t addr, ulong_t w, ulong_t bmw)
11641 11630  {
11642 11631          ulong_t bix = 0;
11643 11632          uint_t rid;
11644 11633          sf_region_t *rgnp;
11645 11634  
11646 11635          ASSERT(srdp != NULL);
11647 11636          ASSERT(srdp->srd_refcnt != 0);
11648 11637  
11649 11638          w <<= BT_ULSHIFT;
11650 11639          while (bmw) {
11651 11640                  if (!(bmw & 0x1)) {
11652 11641                          bix++;
11653 11642                          bmw >>= 1;
11654 11643                          continue;
11655 11644                  }
11656 11645                  rid = w | bix;
11657 11646                  rgnp = srdp->srd_hmergnp[rid];
11658 11647                  ASSERT(rgnp->rgn_refcnt > 0);
11659 11648                  ASSERT(rgnp->rgn_id == rid);
11660 11649                  if (addr < rgnp->rgn_saddr ||
11661 11650                      addr >= (rgnp->rgn_saddr + rgnp->rgn_size)) {
11662 11651                          bix++;
11663 11652                          bmw >>= 1;
11664 11653                  } else {
11665 11654                          return (1);
11666 11655                  }
11667 11656          }
11668 11657          return (0);
11669 11658  }
11670 11659  
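sfmmu_is_rgnva() walks the set bits of a single bitmap word, rebuilding each region id as (word << BT_ULSHIFT) | bit and then range-checking the faulting address against that region. A pared-down sketch of the bit walk itself (id_matches() is a placeholder for the per-region address check):

    #include <stdint.h>

    #define WORD_SHIFT      6       /* 64 ids per bitmap word, like BT_ULSHIFT */

    /* Placeholder range test; the kernel checks addr against rgn_saddr/rgn_size. */
    static int
    id_matches(unsigned id)
    {
        return (id == 42);
    }

    /* Return 1 if any set bit in word `bmw' (word index `w') names a matching id. */
    static int
    scan_word(unsigned w, uint64_t bmw)
    {
        unsigned bix = 0;

        w <<= WORD_SHIFT;
        while (bmw != 0) {
            if ((bmw & 1) && id_matches(w | bix))
                return (1);
            bix++;
            bmw >>= 1;
        }
        return (0);
    }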
11671 11660  /*
11672 11661   * Handle exceptions for low level tsb_handler.
11673 11662   *
11674 11663   * There are many scenarios that could land us here:
11675 11664   *
11676 11665   * If the context is invalid we land here. The context can be invalid
11677 11666   * for 3 reasons: 1) we couldn't allocate a new context and now need to
11678 11667   * perform a wrap around operation in order to allocate a new context.
11679 11668   * 2) Context was invalidated to change pagesize programming 3) ISMs or
 11680 11669   * 2) the context was invalidated to change pagesize programming; 3) the ISM or
 11681 11670   * TSB configuration is changing for this process and we are forced in
 11682 11671   * here to do a synchronization operation. If the context is valid we can
 11683 11672   * be here from the window trap handler; in this case just call trap() to handle
11684 11673   *
11685 11674   * Note that the process will run in INVALID_CONTEXT before
11686 11675   * faulting into here and subsequently loading the MMU registers
11687 11676   * (including the TSB base register) associated with this process.
11688 11677   * For this reason, the trap handlers must all test for
11689 11678   * INVALID_CONTEXT before attempting to access any registers other
11690 11679   * than the context registers.
11691 11680   */
11692 11681  void
11693 11682  sfmmu_tsbmiss_exception(struct regs *rp, uintptr_t tagaccess, uint_t traptype)
11694 11683  {
11695 11684          sfmmu_t *sfmmup, *shsfmmup;
11696 11685          uint_t ctxtype;
11697 11686          klwp_id_t lwp;
11698 11687          char lwp_save_state;
11699 11688          hatlock_t *hatlockp, *shatlockp;
11700 11689          struct tsb_info *tsbinfop;
11701 11690          struct tsbmiss *tsbmp;
11702 11691          sf_scd_t *scdp;
11703 11692  
11704 11693          SFMMU_STAT(sf_tsb_exceptions);
11705 11694          SFMMU_MMU_STAT(mmu_tsb_exceptions);
11706 11695          sfmmup = astosfmmu(curthread->t_procp->p_as);
11707 11696          /*
 11708 11697           * note that in sun4u, tagaccess register contains ctxnum
11709 11698           * while sun4v passes ctxtype in the tagaccess register.
11710 11699           */
11711 11700          ctxtype = tagaccess & TAGACC_CTX_MASK;
11712 11701  
11713 11702          ASSERT(sfmmup != ksfmmup && ctxtype != KCONTEXT);
11714 11703          ASSERT(sfmmup->sfmmu_ismhat == 0);
11715 11704          ASSERT(!SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED) ||
11716 11705              ctxtype == INVALID_CONTEXT);
11717 11706  
11718 11707          if (ctxtype != INVALID_CONTEXT && traptype != T_DATA_PROT) {
11719 11708                  /*
11720 11709                   * We may land here because shme bitmap and pagesize
11721 11710                   * flags are updated lazily in tsbmiss area on other cpus.
11722 11711                   * If we detect here that tsbmiss area is out of sync with
 11723 11712                   * sfmmu, update it and retry the trapped instruction.
11724 11713                   * Otherwise call trap().
11725 11714                   */
11726 11715                  int ret = 0;
11727 11716                  uchar_t tteflag_mask = (1 << TTE64K) | (1 << TTE8K);
11728 11717                  caddr_t addr = (caddr_t)(tagaccess & TAGACC_VADDR_MASK);
11729 11718  
11730 11719                  /*
11731 11720                   * Must set lwp state to LWP_SYS before
11732 11721                   * trying to acquire any adaptive lock
11733 11722                   */
11734 11723                  lwp = ttolwp(curthread);
11735 11724                  ASSERT(lwp);
11736 11725                  lwp_save_state = lwp->lwp_state;
11737 11726                  lwp->lwp_state = LWP_SYS;
11738 11727  
11739 11728                  hatlockp = sfmmu_hat_enter(sfmmup);
11740 11729                  kpreempt_disable();
11741 11730                  tsbmp = &tsbmiss_area[CPU->cpu_id];
11742 11731                  ASSERT(sfmmup == tsbmp->usfmmup);
11743 11732                  if (((tsbmp->uhat_tteflags ^ sfmmup->sfmmu_tteflags) &
11744 11733                      ~tteflag_mask) ||
11745 11734                      ((tsbmp->uhat_rtteflags ^  sfmmup->sfmmu_rtteflags) &
11746 11735                      ~tteflag_mask)) {
11747 11736                          tsbmp->uhat_tteflags = sfmmup->sfmmu_tteflags;
11748 11737                          tsbmp->uhat_rtteflags = sfmmup->sfmmu_rtteflags;
11749 11738                          ret = 1;
11750 11739                  }
11751 11740                  if (sfmmup->sfmmu_srdp != NULL) {
11752 11741                          ulong_t *sm = sfmmup->sfmmu_hmeregion_map.bitmap;
11753 11742                          ulong_t *tm = tsbmp->shmermap;
11754 11743                          ulong_t i;
11755 11744                          for (i = 0; i < SFMMU_HMERGNMAP_WORDS; i++) {
11756 11745                                  ulong_t d = tm[i] ^ sm[i];
11757 11746                                  if (d) {
11758 11747                                          if (d & sm[i]) {
11759 11748                                                  if (!ret && sfmmu_is_rgnva(
11760 11749                                                      sfmmup->sfmmu_srdp,
11761 11750                                                      addr, i, d & sm[i])) {
11762 11751                                                          ret = 1;
11763 11752                                                  }
11764 11753                                          }
11765 11754                                          tm[i] = sm[i];
11766 11755                                  }
11767 11756                          }
11768 11757                  }
11769 11758                  kpreempt_enable();
11770 11759                  sfmmu_hat_exit(hatlockp);
11771 11760                  lwp->lwp_state = lwp_save_state;
11772 11761                  if (ret) {
11773 11762                          return;
11774 11763                  }
11775 11764          } else if (ctxtype == INVALID_CONTEXT) {
11776 11765                  /*
11777 11766                   * First, make sure we come out of here with a valid ctx,
11778 11767                   * since if we don't get one we'll simply loop on the
11779 11768                   * faulting instruction.
11780 11769                   *
11781 11770                   * If the ISM mappings are changing, the TSB is relocated,
 11782 11771                   * the process is swapped, or the process is joining or
 11783 11772                   * leaving an SCD or shared regions, we serialize behind the
11784 11773                   * controlling thread with hat lock, sfmmu_flags and
11785 11774                   * sfmmu_tsb_cv condition variable.
11786 11775                   */
11787 11776  
11788 11777                  /*
11789 11778                   * Must set lwp state to LWP_SYS before
11790 11779                   * trying to acquire any adaptive lock
11791 11780                   */
11792 11781                  lwp = ttolwp(curthread);
11793 11782                  ASSERT(lwp);
11794 11783                  lwp_save_state = lwp->lwp_state;
11795 11784                  lwp->lwp_state = LWP_SYS;
11796 11785  
11797 11786                  hatlockp = sfmmu_hat_enter(sfmmup);
11798 11787  retry:
11799 11788                  if ((scdp = sfmmup->sfmmu_scdp) != NULL) {
11800 11789                          shsfmmup = scdp->scd_sfmmup;
11801 11790                          ASSERT(shsfmmup != NULL);
11802 11791  
11803 11792                          for (tsbinfop = shsfmmup->sfmmu_tsb; tsbinfop != NULL;
11804 11793                              tsbinfop = tsbinfop->tsb_next) {
11805 11794                                  if (tsbinfop->tsb_flags & TSB_RELOC_FLAG) {
11806 11795                                          /* drop the private hat lock */
11807 11796                                          sfmmu_hat_exit(hatlockp);
11808 11797                                          /* acquire the shared hat lock */
11809 11798                                          shatlockp = sfmmu_hat_enter(shsfmmup);
11810 11799                                          /*
11811 11800                                           * recheck to see if anything changed
11812 11801                                           * after we drop the private hat lock.
11813 11802                                           */
11814 11803                                          if (sfmmup->sfmmu_scdp == scdp &&
11815 11804                                              shsfmmup == scdp->scd_sfmmup) {
11816 11805                                                  sfmmu_tsb_chk_reloc(shsfmmup,
11817 11806                                                      shatlockp);
11818 11807                                          }
11819 11808                                          sfmmu_hat_exit(shatlockp);
11820 11809                                          hatlockp = sfmmu_hat_enter(sfmmup);
11821 11810                                          goto retry;
11822 11811                                  }
11823 11812                          }
11824 11813                  }
11825 11814  
11826 11815                  for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL;
11827 11816                      tsbinfop = tsbinfop->tsb_next) {
11828 11817                          if (tsbinfop->tsb_flags & TSB_RELOC_FLAG) {
11829 11818                                  cv_wait(&sfmmup->sfmmu_tsb_cv,
11830 11819                                      HATLOCK_MUTEXP(hatlockp));
11831 11820                                  goto retry;
11832 11821                          }
11833 11822                  }
11834 11823  
11835 11824                  /*
11836 11825                   * Wait for ISM maps to be updated.
11837 11826                   */
11838 11827                  if (SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)) {
11839 11828                          cv_wait(&sfmmup->sfmmu_tsb_cv,
11840 11829                              HATLOCK_MUTEXP(hatlockp));
11841 11830                          goto retry;
11842 11831                  }
11843 11832  
11844 11833                  /* Is this process joining an SCD? */
11845 11834                  if (SFMMU_FLAGS_ISSET(sfmmup, HAT_JOIN_SCD)) {
11846 11835                          /*
11847 11836                           * Flush private TSB and setup shared TSB.
11848 11837                           * sfmmu_finish_join_scd() does not drop the
11849 11838                           * hat lock.
11850 11839                           */
11851 11840                          sfmmu_finish_join_scd(sfmmup);
11852 11841                          SFMMU_FLAGS_CLEAR(sfmmup, HAT_JOIN_SCD);
11853 11842                  }
11854 11843  
11855 11844                  /*
11856 11845                   * If we're swapping in, get TSB(s).  Note that we must do
11857 11846                   * this before we get a ctx or load the MMU state.  Once
11858 11847                   * we swap in we have to recheck to make sure the TSB(s) and
11859 11848                   * ISM mappings didn't change while we slept.
11860 11849                   */
11861 11850                  if (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) {
11862 11851                          sfmmu_tsb_swapin(sfmmup, hatlockp);
11863 11852                          goto retry;
11864 11853                  }
11865 11854  
11866 11855                  sfmmu_get_ctx(sfmmup);
11867 11856  
11868 11857                  sfmmu_hat_exit(hatlockp);
11869 11858                  /*
11870 11859                   * Must restore lwp_state if not calling
11871 11860                   * trap() for further processing. Restore
11872 11861                   * it anyway.
11873 11862                   */
11874 11863                  lwp->lwp_state = lwp_save_state;
11875 11864                  return;
11876 11865          }
11877 11866          trap(rp, (caddr_t)tagaccess, traptype, 0);
11878 11867  }
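
The INVALID_CONTEXT path above is a recheck-and-wait loop: take the hat lock, test each condition (TSB relocation, ISM updates, SCD join, swap-in), cv_wait and retry from the top if any of them holds. Below is a minimal userland sketch of the same pattern, assuming POSIX threads; hat_lock, tsb_cv, tsb_relocating and ism_busy are illustrative stand-ins, not kernel interfaces.

    #include <pthread.h>
    #include <stdio.h>

    /* Illustrative state guarded by one lock, mirroring the hat lock. */
    static pthread_mutex_t hat_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  tsb_cv   = PTHREAD_COND_INITIALIZER;
    static int tsb_relocating;              /* stands in for TSB_RELOC_FLAG */
    static int ism_busy;                    /* stands in for HAT_ISMBUSY */

    /*
     * Recheck every condition from the top after each wakeup -- the
     * userland equivalent of the "goto retry" above.
     */
    static void
    wait_for_stable_tsb(void)
    {
            pthread_mutex_lock(&hat_lock);
            while (tsb_relocating || ism_busy)
                    pthread_cond_wait(&tsb_cv, &hat_lock);
            /* ... a context would be allocated here ... */
            pthread_mutex_unlock(&hat_lock);
    }

    static void
    finish_relocation(void)
    {
            pthread_mutex_lock(&hat_lock);
            tsb_relocating = 0;
            pthread_cond_broadcast(&tsb_cv);        /* like cv_broadcast() */
            pthread_mutex_unlock(&hat_lock);
    }

    int
    main(void)
    {
            tsb_relocating = 1;             /* pretend a relocation started */
            finish_relocation();            /* ... and completed */
            wait_for_stable_tsb();          /* returns immediately now */
            (void) printf("TSB stable\n");
            return (0);
    }

The kernel version also drops and re-enters the lock around blocking operations, but the retry-from-the-top structure is the same.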
11879 11868  
11880 11869  static void
11881 11870  sfmmu_tsb_chk_reloc(sfmmu_t *sfmmup, hatlock_t *hatlockp)
11882 11871  {
11883 11872          struct tsb_info *tp;
11884 11873  
11885 11874          ASSERT(sfmmu_hat_lock_held(sfmmup));
11886 11875  
11887 11876          for (tp = sfmmup->sfmmu_tsb; tp != NULL; tp = tp->tsb_next) {
11888 11877                  if (tp->tsb_flags & TSB_RELOC_FLAG) {
11889 11878                          cv_wait(&sfmmup->sfmmu_tsb_cv,
11890 11879                              HATLOCK_MUTEXP(hatlockp));
11891 11880                          break;
11892 11881                  }
11893 11882          }
11894 11883  }
11895 11884  
11896 11885  /*
11897 11886   * sfmmu_vatopfn_suspended is called from GET_TTE when TL=0 and
11898 11887   * the TTE_SUSPENDED bit is set in the tte.  We block on acquiring a
11899 11888   * page lock rather than spinning, to avoid send mondo timeouts with
11900 11889   * interrupts enabled. When the lock is acquired it is immediately
11901 11890   * released and we return back to sfmmu_vatopfn just after
11902 11891   * the GET_TTE call.
11903 11892   */
11904 11893  void
11905 11894  sfmmu_vatopfn_suspended(caddr_t vaddr, sfmmu_t *sfmmu, tte_t *ttep)
11906 11895  {
11907 11896          struct page     **pp;
11908 11897  
11909 11898          (void) as_pagelock(sfmmu->sfmmu_as, &pp, vaddr, TTE_CSZ(ttep), S_WRITE);
11910 11899          as_pageunlock(sfmmu->sfmmu_as, pp, vaddr, TTE_CSZ(ttep), S_WRITE);
11911 11900  }
11912 11901  
11913 11902  /*
11914 11903   * sfmmu_tsbmiss_suspended is called from GET_TTE when TL>0 and
11915 11904   * the TTE_SUSPENDED bit is set in the tte.  We do this so that we can handle
11916 11905   * cross traps which cannot be handled while spinning in the
11917 11906   * trap handlers. Simply enter and exit the kpr_suspendlock spin
11918 11907   * mutex, which is held by the holder of the suspend bit, and then
11919 11908   * retry the trapped instruction after unwinding.
11920 11909   */
11921 11910  /*ARGSUSED*/
11922 11911  void
11923 11912  sfmmu_tsbmiss_suspended(struct regs *rp, uintptr_t tagacc, uint_t traptype)
11924 11913  {
11925 11914          ASSERT(curthread != kreloc_thread);
11926 11915          mutex_enter(&kpr_suspendlock);
11927 11916          mutex_exit(&kpr_suspendlock);
11928 11917  }
11929 11918  
11930 11919  /*
11931 11920   * This routine could be optimized to reduce the number of xcalls by flushing
11932 11921   * the entire TLB if the region reference count is above some threshold, but
11933 11922   * the tradeoff will depend on the size of the TLB. So for now, flush the
11934 11923   * specific page one context at a time.
11935 11924   *
11936 11925   * If uselocks is 0 then it's called after all cpus were captured and all the
11937 11926   * hat locks were taken. In this case don't take the region lock, relying on
11938 11927   * the order of region list update operations in hat_join_region(),
11939 11928   * hat_leave_region() and hat_dup_region(). The ordering in those routines
11940 11929   * guarantees that list is always forward walkable and reaches active sfmmus
11941 11930   * regardless of where xc_attention() captures a cpu.
11942 11931   */
11943 11932  cpuset_t
11944 11933  sfmmu_rgntlb_demap(caddr_t addr, sf_region_t *rgnp,
11945 11934      struct hme_blk *hmeblkp, int uselocks)
11946 11935  {
11947 11936          sfmmu_t *sfmmup;
11948 11937          cpuset_t cpuset;
11949 11938          cpuset_t rcpuset;
11950 11939          hatlock_t *hatlockp;
11951 11940          uint_t rid = rgnp->rgn_id;
11952 11941          sf_rgn_link_t *rlink;
11953 11942          sf_scd_t *scdp;
11954 11943  
11955 11944          ASSERT(hmeblkp->hblk_shared);
11956 11945          ASSERT(SFMMU_IS_SHMERID_VALID(rid));
11957 11946          ASSERT(rid < SFMMU_MAX_HME_REGIONS);
11958 11947  
11959 11948          CPUSET_ZERO(rcpuset);
11960 11949          if (uselocks) {
11961 11950                  mutex_enter(&rgnp->rgn_mutex);
11962 11951          }
11963 11952          sfmmup = rgnp->rgn_sfmmu_head;
11964 11953          while (sfmmup != NULL) {
11965 11954                  if (uselocks) {
11966 11955                          hatlockp = sfmmu_hat_enter(sfmmup);
11967 11956                  }
11968 11957  
11969 11958                  /*
11970 11959                   * When an SCD is created the SCD hat is linked on the sfmmu
11971 11960                   * region lists for each hme region which is part of the
11972 11961                   * SCD.  If we find an SCD hat when walking these lists,
11973 11962                   * we flush the shared TSBs; if we find a private hat
11974 11963                   * which is part of an SCD, but where the region
11975 11964                   * is not part of the SCD, we flush the private TSBs.
11976 11965                   */
11977 11966                  if (!sfmmup->sfmmu_scdhat && sfmmup->sfmmu_scdp != NULL &&
11978 11967                      !SFMMU_FLAGS_ISSET(sfmmup, HAT_JOIN_SCD)) {
11979 11968                          scdp = sfmmup->sfmmu_scdp;
11980 11969                          if (SF_RGNMAP_TEST(scdp->scd_hmeregion_map, rid)) {
11981 11970                                  if (uselocks) {
11982 11971                                          sfmmu_hat_exit(hatlockp);
11983 11972                                  }
11984 11973                                  goto next;
11985 11974                          }
11986 11975                  }
11987 11976  
11988 11977                  SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, 0);
11989 11978  
11990 11979                  kpreempt_disable();
11991 11980                  cpuset = sfmmup->sfmmu_cpusran;
11992 11981                  CPUSET_AND(cpuset, cpu_ready_set);
11993 11982                  CPUSET_DEL(cpuset, CPU->cpu_id);
11994 11983                  SFMMU_XCALL_STATS(sfmmup);
11995 11984                  xt_some(cpuset, vtag_flushpage_tl1,
11996 11985                      (uint64_t)addr, (uint64_t)sfmmup);
11997 11986                  vtag_flushpage(addr, (uint64_t)sfmmup);
11998 11987                  if (uselocks) {
11999 11988                          sfmmu_hat_exit(hatlockp);
12000 11989                  }
12001 11990                  kpreempt_enable();
12002 11991                  CPUSET_OR(rcpuset, cpuset);
12003 11992  
12004 11993  next:
12005 11994                  /* LINTED: constant in conditional context */
12006 11995                  SFMMU_HMERID2RLINKP(sfmmup, rid, rlink, 0, 0);
12007 11996                  ASSERT(rlink != NULL);
12008 11997                  sfmmup = rlink->next;
12009 11998          }
12010 11999          if (uselocks) {
12011 12000                  mutex_exit(&rgnp->rgn_mutex);
12012 12001          }
12013 12002          return (rcpuset);
12014 12003  }
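
sfmmu_rgntlb_demap builds its cross-call target set the same way the other flush paths here do: start from the CPUs the hat has run on, intersect with the ready set, and drop the local CPU, which is flushed directly. A small self-contained sketch of those set operations follows, assuming a toy 64-bit CPU set; the TOY_* names are illustrative and far simpler than the real cpuset_t macros.

    #include <stdint.h>
    #include <stdio.h>

    /* Toy 64-CPU set; the real cpuset_t and CPUSET_* macros are richer. */
    typedef uint64_t toy_cpuset_t;
    #define TOY_CPUSET_AND(s, m)    ((s) &= (m))
    #define TOY_CPUSET_DEL(s, c)    ((s) &= ~(1ULL << (c)))
    #define TOY_CPUSET_OR(d, s)     ((d) |= (s))

    int
    main(void)
    {
            toy_cpuset_t ready = 0xFFULL;           /* cpus 0-7 online */
            toy_cpuset_t ran   = 0x3CULL;           /* hat ran on cpus 2-5 */
            toy_cpuset_t target = ran;
            toy_cpuset_t rcpuset = 0;
            int self = 3;                           /* pretend CPU->cpu_id */

            TOY_CPUSET_AND(target, ready);          /* only ready cpus */
            TOY_CPUSET_DEL(target, self);           /* self is flushed locally */
            TOY_CPUSET_OR(rcpuset, target);         /* accumulate, like rcpuset */
            (void) printf("cross-call set: %#llx\n",
                (unsigned long long)rcpuset);
            return (0);
    }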
12015 12004  
12016 12005  /*
12017 12006   * This routine takes an sfmmu pointer and the va for an address in an
12018 12007   * ISM region as input and returns the corresponding region id in ism_rid.
12019 12008   * The return value of 1 indicates that a region has been found and ism_rid
12020 12009   * is valid, otherwise 0 is returned.
12021 12010   */
12022 12011  static int
12023 12012  find_ism_rid(sfmmu_t *sfmmup, sfmmu_t *ism_sfmmup, caddr_t va, uint_t *ism_rid)
12024 12013  {
12025 12014          ism_blk_t       *ism_blkp;
12026 12015          int             i;
12027 12016          ism_map_t       *ism_map;
12028 12017  #ifdef DEBUG
12029 12018          struct hat      *ism_hatid;
12030 12019  #endif
12031 12020          ASSERT(sfmmu_hat_lock_held(sfmmup));
12032 12021  
12033 12022          ism_blkp = sfmmup->sfmmu_iblk;
12034 12023          while (ism_blkp != NULL) {
12035 12024                  ism_map = ism_blkp->iblk_maps;
12036 12025                  for (i = 0; i < ISM_MAP_SLOTS && ism_map[i].imap_ismhat; i++) {
12037 12026                          if ((va >= ism_start(ism_map[i])) &&
12038 12027                              (va < ism_end(ism_map[i]))) {
12039 12028  
12040 12029                                  *ism_rid = ism_map[i].imap_rid;
12041 12030  #ifdef DEBUG
12042 12031                                  ism_hatid = ism_map[i].imap_ismhat;
12043 12032                                  ASSERT(ism_hatid == ism_sfmmup);
12044 12033                                  ASSERT(ism_hatid->sfmmu_ismhat);
12045 12034  #endif
12046 12035                                  return (1);
12047 12036                          }
12048 12037                  }
12049 12038                  ism_blkp = ism_blkp->iblk_next;
12050 12039          }
12051 12040          return (0);
12052 12041  }
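
find_ism_rid above is a straightforward walk of chained blocks of fixed-size map slots, returning the rid of the first [ism_start, ism_end) range containing the va. Here is a compact userland sketch of that lookup; the toy_blk/toy_map types, slot count and "end == 0 means unused" sentinel are assumptions of the sketch, not the kernel's encoding.

    #include <stdint.h>
    #include <stdio.h>

    #define MAP_SLOTS 4

    struct toy_map { uintptr_t start, end; unsigned rid; };
    struct toy_blk { struct toy_map maps[MAP_SLOTS]; struct toy_blk *next; };

    /* Return 1 and set *ridp if va falls inside any [start, end) range. */
    static int
    toy_find_rid(const struct toy_blk *blk, uintptr_t va, unsigned *ridp)
    {
            for (; blk != NULL; blk = blk->next) {
                    for (int i = 0; i < MAP_SLOTS && blk->maps[i].end != 0;
                        i++) {
                            if (va >= blk->maps[i].start &&
                                va < blk->maps[i].end) {
                                    *ridp = blk->maps[i].rid;
                                    return (1);
                            }
                    }
            }
            return (0);
    }

    int
    main(void)
    {
            struct toy_blk blk = {
                    .maps = {
                            { 0x10000, 0x20000, 7 },
                            { 0x40000, 0x48000, 9 },
                    },
            };
            unsigned rid;

            if (toy_find_rid(&blk, 0x41000, &rid))
                    (void) printf("va maps to ISM rid %u\n", rid);
            return (0);
    }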
12053 12042  
12054 12043  /*
12055 12044   * Special routine to flush out ism mappings: TSBs, TLBs and D-caches.
12056 12045   * This routine may be called with all cpu's captured. Therefore, the
12057 12046   * caller is responsible for holding all locks and disabling kernel
12058 12047   * preemption.
12059 12048   */
12060 12049  /* ARGSUSED */
12061 12050  static void
12062 12051  sfmmu_ismtlbcache_demap(caddr_t addr, sfmmu_t *ism_sfmmup,
12063 12052          struct hme_blk *hmeblkp, pfn_t pfnum, int cache_flush_flag)
12064 12053  {
12065 12054          cpuset_t        cpuset;
12066 12055          caddr_t         va;
12067 12056          ism_ment_t      *ment;
12068 12057          sfmmu_t         *sfmmup;
12069 12058  #ifdef VAC
12070 12059          int             vcolor;
12071 12060  #endif
12072 12061  
12073 12062          sf_scd_t        *scdp;
12074 12063          uint_t          ism_rid;
12075 12064  
12076 12065          ASSERT(!hmeblkp->hblk_shared);
12077 12066          /*
12078 12067           * Walk the ism_hat's mapping list and flush the page
12079 12068           * from every hat sharing this ism_hat. This routine
12080 12069           * may be called while all cpu's have been captured.
12081 12070           * Therefore we can't attempt to grab any locks. For now
12082 12071           * this means we will protect the ism mapping list under
12083 12072           * a single lock which will be grabbed by the caller.
12084 12073   * If hat_share/unshare scalability becomes a performance
12085 12074           * problem then we may need to re-think ism mapping list locking.
12086 12075           */
12087 12076          ASSERT(ism_sfmmup->sfmmu_ismhat);
12088 12077          ASSERT(MUTEX_HELD(&ism_mlist_lock));
12089 12078          addr = addr - ISMID_STARTADDR;
12090 12079  
12091 12080          for (ment = ism_sfmmup->sfmmu_iment; ment; ment = ment->iment_next) {
12092 12081  
12093 12082                  sfmmup = ment->iment_hat;
12094 12083  
12095 12084                  va = ment->iment_base_va;
12096 12085                  va = (caddr_t)((uintptr_t)va  + (uintptr_t)addr);
12097 12086  
12098 12087                  /*
12099 12088                   * When an SCD is created the SCD hat is linked on the ism
12100 12089                   * mapping lists for each ISM segment which is part of the
12101 12090                   * SCD. If we find an SCD hat, when walking these lists,
12102 12091                   * SCD.  If we find an SCD hat when walking these lists,
12103 12092                   * we flush the shared TSBs; if we find a private hat
12104 12093                   * which is part of an SCD, but where the region
12105 12094                   * corresponding to this va is not part of the SCD, we
12106 12095                   */
12107 12096                  if (!sfmmup->sfmmu_scdhat && sfmmup->sfmmu_scdp != NULL &&
12108 12097                      !SFMMU_FLAGS_ISSET(sfmmup, HAT_JOIN_SCD) &&
12109 12098                      !SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)) {
12110 12099                          if (!find_ism_rid(sfmmup, ism_sfmmup, va,
12111 12100                              &ism_rid)) {
12112 12101                                  cmn_err(CE_PANIC,
12113 12102                                      "can't find matching ISM rid!");
12114 12103                          }
12115 12104  
12116 12105                          scdp = sfmmup->sfmmu_scdp;
12117 12106                          if (SFMMU_IS_ISMRID_VALID(ism_rid) &&
12118 12107                              SF_RGNMAP_TEST(scdp->scd_ismregion_map,
12119 12108                              ism_rid)) {
12120 12109                                  continue;
12121 12110                          }
12122 12111                  }
12123 12112                  SFMMU_UNLOAD_TSB(va, sfmmup, hmeblkp, 1);
12124 12113  
12125 12114                  cpuset = sfmmup->sfmmu_cpusran;
12126 12115                  CPUSET_AND(cpuset, cpu_ready_set);
12127 12116                  CPUSET_DEL(cpuset, CPU->cpu_id);
12128 12117                  SFMMU_XCALL_STATS(sfmmup);
12129 12118                  xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)va,
12130 12119                      (uint64_t)sfmmup);
12131 12120                  vtag_flushpage(va, (uint64_t)sfmmup);
12132 12121  
12133 12122  #ifdef VAC
12134 12123                  /*
12135 12124                   * Flush D$
12136 12125                   * When flushing D$ we must flush all
12137 12126                   * cpu's. See sfmmu_cache_flush().
12138 12127                   */
12139 12128                  if (cache_flush_flag == CACHE_FLUSH) {
12140 12129                          cpuset = cpu_ready_set;
12141 12130                          CPUSET_DEL(cpuset, CPU->cpu_id);
12142 12131  
12143 12132                          SFMMU_XCALL_STATS(sfmmup);
12144 12133                          vcolor = addr_to_vcolor(va);
12145 12134                          xt_some(cpuset, vac_flushpage_tl1, pfnum, vcolor);
12146 12135                          vac_flushpage(pfnum, vcolor);
12147 12136                  }
12148 12137  #endif  /* VAC */
12149 12138          }
12150 12139  }
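
The loop above first converts the fault address into an offset within the ISM segment (addr - ISMID_STARTADDR) and then adds that offset to each sharer's iment_base_va, so the same page is flushed at a different virtual address in every sharing process. A minimal sketch of that translation follows; TOY_ISM_START and the sharer base addresses are invented purely for illustration.

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    #define TOY_ISM_START   0x40000000UL    /* stands in for ISMID_STARTADDR */

    /* Each sharer maps the same ISM segment at its own base VA. */
    static const uintptr_t sharer_base[] = { 0x2000000, 0x7f00000, 0xc400000 };

    int
    main(void)
    {
            uintptr_t ism_va = TOY_ISM_START + 0x6000;      /* fault address */
            uintptr_t off = ism_va - TOY_ISM_START;         /* segment offset */

            /* flush the same offset in every process sharing the segment */
            for (size_t i = 0;
                i < sizeof (sharer_base) / sizeof (sharer_base[0]); i++)
                    (void) printf("flush va %#lx in sharer %zu\n",
                        (unsigned long)(sharer_base[i] + off), i);
            return (0);
    }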
12151 12140  
12152 12141  /*
12153 12142   * Demaps the TSB, CPU caches, and flushes all TLBs on all CPUs of
12154 12143   * a particular virtual address and ctx.  If noflush is set we do not
12155 12144   * flush the TLB/TSB.  This function may or may not be called with the
12156 12145   * HAT lock held.
12157 12146   */
12158 12147  static void
12159 12148  sfmmu_tlbcache_demap(caddr_t addr, sfmmu_t *sfmmup, struct hme_blk *hmeblkp,
12160 12149          pfn_t pfnum, int tlb_noflush, int cpu_flag, int cache_flush_flag,
12161 12150          int hat_lock_held)
12162 12151  {
12163 12152  #ifdef VAC
12164 12153          int vcolor;
12165 12154  #endif
12166 12155          cpuset_t cpuset;
12167 12156          hatlock_t *hatlockp;
12168 12157  
12169 12158          ASSERT(!hmeblkp->hblk_shared);
12170 12159  
12171 12160  #if defined(lint) && !defined(VAC)
12172 12161          pfnum = pfnum;
12173 12162          cpu_flag = cpu_flag;
12174 12163          cache_flush_flag = cache_flush_flag;
12175 12164  #endif
12176 12165  
12177 12166          /*
12178 12167           * There is no longer a need to protect against ctx being
12179 12168           * stolen here since we don't store the ctx in the TSB anymore.
12180 12169           */
12181 12170  #ifdef VAC
12182 12171          vcolor = addr_to_vcolor(addr);
12183 12172  #endif
12184 12173  
12185 12174          /*
12186 12175           * We must hold the hat lock during the flush of TLB,
12187 12176           * to avoid a race with sfmmu_invalidate_ctx(), where
12188 12177           * sfmmu_cnum on a MMU could be set to INVALID_CONTEXT,
12189 12178           * causing TLB demap routine to skip flush on that MMU.
12190 12179           * If the context on a MMU has already been set to
12191 12180           * INVALID_CONTEXT, we just get an extra flush on
12192 12181           * that MMU.
12193 12182           */
12194 12183          if (!hat_lock_held && !tlb_noflush)
12195 12184                  hatlockp = sfmmu_hat_enter(sfmmup);
12196 12185  
12197 12186          kpreempt_disable();
12198 12187          if (!tlb_noflush) {
12199 12188                  /*
12200 12189                   * Flush the TSB and TLB.
12201 12190                   */
12202 12191                  SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, 0);
12203 12192  
12204 12193                  cpuset = sfmmup->sfmmu_cpusran;
12205 12194                  CPUSET_AND(cpuset, cpu_ready_set);
12206 12195                  CPUSET_DEL(cpuset, CPU->cpu_id);
12207 12196  
12208 12197                  SFMMU_XCALL_STATS(sfmmup);
12209 12198  
12210 12199                  xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)addr,
12211 12200                      (uint64_t)sfmmup);
12212 12201  
12213 12202                  vtag_flushpage(addr, (uint64_t)sfmmup);
12214 12203          }
12215 12204  
12216 12205          if (!hat_lock_held && !tlb_noflush)
12217 12206                  sfmmu_hat_exit(hatlockp);
12218 12207  
12219 12208  #ifdef VAC
12220 12209          /*
12221 12210           * Flush the D$
12222 12211           *
12223 12212           * Even if the ctx is stolen, we need to flush the
12224 12213           * cache. Our ctx stealer only flushes the TLBs.
12225 12214           */
12226 12215          if (cache_flush_flag == CACHE_FLUSH) {
12227 12216                  if (cpu_flag & FLUSH_ALL_CPUS) {
12228 12217                          cpuset = cpu_ready_set;
12229 12218                  } else {
12230 12219                          cpuset = sfmmup->sfmmu_cpusran;
12231 12220                          CPUSET_AND(cpuset, cpu_ready_set);
12232 12221                  }
12233 12222                  CPUSET_DEL(cpuset, CPU->cpu_id);
12234 12223                  SFMMU_XCALL_STATS(sfmmup);
12235 12224                  xt_some(cpuset, vac_flushpage_tl1, pfnum, vcolor);
12236 12225                  vac_flushpage(pfnum, vcolor);
12237 12226          }
12238 12227  #endif  /* VAC */
12239 12228          kpreempt_enable();
12240 12229  }
12241 12230  
12242 12231  /*
12243 12232   * Demaps the TSB and flushes all TLBs on all cpus for a particular virtual
12244 12233   * address and ctx.  If noflush is set we do not currently do anything.
12245 12234   * This function may or may not be called with the HAT lock held.
12246 12235   */
12247 12236  static void
12248 12237  sfmmu_tlb_demap(caddr_t addr, sfmmu_t *sfmmup, struct hme_blk *hmeblkp,
12249 12238          int tlb_noflush, int hat_lock_held)
12250 12239  {
12251 12240          cpuset_t cpuset;
12252 12241          hatlock_t *hatlockp;
12253 12242  
12254 12243          ASSERT(!hmeblkp->hblk_shared);
12255 12244  
12256 12245          /*
12257 12246           * If the process is exiting we have nothing to do.
12258 12247           */
12259 12248          if (tlb_noflush)
12260 12249                  return;
12261 12250  
12262 12251          /*
12263 12252           * Flush TSB.
12264 12253           */
12265 12254          if (!hat_lock_held)
12266 12255                  hatlockp = sfmmu_hat_enter(sfmmup);
12267 12256          SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, 0);
12268 12257  
12269 12258          kpreempt_disable();
12270 12259  
12271 12260          cpuset = sfmmup->sfmmu_cpusran;
12272 12261          CPUSET_AND(cpuset, cpu_ready_set);
12273 12262          CPUSET_DEL(cpuset, CPU->cpu_id);
12274 12263  
12275 12264          SFMMU_XCALL_STATS(sfmmup);
12276 12265          xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)addr, (uint64_t)sfmmup);
12277 12266  
12278 12267          vtag_flushpage(addr, (uint64_t)sfmmup);
12279 12268  
12280 12269          if (!hat_lock_held)
12281 12270                  sfmmu_hat_exit(hatlockp);
12282 12271  
12283 12272          kpreempt_enable();
12284 12273  
12285 12274  }
12286 12275  
12287 12276  /*
12288 12277   * Special case of sfmmu_tlb_demap for MMU_PAGESIZE hblks. Use the xcall
12289 12278   * call handler that can flush a range of pages to save on xcalls.
12290 12279   */
12291 12280  static int sfmmu_xcall_save;
12292 12281  
12293 12282  /*
12294 12283   * This routine is never used for demapping addresses backed by SRD hmeblks.
12295 12284   */
12296 12285  static void
12297 12286  sfmmu_tlb_range_demap(demap_range_t *dmrp)
12298 12287  {
12299 12288          sfmmu_t *sfmmup = dmrp->dmr_sfmmup;
12300 12289          hatlock_t *hatlockp;
12301 12290          cpuset_t cpuset;
12302 12291          uint64_t sfmmu_pgcnt;
12303 12292          pgcnt_t pgcnt = 0;
12304 12293          int pgunload = 0;
12305 12294          int dirtypg = 0;
12306 12295          caddr_t addr = dmrp->dmr_addr;
12307 12296          caddr_t eaddr;
12308 12297          uint64_t bitvec = dmrp->dmr_bitvec;
12309 12298  
12310 12299          ASSERT(bitvec & 1);
12311 12300  
12312 12301          /*
12313 12302           * Flush TSB and calculate number of pages to flush.
12314 12303           */
12315 12304          while (bitvec != 0) {
12316 12305                  dirtypg = 0;
12317 12306                  /*
12318 12307                   * Find the first page to flush and then count how many
12319 12308                   * pages there are after it that also need to be flushed.
12320 12309                   * This way the number of TSB flushes is minimized.
12321 12310                   */
12322 12311                  while ((bitvec & 1) == 0) {
12323 12312                          pgcnt++;
12324 12313                          addr += MMU_PAGESIZE;
12325 12314                          bitvec >>= 1;
12326 12315                  }
12327 12316                  while (bitvec & 1) {
12328 12317                          dirtypg++;
12329 12318                          bitvec >>= 1;
12330 12319                  }
12331 12320                  eaddr = addr + ptob(dirtypg);
12332 12321                  hatlockp = sfmmu_hat_enter(sfmmup);
12333 12322                  sfmmu_unload_tsb_range(sfmmup, addr, eaddr, TTE8K);
12334 12323                  sfmmu_hat_exit(hatlockp);
12335 12324                  pgunload += dirtypg;
12336 12325                  addr = eaddr;
12337 12326                  pgcnt += dirtypg;
12338 12327          }
12339 12328  
12340 12329          ASSERT((pgcnt<<MMU_PAGESHIFT) <= dmrp->dmr_endaddr - dmrp->dmr_addr);
12341 12330          if (sfmmup->sfmmu_free == 0) {
12342 12331                  addr = dmrp->dmr_addr;
12343 12332                  bitvec = dmrp->dmr_bitvec;
12344 12333  
12345 12334                  /*
12346 12335                   * make sure it has SFMMU_PGCNT_SHIFT bits only,
12347 12336                   * as it will be used to pack argument for xt_some
12348 12337                   */
12349 12338                  ASSERT((pgcnt > 0) &&
12350 12339                      (pgcnt <= (1 << SFMMU_PGCNT_SHIFT)));
12351 12340  
12352 12341                  /*
12353 12342                   * Encode pgcnt as (pgcnt - 1), and pass (pgcnt - 1) in
12354 12343                   * the low 6 bits of sfmmup. This is doable since pgcnt is
12355 12344                   * always >= 1.
12356 12345                   */
12357 12346                  ASSERT(!((uint64_t)sfmmup & SFMMU_PGCNT_MASK));
12358 12347                  sfmmu_pgcnt = (uint64_t)sfmmup |
12359 12348                      ((pgcnt - 1) & SFMMU_PGCNT_MASK);
12360 12349  
12361 12350                  /*
12362 12351                   * We must hold the hat lock during the flush of TLB,
12363 12352                   * to avoid a race with sfmmu_invalidate_ctx(), where
12364 12353                   * sfmmu_cnum on a MMU could be set to INVALID_CONTEXT,
12365 12354                   * causing TLB demap routine to skip flush on that MMU.
12366 12355                   * If the context on a MMU has already been set to
12367 12356                   * INVALID_CONTEXT, we just get an extra flush on
12368 12357                   * that MMU.
12369 12358                   */
12370 12359                  hatlockp = sfmmu_hat_enter(sfmmup);
12371 12360                  kpreempt_disable();
12372 12361  
12373 12362                  cpuset = sfmmup->sfmmu_cpusran;
12374 12363                  CPUSET_AND(cpuset, cpu_ready_set);
12375 12364                  CPUSET_DEL(cpuset, CPU->cpu_id);
12376 12365  
12377 12366                  SFMMU_XCALL_STATS(sfmmup);
12378 12367                  xt_some(cpuset, vtag_flush_pgcnt_tl1, (uint64_t)addr,
12379 12368                      sfmmu_pgcnt);
12380 12369  
12381 12370                  for (; bitvec != 0; bitvec >>= 1) {
12382 12371                          if (bitvec & 1)
12383 12372                                  vtag_flushpage(addr, (uint64_t)sfmmup);
12384 12373                          addr += MMU_PAGESIZE;
12385 12374                  }
12386 12375                  kpreempt_enable();
12387 12376                  sfmmu_hat_exit(hatlockp);
12388 12377  
12389 12378                  sfmmu_xcall_save += (pgunload-1);
12390 12379          }
12391 12380          dmrp->dmr_bitvec = 0;
12392 12381  }
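
Two details of sfmmu_tlb_range_demap are worth illustrating: the dmr_bitvec walk coalesces each run of dirty pages into one TSB unload, and the cross-call argument packs (pgcnt - 1) into the low SFMMU_PGCNT_SHIFT bits of the suitably aligned sfmmup pointer. A self-contained sketch of both, assuming 8K pages and a 6-bit count field; toy_range_walk and toy_pack are illustrative names only.

    #include <stdint.h>
    #include <stdio.h>

    #define TOY_PAGESIZE    8192            /* stands in for MMU_PAGESIZE */
    #define TOY_PGCNT_SHIFT 6
    #define TOY_PGCNT_MASK  ((1ULL << TOY_PGCNT_SHIFT) - 1)

    /*
     * Coalesce each run of set bits in a dirty-page bitvector into a
     * single [addr, eaddr) range, the way the TSB unload above
     * minimizes the number of flush operations.
     */
    static void
    toy_range_walk(uintptr_t addr, uint64_t bitvec)
    {
            while (bitvec != 0) {
                    unsigned run = 0;

                    while ((bitvec & 1) == 0) {     /* skip clean pages */
                            addr += TOY_PAGESIZE;
                            bitvec >>= 1;
                    }
                    while (bitvec & 1) {            /* count the dirty run */
                            run++;
                            bitvec >>= 1;
                    }
                    (void) printf("unload [%#lx, %#lx)\n", (unsigned long)addr,
                        (unsigned long)(addr +
                        (uintptr_t)run * TOY_PAGESIZE));
                    addr += (uintptr_t)run * TOY_PAGESIZE;
            }
    }

    /*
     * Pack (pgcnt - 1) into the low bits of an aligned pointer value,
     * mirroring how the vtag_flush_pgcnt_tl1 argument is built above.
     */
    static uint64_t
    toy_pack(uint64_t aligned_ptr, unsigned pgcnt)
    {
            return (aligned_ptr | ((uint64_t)(pgcnt - 1) & TOY_PGCNT_MASK));
    }

    int
    main(void)
    {
            toy_range_walk(0x10000, 0x1d);          /* pages 0 and 2-4 dirty */
            (void) printf("packed arg: %#llx\n",
                (unsigned long long)toy_pack(0x123400, 4));
            return (0);
    }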
12393 12382  
12394 12383  /*
12395 12384   * In cases where we need to synchronize with TLB/TSB miss trap
12396 12385   * handlers, _and_ need to flush the TLB, it's a lot easier to
12397 12386   * throw away the context from the process than to do a
12398 12387   * special song and dance to keep things consistent for the
12399 12388   * handlers.
12400 12389   *
12401 12390   * Since the process suddenly ends up without a context and our caller
12402 12391   * holds the hat lock, threads that fault after this function is called
12403 12392   * will pile up on the lock.  We can then do whatever we need to
12404 12393   * atomically from the context of the caller.  The first blocked thread
12405 12394   * to resume executing will get the process a new context, and the
12406 12395   * process will resume executing.
12407 12396   *
12408 12397   * One added advantage of this approach is that on MMUs that
12409 12398   * support a "flush all" operation, we will delay the flush until
12410 12399   * cnum wrap-around, and then flush the TLB one time.  This
12411 12400   * is rather rare, so it's a lot less expensive than making 8000
12412 12401   * x-calls to flush the TLB 8000 times.
12413 12402   *
12414 12403   * A per-process (PP) lock is used to synchronize ctx allocations in
12415 12404   * resume() and ctx invalidations here.
12416 12405   */
12417 12406  static void
12418 12407  sfmmu_invalidate_ctx(sfmmu_t *sfmmup)
12419 12408  {
12420 12409          cpuset_t cpuset;
12421 12410          int cnum, currcnum;
12422 12411          mmu_ctx_t *mmu_ctxp;
12423 12412          int i;
12424 12413          uint_t pstate_save;
12425 12414  
12426 12415          SFMMU_STAT(sf_ctx_inv);
12427 12416  
12428 12417          ASSERT(sfmmu_hat_lock_held(sfmmup));
12429 12418          ASSERT(sfmmup != ksfmmup);
12430 12419  
12431 12420          kpreempt_disable();
12432 12421  
12433 12422          mmu_ctxp = CPU_MMU_CTXP(CPU);
12434 12423          ASSERT(mmu_ctxp);
12435 12424          ASSERT(mmu_ctxp->mmu_idx < max_mmu_ctxdoms);
12436 12425          ASSERT(mmu_ctxp == mmu_ctxs_tbl[mmu_ctxp->mmu_idx]);
12437 12426  
12438 12427          currcnum = sfmmup->sfmmu_ctxs[mmu_ctxp->mmu_idx].cnum;
12439 12428  
12440 12429          pstate_save = sfmmu_disable_intrs();
12441 12430  
12442 12431          lock_set(&sfmmup->sfmmu_ctx_lock);      /* acquire PP lock */
12443 12432          /* set HAT cnum invalid across all context domains. */
12444 12433          for (i = 0; i < max_mmu_ctxdoms; i++) {
12445 12434  
12446 12435                  cnum =  sfmmup->sfmmu_ctxs[i].cnum;
12447 12436                  if (cnum == INVALID_CONTEXT) {
12448 12437                          continue;
12449 12438                  }
12450 12439  
12451 12440                  sfmmup->sfmmu_ctxs[i].cnum = INVALID_CONTEXT;
12452 12441          }
12453 12442          membar_enter(); /* make sure globally visible to all CPUs */
12454 12443          lock_clear(&sfmmup->sfmmu_ctx_lock);    /* release PP lock */
12455 12444  
12456 12445          sfmmu_enable_intrs(pstate_save);
12457 12446  
12458 12447          cpuset = sfmmup->sfmmu_cpusran;
12459 12448          CPUSET_DEL(cpuset, CPU->cpu_id);
12460 12449          CPUSET_AND(cpuset, cpu_ready_set);
12461 12450          if (!CPUSET_ISNULL(cpuset)) {
12462 12451                  SFMMU_XCALL_STATS(sfmmup);
12463 12452                  xt_some(cpuset, sfmmu_raise_tsb_exception,
12464 12453                      (uint64_t)sfmmup, INVALID_CONTEXT);
12465 12454                  xt_sync(cpuset);
12466 12455                  SFMMU_STAT(sf_tsb_raise_exception);
12467 12456                  SFMMU_MMU_STAT(mmu_tsb_raise_exception);
12468 12457          }
12469 12458  
12470 12459          /*
12471 12460           * If the hat being invalidated belongs to the process currently
12472 12461           * running on the local CPU, we need to invalidate
12473 12462           * this CPU's context as well.
12474 12463           */
12475 12464          if ((sfmmu_getctx_sec() == currcnum) &&
12476 12465              (currcnum != INVALID_CONTEXT)) {
12477 12466                  /* sets shared context to INVALID too */
12478 12467                  sfmmu_setctx_sec(INVALID_CONTEXT);
12479 12468                  sfmmu_clear_utsbinfo();
12480 12469          }
12481 12470  
12482 12471          SFMMU_FLAGS_SET(sfmmup, HAT_ALLCTX_INVALID);
12483 12472  
12484 12473          kpreempt_enable();
12485 12474  
12486 12475          /*
12487 12476           * we hold the hat lock, so nobody should allocate a context
12488 12477           * for us yet
12489 12478           */
12490 12479          ASSERT(sfmmup->sfmmu_ctxs[mmu_ctxp->mmu_idx].cnum == INVALID_CONTEXT);
12491 12480  }
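
sfmmu_invalidate_ctx marks every per-domain context number invalid under the small per-process lock, publishes the change with a memory barrier, and only then cross-calls the CPUs that may still be running with a stale context. A rough userland sketch of that ordering using C11 atomics; the PP lock is modeled with an atomic_flag and the barrier with a seq_cst fence, which is an assumption of the sketch rather than a claim about membar_enter() semantics.

    #include <stdatomic.h>

    #define INVALID_CTX     1
    #define MAX_DOMS        4

    /* Toy per-process context table, one slot per MMU context domain. */
    static _Atomic int ctxs[MAX_DOMS];
    static atomic_flag pp_lock = ATOMIC_FLAG_INIT;  /* like sfmmu_ctx_lock */

    static void
    toy_invalidate_ctx(void)
    {
            /* acquire the PP lock (spin, as lock_set() does) */
            while (atomic_flag_test_and_set_explicit(&pp_lock,
                memory_order_acquire))
                    ;

            for (int i = 0; i < MAX_DOMS; i++) {
                    if (atomic_load_explicit(&ctxs[i],
                        memory_order_relaxed) == INVALID_CTX)
                            continue;
                    atomic_store_explicit(&ctxs[i], INVALID_CTX,
                        memory_order_relaxed);
            }
            /* make the invalidations globally visible before cross-calling */
            atomic_thread_fence(memory_order_seq_cst);
            atomic_flag_clear_explicit(&pp_lock, memory_order_release);

            /* ... cross-call CPUs still running this process here ... */
    }

    int
    main(void)
    {
            for (int i = 0; i < MAX_DOMS; i++)
                    atomic_store(&ctxs[i], 100 + i);        /* valid cnums */
            toy_invalidate_ctx();
            return (0);
    }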
12492 12481  
12493 12482  #ifdef VAC
12494 12483  /*
12495 12484   * We need to flush the cache on all cpus.  It is possible that
12496 12485   * a process referenced a page as cacheable but has since exited
12497 12486   * and cleared the mapping list.  We still need to flush it but have no
12498 12487   * state, so flushing on all cpus is the only alternative.
12499 12488   */
12500 12489  void
12501 12490  sfmmu_cache_flush(pfn_t pfnum, int vcolor)
12502 12491  {
12503 12492          cpuset_t cpuset;
12504 12493  
12505 12494          kpreempt_disable();
12506 12495          cpuset = cpu_ready_set;
12507 12496          CPUSET_DEL(cpuset, CPU->cpu_id);
12508 12497          SFMMU_XCALL_STATS(NULL);        /* account to any ctx */
12509 12498          xt_some(cpuset, vac_flushpage_tl1, pfnum, vcolor);
12510 12499          xt_sync(cpuset);
12511 12500          vac_flushpage(pfnum, vcolor);
12512 12501          kpreempt_enable();
12513 12502  }
12514 12503  
12515 12504  void
12516 12505  sfmmu_cache_flushcolor(int vcolor, pfn_t pfnum)
12517 12506  {
12518 12507          cpuset_t cpuset;
12519 12508  
12520 12509          ASSERT(vcolor >= 0);
12521 12510  
12522 12511          kpreempt_disable();
12523 12512          cpuset = cpu_ready_set;
12524 12513          CPUSET_DEL(cpuset, CPU->cpu_id);
12525 12514          SFMMU_XCALL_STATS(NULL);        /* account to any ctx */
12526 12515          xt_some(cpuset, vac_flushcolor_tl1, vcolor, pfnum);
12527 12516          xt_sync(cpuset);
12528 12517          vac_flushcolor(vcolor, pfnum);
12529 12518          kpreempt_enable();
12530 12519  }
12531 12520  #endif  /* VAC */
12532 12521  
12533 12522  /*
12534 12523   * We need to prevent processes from accessing the TSB using a cached physical
12535 12524   * address.  It's alright if they try to access the TSB via virtual address
12536 12525   * since they will just fault on that virtual address once the mapping has
12537 12526   * been suspended.
12538 12527   */
12539 12528  #pragma weak sendmondo_in_recover
12540 12529  
12541 12530  /* ARGSUSED */
12542 12531  static int
12543 12532  sfmmu_tsb_pre_relocator(caddr_t va, uint_t tsbsz, uint_t flags, void *tsbinfo)
12544 12533  {
12545 12534          struct tsb_info *tsbinfop = (struct tsb_info *)tsbinfo;
12546 12535          sfmmu_t *sfmmup = tsbinfop->tsb_sfmmu;
12547 12536          hatlock_t *hatlockp;
12548 12537          sf_scd_t *scdp;
12549 12538  
12550 12539          if (flags != HAT_PRESUSPEND)
12551 12540                  return (0);
12552 12541  
12553 12542           * If the tsb is a shared TSB with TSB_SHAREDCTX set, sfmmup must
12554 12543           * be a shared hat; in that case set the SCD's tsbinfo flag.
12555 12544           * If the tsb is not shared, sfmmup is a private hat; set
12556 12545           * its private tsbinfo flag.
12557 12546           * its private tsbinfo's flag.
12558 12547           */
12559 12548          hatlockp = sfmmu_hat_enter(sfmmup);
12560 12549          tsbinfop->tsb_flags |= TSB_RELOC_FLAG;
12561 12550  
12562 12551          if (!(tsbinfop->tsb_flags & TSB_SHAREDCTX)) {
12563 12552                  sfmmu_tsb_inv_ctx(sfmmup);
12564 12553                  sfmmu_hat_exit(hatlockp);
12565 12554          } else {
12566 12555                  /* release lock on the shared hat */
12567 12556                  sfmmu_hat_exit(hatlockp);
12568 12557                  /* sfmmup is a shared hat */
12569 12558                  ASSERT(sfmmup->sfmmu_scdhat);
12570 12559                  scdp = sfmmup->sfmmu_scdp;
12571 12560                  ASSERT(scdp != NULL);
12572 12561                  /* get private hat from the scd list */
12573 12562                  mutex_enter(&scdp->scd_mutex);
12574 12563                  sfmmup = scdp->scd_sf_list;
12575 12564                  while (sfmmup != NULL) {
12576 12565                          hatlockp = sfmmu_hat_enter(sfmmup);
12577 12566                          /*
12578 12567                           * We do not call sfmmu_tsb_inv_ctx here because
12579 12568                           * sendmondo_in_recover check is only needed for
12580 12569                           * sun4u.
12581 12570                           */
12582 12571                          sfmmu_invalidate_ctx(sfmmup);
12583 12572                          sfmmu_hat_exit(hatlockp);
12584 12573                          sfmmup = sfmmup->sfmmu_scd_link.next;
12585 12574  
12586 12575                  }
12587 12576                  mutex_exit(&scdp->scd_mutex);
12588 12577          }
12589 12578          return (0);
12590 12579  }
12591 12580  
12592 12581  static void
12593 12582  sfmmu_tsb_inv_ctx(sfmmu_t *sfmmup)
12594 12583  {
12595 12584          extern uint32_t sendmondo_in_recover;
12596 12585  
12597 12586          ASSERT(sfmmu_hat_lock_held(sfmmup));
12598 12587  
12599 12588          /*
12600 12589           * For Cheetah+ Erratum 25:
12601 12590           * Wait for any active recovery to finish.  We can't risk
12602 12591           * relocating the TSB of the thread running mondo_recover_proc()
12603 12592           * since, if we did that, we would deadlock.  The scenario we are
12604 12593           * trying to avoid is as follows:
12605 12594           *
12606 12595           * THIS CPU                     RECOVER CPU
12607 12596           * --------                     -----------
12608 12597           *                              Begins recovery, walking through TSB
12609 12598           * hat_pagesuspend() TSB TTE
12610 12599           *                              TLB miss on TSB TTE, spins at TL1
12611 12600           * xt_sync()
12612 12601           *      send_mondo_timeout()
12613 12602           *      mondo_recover_proc()
12614 12603           *      ((deadlocked))
12615 12604           *
12616 12605           * The second half of the workaround is that mondo_recover_proc()
12617 12606           * checks to see if the tsb_info has the RELOC flag set, and if it
12618 12607           * does, it skips over that TSB without ever touching tsbinfop->tsb_va
12619 12608           * and hence avoiding the TLB miss that could result in a deadlock.
12620 12609           */
12621 12610          if (&sendmondo_in_recover) {
12622 12611                  membar_enter(); /* make sure RELOC flag visible */
12623 12612                  while (sendmondo_in_recover) {
12624 12613                          drv_usecwait(1);
12625 12614                          membar_consumer();
12626 12615                  }
12627 12616          }
12628 12617  
12629 12618          sfmmu_invalidate_ctx(sfmmup);
12630 12619  }
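
The Cheetah+ erratum workaround above simply polls sendmondo_in_recover, with a barrier beforehand so the RELOC flag is visible to the recovery CPU and a consumer barrier inside the loop. A tiny sketch of that wait, assuming C11 atomics and usleep() as stand-ins for the membar routines and drv_usecwait(); in_recover is an illustrative variable, not the kernel symbol.

    #include <stdatomic.h>
    #include <unistd.h>

    static _Atomic unsigned int in_recover; /* like sendmondo_in_recover */

    static void
    toy_wait_for_recovery(void)
    {
            /* publish our own state before we start polling */
            atomic_thread_fence(memory_order_seq_cst);
            while (atomic_load_explicit(&in_recover,
                memory_order_acquire) != 0)
                    (void) usleep(1);       /* like drv_usecwait(1) */
    }

    int
    main(void)
    {
            toy_wait_for_recovery();        /* returns at once: flag is 0 */
            return (0);
    }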
12631 12620  
12632 12621  /* ARGSUSED */
12633 12622  static int
12634 12623  sfmmu_tsb_post_relocator(caddr_t va, uint_t tsbsz, uint_t flags,
12635 12624          void *tsbinfo, pfn_t newpfn)
12636 12625  {
12637 12626          hatlock_t *hatlockp;
12638 12627          struct tsb_info *tsbinfop = (struct tsb_info *)tsbinfo;
12639 12628          sfmmu_t *sfmmup = tsbinfop->tsb_sfmmu;
12640 12629  
12641 12630          if (flags != HAT_POSTUNSUSPEND)
12642 12631                  return (0);
12643 12632  
12644 12633          hatlockp = sfmmu_hat_enter(sfmmup);
12645 12634  
12646 12635          SFMMU_STAT(sf_tsb_reloc);
12647 12636  
12648 12637          /*
12649 12638           * The process may have swapped out while we were relocating one
12650 12639           * of its TSBs.  If so, don't bother doing the setup since the
12651 12640           * process can't be using the memory anymore.
12652 12641           */
12653 12642          if ((tsbinfop->tsb_flags & TSB_SWAPPED) == 0) {
12654 12643                  ASSERT(va == tsbinfop->tsb_va);
12655 12644                  sfmmu_tsbinfo_setup_phys(tsbinfop, newpfn);
12656 12645  
12657 12646                  if (tsbinfop->tsb_flags & TSB_FLUSH_NEEDED) {
12658 12647                          sfmmu_inv_tsb(tsbinfop->tsb_va,
12659 12648                              TSB_BYTES(tsbinfop->tsb_szc));
12660 12649                          tsbinfop->tsb_flags &= ~TSB_FLUSH_NEEDED;
12661 12650                  }
12662 12651          }
12663 12652  
12664 12653          membar_exit();
12665 12654          tsbinfop->tsb_flags &= ~TSB_RELOC_FLAG;
12666 12655          cv_broadcast(&sfmmup->sfmmu_tsb_cv);
12667 12656  
12668 12657          sfmmu_hat_exit(hatlockp);
12669 12658  
12670 12659          return (0);
12671 12660  }
12672 12661  
12673 12662  /*
12674 12663   * Allocate and initialize a tsb_info structure.  Note that we may or may not
12675 12664   * allocate a TSB here, depending on the flags passed in.
12676 12665   */
12677 12666  static int
12678 12667  sfmmu_tsbinfo_alloc(struct tsb_info **tsbinfopp, int tsb_szc, int tte_sz_mask,
12679 12668          uint_t flags, sfmmu_t *sfmmup)
12680 12669  {
12681 12670          int err;
12682 12671  
12683 12672          *tsbinfopp = (struct tsb_info *)kmem_cache_alloc(
12684 12673              sfmmu_tsbinfo_cache, KM_SLEEP);
12685 12674  
12686 12675          if ((err = sfmmu_init_tsbinfo(*tsbinfopp, tte_sz_mask,
12687 12676              tsb_szc, flags, sfmmup)) != 0) {
12688 12677                  kmem_cache_free(sfmmu_tsbinfo_cache, *tsbinfopp);
12689 12678                  SFMMU_STAT(sf_tsb_allocfail);
12690 12679                  *tsbinfopp = NULL;
12691 12680                  return (err);
12692 12681          }
12693 12682          SFMMU_STAT(sf_tsb_alloc);
12694 12683  
12695 12684          /*
12696 12685           * Bump the TSB size counters for this TSB size.
12697 12686           */
12698 12687          (*(((int *)&sfmmu_tsbsize_stat) + tsb_szc))++;
12699 12688          return (0);
12700 12689  }
12701 12690  
12702 12691  static void
12703 12692  sfmmu_tsb_free(struct tsb_info *tsbinfo)
12704 12693  {
12705 12694          caddr_t tsbva = tsbinfo->tsb_va;
12706 12695          uint_t tsb_size = TSB_BYTES(tsbinfo->tsb_szc);
12707 12696          struct kmem_cache *kmem_cachep = tsbinfo->tsb_cache;
12708 12697          vmem_t  *vmp = tsbinfo->tsb_vmp;
12709 12698  
12710 12699          /*
12711 12700           * If we allocated this TSB from relocatable kernel memory, then we
12712 12701           * need to uninstall the callback handler.
12713 12702           */
12714 12703          if (tsbinfo->tsb_cache != sfmmu_tsb8k_cache) {
12715 12704                  uintptr_t slab_mask;
12716 12705                  caddr_t slab_vaddr;
12717 12706                  page_t **ppl;
12718 12707                  int ret;
12719 12708  
12720 12709                  ASSERT(tsb_size <= MMU_PAGESIZE4M || use_bigtsb_arena);
12721 12710                  if (tsb_size > MMU_PAGESIZE4M)
12722 12711                          slab_mask = ~((uintptr_t)bigtsb_slab_mask) << PAGESHIFT;
12723 12712                  else
12724 12713                          slab_mask = ~((uintptr_t)tsb_slab_mask) << PAGESHIFT;
12725 12714                  slab_vaddr = (caddr_t)((uintptr_t)tsbva & slab_mask);
12726 12715  
12727 12716                  ret = as_pagelock(&kas, &ppl, slab_vaddr, PAGESIZE, S_WRITE);
12728 12717                  ASSERT(ret == 0);
12729 12718                  hat_delete_callback(tsbva, (uint_t)tsb_size, (void *)tsbinfo,
12730 12719                      0, NULL);
12731 12720                  as_pageunlock(&kas, ppl, slab_vaddr, PAGESIZE, S_WRITE);
12732 12721          }
12733 12722  
12734 12723          if (kmem_cachep != NULL) {
12735 12724                  kmem_cache_free(kmem_cachep, tsbva);
12736 12725          } else {
12737 12726                  vmem_xfree(vmp, (void *)tsbva, tsb_size);
12738 12727          }
12739 12728          tsbinfo->tsb_va = (caddr_t)0xbad00bad;
12740 12729          atomic_add_64(&tsb_alloc_bytes, -(int64_t)tsb_size);
12741 12730  }
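
sfmmu_tsb_free locates the slab root page by clearing the low bits of the TSB address with a mask built from the slab size in pages shifted by PAGESHIFT, and locks only that first page before removing the relocation callback. A short sketch of that mask computation, assuming 8K pages and a 4M slab (512 pages); the TOY_* constants are examples, not the real tunables' values.

    #include <stdint.h>
    #include <stdio.h>

    #define TOY_PAGESHIFT   13                      /* 8K pages */
    #define TOY_SLAB_PAGES  512                     /* 4M slab = 512 x 8K */
    #define TOY_SLAB_MASK   ((uintptr_t)TOY_SLAB_PAGES - 1)

    int
    main(void)
    {
            uintptr_t va = 0x2a3c6000;
            /* clear the page-within-slab bits to find the slab root page */
            uintptr_t slab_mask = ~TOY_SLAB_MASK << TOY_PAGESHIFT;
            uintptr_t slab_root = va & slab_mask;

            (void) printf("va %#lx lives in the slab rooted at %#lx\n",
                (unsigned long)va, (unsigned long)slab_root);
            return (0);
    }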
12742 12731  
12743 12732  static void
12744 12733  sfmmu_tsbinfo_free(struct tsb_info *tsbinfo)
12745 12734  {
12746 12735          if ((tsbinfo->tsb_flags & TSB_SWAPPED) == 0) {
12747 12736                  sfmmu_tsb_free(tsbinfo);
12748 12737          }
12749 12738          kmem_cache_free(sfmmu_tsbinfo_cache, tsbinfo);
12750 12739  
12751 12740  }
12752 12741  
12753 12742  /*
12754 12743   * Setup all the references to physical memory for this tsbinfo.
12755 12744   * The underlying page(s) must be locked.
12756 12745   */
12757 12746  static void
12758 12747  sfmmu_tsbinfo_setup_phys(struct tsb_info *tsbinfo, pfn_t pfn)
12759 12748  {
12760 12749          ASSERT(pfn != PFN_INVALID);
12761 12750          ASSERT(pfn == va_to_pfn(tsbinfo->tsb_va));
12762 12751  
12763 12752  #ifndef sun4v
12764 12753          if (tsbinfo->tsb_szc == 0) {
12765 12754                  sfmmu_memtte(&tsbinfo->tsb_tte, pfn,
12766 12755                      PROT_WRITE|PROT_READ, TTE8K);
12767 12756          } else {
12768 12757                  /*
12769 12758                   * Round down PA and use a large mapping; the handlers will
12770 12759                   * compute the TSB pointer at the correct offset into the
12771 12760                   * big virtual page.  NOTE: this assumes all TSBs larger
12772 12761                   * than 8K must come from physically contiguous slabs of
12773 12762                   * size tsb_slab_size.
12774 12763                   */
12775 12764                  sfmmu_memtte(&tsbinfo->tsb_tte, pfn & ~tsb_slab_mask,
12776 12765                      PROT_WRITE|PROT_READ, tsb_slab_ttesz);
12777 12766          }
12778 12767          tsbinfo->tsb_pa = ptob(pfn);
12779 12768  
12780 12769          TTE_SET_LOCKED(&tsbinfo->tsb_tte); /* lock the tte into dtlb */
12781 12770          TTE_SET_MOD(&tsbinfo->tsb_tte);    /* enable writes */
12782 12771  
12783 12772          ASSERT(TTE_IS_PRIVILEGED(&tsbinfo->tsb_tte));
12784 12773          ASSERT(TTE_IS_LOCKED(&tsbinfo->tsb_tte));
12785 12774  #else /* sun4v */
12786 12775          tsbinfo->tsb_pa = ptob(pfn);
12787 12776  #endif /* sun4v */
12788 12777  }
12789 12778  
12790 12779  
12791 12780  /*
12792 12781   * Returns zero on success, ENOMEM if over the high water mark,
12793 12782   * or EAGAIN if the caller needs to retry with a smaller TSB
12794 12783   * size (or specify TSB_FORCEALLOC if the allocation can't fail).
12795 12784   *
12796 12785   * This call cannot fail to allocate a TSB if TSB_FORCEALLOC
12797 12786   * is specified and the TSB requested is PAGESIZE, though it
12798 12787   * may sleep waiting for memory if sufficient memory is not
12799 12788   * available.
12800 12789   */
12801 12790  static int
12802 12791  sfmmu_init_tsbinfo(struct tsb_info *tsbinfo, int tteszmask,
12803 12792      int tsbcode, uint_t flags, sfmmu_t *sfmmup)
12804 12793  {
12805 12794          caddr_t vaddr = NULL;
12806 12795          caddr_t slab_vaddr;
12807 12796          uintptr_t slab_mask;
12808 12797          int tsbbytes = TSB_BYTES(tsbcode);
12809 12798          int lowmem = 0;
12810 12799          struct kmem_cache *kmem_cachep = NULL;
12811 12800          vmem_t *vmp = NULL;
12812 12801          lgrp_id_t lgrpid = LGRP_NONE;
12813 12802          pfn_t pfn;
12814 12803          uint_t cbflags = HAC_SLEEP;
12815 12804          page_t **pplist;
12816 12805          int ret;
12817 12806  
12818 12807          ASSERT(tsbbytes <= MMU_PAGESIZE4M || use_bigtsb_arena);
12819 12808          if (tsbbytes > MMU_PAGESIZE4M)
12820 12809                  slab_mask = ~((uintptr_t)bigtsb_slab_mask) << PAGESHIFT;
12821 12810          else
12822 12811                  slab_mask = ~((uintptr_t)tsb_slab_mask) << PAGESHIFT;
12823 12812  
12824 12813          if (flags & (TSB_FORCEALLOC | TSB_SWAPIN | TSB_GROW | TSB_SHRINK))
12825 12814                  flags |= TSB_ALLOC;
12826 12815  
12827 12816          ASSERT((flags & TSB_FORCEALLOC) == 0 || tsbcode == TSB_MIN_SZCODE);
12828 12817  
12829 12818          tsbinfo->tsb_sfmmu = sfmmup;
12830 12819  
12831 12820          /*
12832 12821           * If not allocating a TSB, set up the tsbinfo, set TSB_SWAPPED, and
12833 12822           * return.
12834 12823           */
12835 12824          if ((flags & TSB_ALLOC) == 0) {
12836 12825                  tsbinfo->tsb_szc = tsbcode;
12837 12826                  tsbinfo->tsb_ttesz_mask = tteszmask;
12838 12827                  tsbinfo->tsb_va = (caddr_t)0xbadbadbeef;
12839 12828                  tsbinfo->tsb_pa = -1;
12840 12829                  tsbinfo->tsb_tte.ll = 0;
12841 12830                  tsbinfo->tsb_next = NULL;
12842 12831                  tsbinfo->tsb_flags = TSB_SWAPPED;
12843 12832                  tsbinfo->tsb_cache = NULL;
12844 12833                  tsbinfo->tsb_vmp = NULL;
12845 12834                  return (0);
12846 12835          }
12847 12836  
12848 12837  #ifdef DEBUG
12849 12838          /*
12850 12839           * For debugging:
12851 12840           * Randomly force allocation failures every tsb_alloc_mtbf
12852 12841           * tries if TSB_FORCEALLOC is not specified.  This will
12853 12842           * return ENOMEM if tsb_alloc_mtbf is odd, or EAGAIN if
12854 12843           * it is even, to allow testing of both failure paths...
12855 12844           */
12856 12845          if (tsb_alloc_mtbf && ((flags & TSB_FORCEALLOC) == 0) &&
12857 12846              (tsb_alloc_count++ == tsb_alloc_mtbf)) {
12858 12847                  tsb_alloc_count = 0;
12859 12848                  tsb_alloc_fail_mtbf++;
12860 12849                  return ((tsb_alloc_mtbf & 1)? ENOMEM : EAGAIN);
12861 12850          }
12862 12851  #endif  /* DEBUG */
12863 12852  
12864 12853          /*
12865 12854           * Enforce high water mark if we are not doing a forced allocation
12866 12855           * and are not shrinking a process' TSB.
12867 12856           */
12868 12857          if ((flags & TSB_SHRINK) == 0 &&
12869 12858              (tsbbytes + tsb_alloc_bytes) > tsb_alloc_hiwater) {
12870 12859                  if ((flags & TSB_FORCEALLOC) == 0)
12871 12860                          return (ENOMEM);
12872 12861                  lowmem = 1;
12873 12862          }
12874 12863  
12875 12864          /*
12876 12865           * Allocate from the correct location based upon the size of the TSB
12877 12866           * compared to the base page size, and what memory conditions dictate.
12878 12867           * Note we always do nonblocking allocations from the TSB arena since
12879 12868           * we don't want memory fragmentation to cause processes to block
12880 12869           * indefinitely waiting for memory; until the kernel algorithms that
12881 12870           * coalesce large pages are improved this is our best option.
12882 12871           *
12883 12872           * Algorithm:
12884 12873           *      If allocating a "large" TSB (>8K), allocate from the
12885 12874           *              appropriate kmem_tsb_default_arena vmem arena
12886 12875           *      else if low on memory or the TSB_FORCEALLOC flag is set or
12887 12876           *      tsb_forceheap is set
12888 12877           *              Allocate from kernel heap via sfmmu_tsb8k_cache with
12889 12878           *              KM_SLEEP (never fails)
12890 12879           *      else
12891 12880           *              Allocate from appropriate sfmmu_tsb_cache with
12892 12881           *              KM_NOSLEEP
12893 12882           *      endif
12894 12883           */
12895 12884          if (tsb_lgrp_affinity)
12896 12885                  lgrpid = lgrp_home_id(curthread);
12897 12886          if (lgrpid == LGRP_NONE)
12898 12887                  lgrpid = 0;     /* use lgrp of boot CPU */
12899 12888  
12900 12889          if (tsbbytes > MMU_PAGESIZE) {
12901 12890                  if (tsbbytes > MMU_PAGESIZE4M) {
12902 12891                          vmp = kmem_bigtsb_default_arena[lgrpid];
12903 12892                          vaddr = (caddr_t)vmem_xalloc(vmp, tsbbytes, tsbbytes,
12904 12893                              0, 0, NULL, NULL, VM_NOSLEEP);
12905 12894                  } else {
12906 12895                          vmp = kmem_tsb_default_arena[lgrpid];
12907 12896                          vaddr = (caddr_t)vmem_xalloc(vmp, tsbbytes, tsbbytes,
12908 12897                              0, 0, NULL, NULL, VM_NOSLEEP);
12909 12898                  }
12910 12899  #ifdef  DEBUG
12911 12900          } else if (lowmem || (flags & TSB_FORCEALLOC) || tsb_forceheap) {
12912 12901  #else   /* !DEBUG */
12913 12902          } else if (lowmem || (flags & TSB_FORCEALLOC)) {
12914 12903  #endif  /* DEBUG */
12915 12904                  kmem_cachep = sfmmu_tsb8k_cache;
12916 12905                  vaddr = (caddr_t)kmem_cache_alloc(kmem_cachep, KM_SLEEP);
12917 12906                  ASSERT(vaddr != NULL);
12918 12907          } else {
12919 12908                  kmem_cachep = sfmmu_tsb_cache[lgrpid];
12920 12909                  vaddr = (caddr_t)kmem_cache_alloc(kmem_cachep, KM_NOSLEEP);
12921 12910          }
12922 12911  
12923 12912          tsbinfo->tsb_cache = kmem_cachep;
12924 12913          tsbinfo->tsb_vmp = vmp;
12925 12914  
12926 12915          if (vaddr == NULL) {
12927 12916                  return (EAGAIN);
12928 12917          }
12929 12918  
12930 12919          atomic_add_64(&tsb_alloc_bytes, (int64_t)tsbbytes);
12931 12920          kmem_cachep = tsbinfo->tsb_cache;
12932 12921  
12933 12922          /*
12934 12923           * If we are allocating from outside the cage, then we need to
12935 12924           * register a relocation callback handler.  Note that for now
12936 12925           * since pseudo mappings always hang off of the slab's root page,
12937 12926           * we need only lock the first 8K of the TSB slab.  This is a bit
12938 12927           * hacky but it is good for performance.
12939 12928           */
12940 12929          if (kmem_cachep != sfmmu_tsb8k_cache) {
12941 12930                  slab_vaddr = (caddr_t)((uintptr_t)vaddr & slab_mask);
12942 12931                  ret = as_pagelock(&kas, &pplist, slab_vaddr, PAGESIZE, S_WRITE);
12943 12932                  ASSERT(ret == 0);
12944 12933                  ret = hat_add_callback(sfmmu_tsb_cb_id, vaddr, (uint_t)tsbbytes,
12945 12934                      cbflags, (void *)tsbinfo, &pfn, NULL);
12946 12935  
12947 12936                  /*
12948 12937                   * Need to free up resources if we could not successfully
12949 12938                   * add the callback function and return an error condition.
12950 12939                   */
12951 12940                  if (ret != 0) {
12952 12941                          if (kmem_cachep) {
12953 12942                                  kmem_cache_free(kmem_cachep, vaddr);
12954 12943                          } else {
12955 12944                                  vmem_xfree(vmp, (void *)vaddr, tsbbytes);
12956 12945                          }
12957 12946                          as_pageunlock(&kas, pplist, slab_vaddr, PAGESIZE,
12958 12947                              S_WRITE);
12959 12948                          return (EAGAIN);
12960 12949                  }
12961 12950          } else {
12962 12951                  /*
12963 12952                   * Since allocation of 8K TSBs from heap is rare and occurs
12964 12953                   * during memory pressure we allocate them from permanent
12965 12954                   * memory rather than using callbacks to get the PFN.
12966 12955                   */
12967 12956                  pfn = hat_getpfnum(kas.a_hat, vaddr);
12968 12957          }
12969 12958  
12970 12959          tsbinfo->tsb_va = vaddr;
12971 12960          tsbinfo->tsb_szc = tsbcode;
12972 12961          tsbinfo->tsb_ttesz_mask = tteszmask;
12973 12962          tsbinfo->tsb_next = NULL;
12974 12963          tsbinfo->tsb_flags = 0;
12975 12964  
12976 12965          sfmmu_tsbinfo_setup_phys(tsbinfo, pfn);
12977 12966  
12978 12967          sfmmu_inv_tsb(vaddr, tsbbytes);
12979 12968  
12980 12969          if (kmem_cachep != sfmmu_tsb8k_cache) {
12981 12970                  as_pageunlock(&kas, pplist, slab_vaddr, PAGESIZE, S_WRITE);
12982 12971          }
12983 12972  
12984 12973          return (0);
12985 12974  }
12986 12975  
12987 12976  /*
12988 12977   * Initialize per cpu tsb and per cpu tsbmiss_area
12989 12978   */
12990 12979  void
12991 12980  sfmmu_init_tsbs(void)
12992 12981  {
12993 12982          int i;
12994 12983          struct tsbmiss  *tsbmissp;
12995 12984          struct kpmtsbm  *kpmtsbmp;
12996 12985  #ifndef sun4v
12997 12986          extern int      dcache_line_mask;
12998 12987  #endif /* sun4v */
12999 12988          extern uint_t   vac_colors;
13000 12989  
13001 12990          /*
13002 12991           * Init. tsb miss area.
13003 12992           */
13004 12993          tsbmissp = tsbmiss_area;
13005 12994  
13006 12995          for (i = 0; i < NCPU; tsbmissp++, i++) {
13007 12996                  /*
13008 12997                   * initialize the tsbmiss area.
13009 12998                   * Do this for all possible CPUs as some may be added
13010 12999                   * while the system is running. There is no cost to this.
13011 13000                   */
13012 13001                  tsbmissp->ksfmmup = ksfmmup;
13013 13002  #ifndef sun4v
13014 13003                  tsbmissp->dcache_line_mask = (uint16_t)dcache_line_mask;
13015 13004  #endif /* sun4v */
13016 13005                  tsbmissp->khashstart =
13017 13006                      (struct hmehash_bucket *)va_to_pa((caddr_t)khme_hash);
13018 13007                  tsbmissp->uhashstart =
13019 13008                      (struct hmehash_bucket *)va_to_pa((caddr_t)uhme_hash);
13020 13009                  tsbmissp->khashsz = khmehash_num;
13021 13010                  tsbmissp->uhashsz = uhmehash_num;
13022 13011          }
13023 13012  
13024 13013          sfmmu_tsb_cb_id = hat_register_callback('T'<<16 | 'S' << 8 | 'B',
13025 13014              sfmmu_tsb_pre_relocator, sfmmu_tsb_post_relocator, NULL, 0);
13026 13015  
13027 13016          if (kpm_enable == 0)
13028 13017                  return;
13029 13018  
13030 13019          /* -- Begin KPM specific init -- */
13031 13020  
13032 13021          if (kpm_smallpages) {
13033 13022                  /*
13034 13023                   * If we're using base pagesize pages for seg_kpm
13035 13024                   * mappings, we use the kernel TSB since we can't afford
13036 13025                   * to allocate a second huge TSB for these mappings.
13037 13026                   */
13038 13027                  kpm_tsbbase = ktsb_phys? ktsb_pbase : (uint64_t)ktsb_base;
13039 13028                  kpm_tsbsz = ktsb_szcode;
13040 13029                  kpmsm_tsbbase = kpm_tsbbase;
13041 13030                  kpmsm_tsbsz = kpm_tsbsz;
13042 13031          } else {
13043 13032                  /*
13044 13033                   * In VAC conflict case, just put the entries in the
13045 13034                   * kernel 8K indexed TSB for now so we can find them.
13046 13035                   * This could really be changed in the future if we feel
13047 13036                   * the need...
13048 13037                   */
13049 13038                  kpmsm_tsbbase = ktsb_phys? ktsb_pbase : (uint64_t)ktsb_base;
13050 13039                  kpmsm_tsbsz = ktsb_szcode;
13051 13040                  kpm_tsbbase = ktsb_phys? ktsb4m_pbase : (uint64_t)ktsb4m_base;
13052 13041                  kpm_tsbsz = ktsb4m_szcode;
13053 13042          }
13054 13043  
13055 13044          kpmtsbmp = kpmtsbm_area;
13056 13045          for (i = 0; i < NCPU; kpmtsbmp++, i++) {
13057 13046                  /*
13058 13047                   * Initialize the kpmtsbm area.
13059 13048                   * Do this for all possible CPUs as some may be added
13060 13049                   * while the system is running. There is no cost to this.
13061 13050                   */
13062 13051                  kpmtsbmp->vbase = kpm_vbase;
13063 13052                  kpmtsbmp->vend = kpm_vbase + kpm_size * vac_colors;
13064 13053                  kpmtsbmp->sz_shift = kpm_size_shift;
13065 13054                  kpmtsbmp->kpmp_shift = kpmp_shift;
13066 13055                  kpmtsbmp->kpmp2pshft = (uchar_t)kpmp2pshft;
13067 13056                  if (kpm_smallpages == 0) {
13068 13057                          kpmtsbmp->kpmp_table_sz = kpmp_table_sz;
13069 13058                          kpmtsbmp->kpmp_tablepa = va_to_pa(kpmp_table);
13070 13059                  } else {
13071 13060                          kpmtsbmp->kpmp_table_sz = kpmp_stable_sz;
13072 13061                          kpmtsbmp->kpmp_tablepa = va_to_pa(kpmp_stable);
13073 13062                  }
13074 13063                  kpmtsbmp->msegphashpa = va_to_pa(memseg_phash);
13075 13064                  kpmtsbmp->flags = KPMTSBM_ENABLE_FLAG;
13076 13065  #ifdef  DEBUG
13077 13066                  kpmtsbmp->flags |= (kpm_tsbmtl) ?  KPMTSBM_TLTSBM_FLAG : 0;
13078 13067  #endif  /* DEBUG */
13079 13068                  if (ktsb_phys)
13080 13069                          kpmtsbmp->flags |= KPMTSBM_TSBPHYS_FLAG;
13081 13070          }
13082 13071  
13083 13072          /* -- End KPM specific init -- */
13084 13073  }
13085 13074  
13086 13075  /* Avoid using sfmmu_tsbinfo_alloc() to avoid kmem_alloc - no real reason */
13087 13076  struct tsb_info ktsb_info[2];
13088 13077  
13089 13078  /*
13090 13079   * Called from hat_kern_setup() to setup the tsb_info for ksfmmup.
13091 13080   */
13092 13081  void
13093 13082  sfmmu_init_ktsbinfo()
13094 13083  {
13095 13084          ASSERT(ksfmmup != NULL);
13096 13085          ASSERT(ksfmmup->sfmmu_tsb == NULL);
13097 13086          /*
13098 13087           * Allocate tsbinfos for the kernel and copy in data
13099 13088           * to make debugging easier and sun4v setup easier.
13100 13089           */
13101 13090          ktsb_info[0].tsb_sfmmu = ksfmmup;
13102 13091          ktsb_info[0].tsb_szc = ktsb_szcode;
13103 13092          ktsb_info[0].tsb_ttesz_mask = TSB8K|TSB64K|TSB512K;
13104 13093          ktsb_info[0].tsb_va = ktsb_base;
13105 13094          ktsb_info[0].tsb_pa = ktsb_pbase;
13106 13095          ktsb_info[0].tsb_flags = 0;
13107 13096          ktsb_info[0].tsb_tte.ll = 0;
13108 13097          ktsb_info[0].tsb_cache = NULL;
13109 13098  
13110 13099          ktsb_info[1].tsb_sfmmu = ksfmmup;
13111 13100          ktsb_info[1].tsb_szc = ktsb4m_szcode;
13112 13101          ktsb_info[1].tsb_ttesz_mask = TSB4M;
13113 13102          ktsb_info[1].tsb_va = ktsb4m_base;
13114 13103          ktsb_info[1].tsb_pa = ktsb4m_pbase;
13115 13104          ktsb_info[1].tsb_flags = 0;
13116 13105          ktsb_info[1].tsb_tte.ll = 0;
13117 13106          ktsb_info[1].tsb_cache = NULL;
13118 13107  
13119 13108          /* Link them into ksfmmup. */
13120 13109          ktsb_info[0].tsb_next = &ktsb_info[1];
13121 13110          ktsb_info[1].tsb_next = NULL;
13122 13111          ksfmmup->sfmmu_tsb = &ktsb_info[0];
13123 13112  
13124 13113          sfmmu_setup_tsbinfo(ksfmmup);
13125 13114  }
13126 13115  
13127 13116  /*
13128 13117   * Cache the last value returned from va_to_pa().  If the VA specified
13129 13118   * in the current call to cached_va_to_pa() maps to the same page as the
13130 13119   * previous call to cached_va_to_pa(), then compute the PA using the
13131 13120   * cached info; otherwise call va_to_pa().
13132 13121   *
13133 13122   * Note: this function is neither MT-safe nor consistent in the presence
13134 13123   * of multiple, interleaved threads.  This function was created to enable
13135 13124   * an optimization used during boot (at a point when there's only one thread
13136 13125   * executing on the "boot CPU", and before startup_vm() has been called).
13137 13126   */
13138 13127  static uint64_t
13139 13128  cached_va_to_pa(void *vaddr)
13140 13129  {
13141 13130          static uint64_t prev_vaddr_base = 0;
13142 13131          static uint64_t prev_pfn = 0;
13143 13132  
13144 13133          if ((((uint64_t)vaddr) & MMU_PAGEMASK) == prev_vaddr_base) {
13145 13134                  return (prev_pfn | ((uint64_t)vaddr & MMU_PAGEOFFSET));
13146 13135          } else {
13147 13136                  uint64_t pa = va_to_pa(vaddr);
13148 13137  
13149 13138                  if (pa != ((uint64_t)-1)) {
13150 13139                          /*
13151 13140                           * Computed physical address is valid.  Cache its
13152 13141                           * related info for the next cached_va_to_pa() call.
13153 13142                           */
13154 13143                          prev_pfn = pa & MMU_PAGEMASK;
13155 13144                          prev_vaddr_base = ((uint64_t)vaddr) & MMU_PAGEMASK;
13156 13145                  }
13157 13146  
13158 13147                  return (pa);
13159 13148          }
13160 13149  }
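             /*
              * An illustrative sketch of the caching behavior (addr, pa0 and pa1
              * are hypothetical locals in a single-threaded boot-time caller, as
              * described above).  The first call goes through va_to_pa(); the
              * second falls on the same page and is served from the cached pfn,
              * so both results share the same page frame:
              *
              *        pa0 = cached_va_to_pa((void *)addr);
              *        pa1 = cached_va_to_pa((void *)(addr + 0x40));
              *        ASSERT((pa0 & MMU_PAGEMASK) == (pa1 & MMU_PAGEMASK));
              */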
13161 13150  
13162 13151  /*
13163 13152   * Carve up our nucleus hblk region.  We may allocate more hblks than
13164 13153   * asked for due to rounding, but we are guaranteed to have at least
13165 13154   * enough space to allocate the requested number of hblk8's and hblk1's.
13166 13155   */
13167 13156  void
13168 13157  sfmmu_init_nucleus_hblks(caddr_t addr, size_t size, int nhblk8, int nhblk1)
13169 13158  {
13170 13159          struct hme_blk *hmeblkp;
13171 13160          size_t hme8blk_sz, hme1blk_sz;
13172 13161          size_t i;
13173 13162          size_t hblk8_bound;
13174 13163          ulong_t j = 0, k = 0;
13175 13164  
13176 13165          ASSERT(addr != NULL && size != 0);
13177 13166  
13178 13167          /* Need to use proper structure alignment */
13179 13168          hme8blk_sz = roundup(HME8BLK_SZ, sizeof (int64_t));
13180 13169          hme1blk_sz = roundup(HME1BLK_SZ, sizeof (int64_t));
13181 13170  
13182 13171          nucleus_hblk8.list = (void *)addr;
13183 13172          nucleus_hblk8.index = 0;
13184 13173  
13185 13174          /*
13186 13175           * Use as much memory as possible for hblk8's since we
13187 13176           * expect all bop_alloc'ed memory to be allocated in 8k chunks.
13188 13177           * We need to hold back enough space for the hblk1's which
13189 13178           * we'll allocate next.
13190 13179           */
13191 13180          hblk8_bound = size - (nhblk1 * hme1blk_sz) - hme8blk_sz;
13192 13181          for (i = 0; i <= hblk8_bound; i += hme8blk_sz, j++) {
13193 13182                  hmeblkp = (struct hme_blk *)addr;
13194 13183                  addr += hme8blk_sz;
13195 13184                  hmeblkp->hblk_nuc_bit = 1;
13196 13185                  hmeblkp->hblk_nextpa = cached_va_to_pa((caddr_t)hmeblkp);
13197 13186          }
13198 13187          nucleus_hblk8.len = j;
13199 13188          ASSERT(j >= nhblk8);
13200 13189          SFMMU_STAT_ADD(sf_hblk8_ncreate, j);
13201 13190  
13202 13191          nucleus_hblk1.list = (void *)addr;
13203 13192          nucleus_hblk1.index = 0;
13204 13193          for (; i <= (size - hme1blk_sz); i += hme1blk_sz, k++) {
13205 13194                  hmeblkp = (struct hme_blk *)addr;
13206 13195                  addr += hme1blk_sz;
13207 13196                  hmeblkp->hblk_nuc_bit = 1;
13208 13197                  hmeblkp->hblk_nextpa = cached_va_to_pa((caddr_t)hmeblkp);
13209 13198          }
13210 13199          ASSERT(k >= nhblk1);
13211 13200          nucleus_hblk1.len = k;
13212 13201          SFMMU_STAT_ADD(sf_hblk1_ncreate, k);
13213 13202  }
13214 13203  
13215 13204  /*
13216 13205   * This function is currently not supported on this platform. For what
13217 13206   * it's supposed to do, see hat.c and hat_srmmu.c
13218 13207   */
13219 13208  /* ARGSUSED */
13220 13209  faultcode_t
13221 13210  hat_softlock(struct hat *hat, caddr_t addr, size_t *lenp, page_t **ppp,
13222 13211      uint_t flags)
13223 13212  {
13224 13213          return (FC_NOSUPPORT);
13225 13214  }
13226 13215  
13227 13216  /*
13228 13217   * Searches the mapping list of the page for a mapping of the same size. If none
13229 13218   * is found, the corresponding bit is cleared in the p_index field. When large
13230 13219   * pages are more prevalent in the system, we can maintain the mapping list
13231 13220   * in order and we don't have to traverse the list each time. Just check the
13232 13221   * next and prev entries, and if both are of different size, we clear the bit.
13233 13222   */
13234 13223  static void
13235 13224  sfmmu_rm_large_mappings(page_t *pp, int ttesz)
13236 13225  {
13237 13226          struct sf_hment *sfhmep;
13238 13227          int     index;
13239 13228          pgcnt_t npgs;
13240 13229  
13241 13230          ASSERT(ttesz > TTE8K);
13242 13231  
13243 13232          ASSERT(sfmmu_mlist_held(pp));
13244 13233  
13245 13234          ASSERT(PP_ISMAPPED_LARGE(pp));
13246 13235  
13247 13236          /*
13248 13237           * Traverse the mapping list looking for another mapping of the same size,
13249 13238           * since we only want to clear the index field if all mappings of
13250 13239           * that size are gone.
13251 13240           */
13252 13241  
13253 13242          for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) {
13254 13243                  if (IS_PAHME(sfhmep))
13255 13244                          continue;
13256 13245                  if (hme_size(sfhmep) == ttesz) {
13257 13246                          /*
13258 13247                           * another mapping of the same size. don't clear index.
13259 13248                           */
13260 13249                          return;
13261 13250                  }
13262 13251          }
13263 13252  
13264 13253          /*
13265 13254           * Clear the p_index bit for large page.
13266 13255           */
13267 13256          index = PAGESZ_TO_INDEX(ttesz);
13268 13257          npgs = TTEPAGES(ttesz);
13269 13258          while (npgs-- > 0) {
13270 13259                  ASSERT(pp->p_index & index);
13271 13260                  pp->p_index &= ~index;
13272 13261                  pp = PP_PAGENEXT(pp);
13273 13262          }
13274 13263  }
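             /*
              * Worked example of the cleanup above (the numbers follow from the
              * 8K base page size): when the last TTE4M mapping of a page goes
              * away, PAGESZ_TO_INDEX(TTE4M) supplies the p_index bit to clear
              * and TTEPAGES(TTE4M) == 4M / 8K == 512, so the loop clears that
              * bit in each of the 512 constituent page_t structures.
              */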
13275 13264  
13276 13265  /*
13277 13266   * return supported features
13278 13267   */
13279 13268  /* ARGSUSED */
13280 13269  int
13281 13270  hat_supported(enum hat_features feature, void *arg)
13282 13271  {
13283 13272          switch (feature) {
13284 13273          case    HAT_SHARED_PT:
13285 13274          case    HAT_DYNAMIC_ISM_UNMAP:
13286 13275          case    HAT_VMODSORT:
13287 13276                  return (1);
13288 13277          case    HAT_SHARED_REGIONS:
13289 13278                  if (shctx_on)
13290 13279                          return (1);
13291 13280                  else
13292 13281                          return (0);
13293 13282          default:
13294 13283                  return (0);
13295 13284          }
13296 13285  }
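             /*
              * Usage sketch (illustrative only; can_share is a hypothetical
              * local): callers probe for optional capabilities before relying
              * on them, e.g. the shared-region interfaces below are only worth
              * calling when the HAT reports HAT_SHARED_REGIONS:
              *
              *        int can_share = hat_supported(HAT_SHARED_REGIONS, NULL);
              */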
13297 13286  
13298 13287  void
13299 13288  hat_enter(struct hat *hat)
13300 13289  {
13301 13290          hatlock_t       *hatlockp;
13302 13291  
13303 13292          if (hat != ksfmmup) {
13304 13293                  hatlockp = TSB_HASH(hat);
13305 13294                  mutex_enter(HATLOCK_MUTEXP(hatlockp));
13306 13295          }
13307 13296  }
13308 13297  
13309 13298  void
13310 13299  hat_exit(struct hat *hat)
13311 13300  {
13312 13301          hatlock_t       *hatlockp;
13313 13302  
13314 13303          if (hat != ksfmmup) {
13315 13304                  hatlockp = TSB_HASH(hat);
13316 13305                  mutex_exit(HATLOCK_MUTEXP(hatlockp));
13317 13306          }
13318 13307  }
13319 13308  
13320 13309  /*ARGSUSED*/
13321 13310  void
13322 13311  hat_reserve(struct as *as, caddr_t addr, size_t len)
13323 13312  {
13324 13313  }
13325 13314  
13326 13315  static void
13327 13316  hat_kstat_init(void)
13328 13317  {
13329 13318          kstat_t *ksp;
13330 13319  
13331 13320          ksp = kstat_create("unix", 0, "sfmmu_global_stat", "hat",
13332 13321              KSTAT_TYPE_RAW, sizeof (struct sfmmu_global_stat),
13333 13322              KSTAT_FLAG_VIRTUAL);
13334 13323          if (ksp) {
13335 13324                  ksp->ks_data = (void *) &sfmmu_global_stat;
13336 13325                  kstat_install(ksp);
13337 13326          }
13338 13327          ksp = kstat_create("unix", 0, "sfmmu_tsbsize_stat", "hat",
13339 13328              KSTAT_TYPE_RAW, sizeof (struct sfmmu_tsbsize_stat),
13340 13329              KSTAT_FLAG_VIRTUAL);
13341 13330          if (ksp) {
13342 13331                  ksp->ks_data = (void *) &sfmmu_tsbsize_stat;
13343 13332                  kstat_install(ksp);
13344 13333          }
13345 13334          ksp = kstat_create("unix", 0, "sfmmu_percpu_stat", "hat",
13346 13335              KSTAT_TYPE_RAW, sizeof (struct sfmmu_percpu_stat) * NCPU,
13347 13336              KSTAT_FLAG_WRITABLE);
13348 13337          if (ksp) {
13349 13338                  ksp->ks_update = sfmmu_kstat_percpu_update;
13350 13339                  kstat_install(ksp);
13351 13340          }
13352 13341  }
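             /*
              * An illustrative userland sketch of reading these statistics
              * through libkstat (module/instance/name match the kstat_create()
              * calls above; error handling omitted):
              *
              *        kstat_ctl_t *kc = kstat_open();
              *        kstat_t *ksp = kstat_lookup(kc, "unix", 0, "sfmmu_global_stat");
              *        (void) kstat_read(kc, ksp, NULL);
              *        (void) kstat_close(kc);
              */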
13353 13342  
13354 13343  /* ARGSUSED */
13355 13344  static int
13356 13345  sfmmu_kstat_percpu_update(kstat_t *ksp, int rw)
13357 13346  {
13358 13347          struct sfmmu_percpu_stat *cpu_kstat = ksp->ks_data;
13359 13348          struct tsbmiss *tsbm = tsbmiss_area;
13360 13349          struct kpmtsbm *kpmtsbm = kpmtsbm_area;
13361 13350          int i;
13362 13351  
13363 13352          ASSERT(cpu_kstat);
13364 13353          if (rw == KSTAT_READ) {
13365 13354                  for (i = 0; i < NCPU; cpu_kstat++, tsbm++, kpmtsbm++, i++) {
13366 13355                          cpu_kstat->sf_itlb_misses = 0;
13367 13356                          cpu_kstat->sf_dtlb_misses = 0;
13368 13357                          cpu_kstat->sf_utsb_misses = tsbm->utsb_misses -
13369 13358                              tsbm->uprot_traps;
13370 13359                          cpu_kstat->sf_ktsb_misses = tsbm->ktsb_misses +
13371 13360                              kpmtsbm->kpm_tsb_misses - tsbm->kprot_traps;
13372 13361                          cpu_kstat->sf_tsb_hits = 0;
13373 13362                          cpu_kstat->sf_umod_faults = tsbm->uprot_traps;
13374 13363                          cpu_kstat->sf_kmod_faults = tsbm->kprot_traps;
13375 13364                  }
13376 13365          } else {
13377 13366                  /* KSTAT_WRITE is used to clear stats */
13378 13367                  for (i = 0; i < NCPU; tsbm++, kpmtsbm++, i++) {
13379 13368                          tsbm->utsb_misses = 0;
13380 13369                          tsbm->ktsb_misses = 0;
13381 13370                          tsbm->uprot_traps = 0;
13382 13371                          tsbm->kprot_traps = 0;
13383 13372                          kpmtsbm->kpm_dtlb_misses = 0;
13384 13373                          kpmtsbm->kpm_tsb_misses = 0;
13385 13374                  }
13386 13375          }
13387 13376          return (0);
13388 13377  }
13389 13378  
13390 13379  #ifdef  DEBUG
13391 13380  
13392 13381  tte_t  *gorig[NCPU], *gcur[NCPU], *gnew[NCPU];
13393 13382  
13394 13383  /*
13395 13384   * A tte checker. *orig_old is the value we read before cas.
13396 13385   *      *cur is the value returned by cas.
13397 13386   *      *new is the desired value when we do the cas.
13398 13387   *
13399 13388   *      *hmeblkp is currently unused.
13400 13389   */
13401 13390  
13402 13391  /* ARGSUSED */
13403 13392  void
13404 13393  chk_tte(tte_t *orig_old, tte_t *cur, tte_t *new, struct hme_blk *hmeblkp)
13405 13394  {
13406 13395          pfn_t i, j, k;
13407 13396          int cpuid = CPU->cpu_id;
13408 13397  
13409 13398          gorig[cpuid] = orig_old;
13410 13399          gcur[cpuid] = cur;
13411 13400          gnew[cpuid] = new;
13412 13401  
13413 13402  #ifdef lint
13414 13403          hmeblkp = hmeblkp;
13415 13404  #endif
13416 13405  
13417 13406          if (TTE_IS_VALID(orig_old)) {
13418 13407                  if (TTE_IS_VALID(cur)) {
13419 13408                          i = TTE_TO_TTEPFN(orig_old);
13420 13409                          j = TTE_TO_TTEPFN(cur);
13421 13410                          k = TTE_TO_TTEPFN(new);
13422 13411                          if (i != j) {
13423 13412                                  /* remap error? */
13424 13413                                  panic("chk_tte: bad pfn, 0x%lx, 0x%lx", i, j);
13425 13414                          }
13426 13415  
13427 13416                          if (i != k) {
13428 13417                                  /* remap error? */
13429 13418                                  panic("chk_tte: bad pfn2, 0x%lx, 0x%lx", i, k);
13430 13419                          }
13431 13420                  } else {
13432 13421                          if (TTE_IS_VALID(new)) {
13433 13422                                  panic("chk_tte: invalid cur? ");
13434 13423                          }
13435 13424  
13436 13425                          i = TTE_TO_TTEPFN(orig_old);
13437 13426                          k = TTE_TO_TTEPFN(new);
13438 13427                          if (i != k) {
13439 13428                                  panic("chk_tte: bad pfn3, 0x%lx, 0x%lx", i, k);
13440 13429                          }
13441 13430                  }
13442 13431          } else {
13443 13432                  if (TTE_IS_VALID(cur)) {
13444 13433                          j = TTE_TO_TTEPFN(cur);
13445 13434                          if (TTE_IS_VALID(new)) {
13446 13435                                  k = TTE_TO_TTEPFN(new);
13447 13436                                  if (j != k) {
13448 13437                                          panic("chk_tte: bad pfn4, 0x%lx, 0x%lx",
13449 13438                                              j, k);
13450 13439                                  }
13451 13440                          } else {
13452 13441                                  panic("chk_tte: why here?");
13453 13442                          }
13454 13443                  } else {
13455 13444                          if (!TTE_IS_VALID(new)) {
13456 13445                                  panic("chk_tte: why here2 ?");
13457 13446                          }
13458 13447                  }
13459 13448          }
13460 13449  }
13461 13450  
13462 13451  #endif /* DEBUG */
13463 13452  
13464 13453  extern void prefetch_tsbe_read(struct tsbe *);
13465 13454  extern void prefetch_tsbe_write(struct tsbe *);
13466 13455  
13467 13456  
13468 13457  /*
13469 13458   * We want to prefetch 7 cache lines ahead for our read prefetch.  This gives
13470 13459   * us optimal performance on Cheetah+.  You can only have 8 outstanding
13471 13460   * prefetches at any one time, so we opted for 7 read prefetches and 1 write
13472 13461   * prefetch to make the most utilization of the prefetch capability.
13473 13462   */
13474 13463  #define TSBE_PREFETCH_STRIDE (7)
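             /*
              * The arithmetic behind the stride, spelled out (this just restates
              * the numbers above): a TSBE is 16 bytes, so one 64-byte P$ line
              * holds 4 entries and sfmmu_copy_tsb() issues a read prefetch once
              * per 4 entries.  With a stride of 7 the prefetch reaches 7 lines
              * (28 entries) ahead, which is why the copy loop stops prefetching
              * 4 * (TSBE_PREFETCH_STRIDE + 1) = 32 entries before the end of the
              * old TSB.
              */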
13475 13464  
13476 13465  void
13477 13466  sfmmu_copy_tsb(struct tsb_info *old_tsbinfo, struct tsb_info *new_tsbinfo)
13478 13467  {
13479 13468          int old_bytes = TSB_BYTES(old_tsbinfo->tsb_szc);
13480 13469          int new_bytes = TSB_BYTES(new_tsbinfo->tsb_szc);
13481 13470          int old_entries = TSB_ENTRIES(old_tsbinfo->tsb_szc);
13482 13471          int new_entries = TSB_ENTRIES(new_tsbinfo->tsb_szc);
13483 13472          struct tsbe *old;
13484 13473          struct tsbe *new;
13485 13474          struct tsbe *new_base = (struct tsbe *)new_tsbinfo->tsb_va;
13486 13475          uint64_t va;
13487 13476          int new_offset;
13488 13477          int i;
13489 13478          int vpshift;
13490 13479          int last_prefetch;
13491 13480  
13492 13481          if (old_bytes == new_bytes) {
13493 13482                  bcopy(old_tsbinfo->tsb_va, new_tsbinfo->tsb_va, new_bytes);
13494 13483          } else {
13495 13484  
13496 13485                  /*
13497 13486                   * A TSBE is 16 bytes which means there are four TSBE's per
13498 13487                   * P$ line (64 bytes), thus every 4 TSBE's we prefetch.
13499 13488                   */
13500 13489                  old = (struct tsbe *)old_tsbinfo->tsb_va;
13501 13490                  last_prefetch = old_entries - (4*(TSBE_PREFETCH_STRIDE+1));
13502 13491                  for (i = 0; i < old_entries; i++, old++) {
13503 13492                          if (((i & (4-1)) == 0) && (i < last_prefetch))
13504 13493                                  prefetch_tsbe_read(old);
13505 13494                          if (!old->tte_tag.tag_invalid) {
13506 13495                                  /*
13507 13496                                   * We have a valid TTE to remap.  Check the
13508 13497                                   * size.  We won't remap 64K or 512K TTEs
13509 13498                                   * because they span more than one TSB entry
13510 13499                                   * and are indexed using an 8K virt. page.
13511 13500                                   * Ditto for 32M and 256M TTEs.
13512 13501                                   */
13513 13502                                  if (TTE_CSZ(&old->tte_data) == TTE64K ||
13514 13503                                      TTE_CSZ(&old->tte_data) == TTE512K)
13515 13504                                          continue;
13516 13505                                  if (mmu_page_sizes == max_mmu_page_sizes) {
13517 13506                                          if (TTE_CSZ(&old->tte_data) == TTE32M ||
13518 13507                                              TTE_CSZ(&old->tte_data) == TTE256M)
13519 13508                                                  continue;
13520 13509                                  }
13521 13510  
13522 13511                                  /* clear the lower 22 bits of the va */
13523 13512                                  va = *(uint64_t *)old << 22;
13524 13513                                  /* turn va into a virtual pfn */
13525 13514                                  va >>= 22 - TSB_START_SIZE;
13526 13515                                  /*
13527 13516                                   * OR in bits from the offset in the tsb
13528 13517                                   * to get the real virtual pfn. These
13529 13518                                   * correspond to bits [21:13] in the va.
13530 13519                                   */
13531 13520                                  vpshift =
13532 13521                                      TTE_BSZS_SHIFT(TTE_CSZ(&old->tte_data)) &
13533 13522                                      0x1ff;
13534 13523                                  va |= (i << vpshift);
13535 13524                                  va >>= vpshift;
13536 13525                                  new_offset = va & (new_entries - 1);
13537 13526                                  new = new_base + new_offset;
13538 13527                                  prefetch_tsbe_write(new);
13539 13528                                  *new = *old;
13540 13529                          }
13541 13530                  }
13542 13531          }
13543 13532  }
13544 13533  
13545 13534  /*
13546 13535   * unused in sfmmu
13547 13536   */
13548 13537  void
13549 13538  hat_dump(void)
13550 13539  {
13551 13540  }
13552 13541  
13553 13542  /*
13554 13543   * Called when a thread is exiting and we have switched to the kernel address
13555 13544   * space.  Perform the same VM initialization resume() uses when switching
13556 13545   * processes.
13557 13546   *
13558 13547   * Note that sfmmu_load_mmustate() is currently a no-op for kernel threads, but
13559 13548   * we call it anyway in case the semantics change in the future.
13560 13549   */
13561 13550  /*ARGSUSED*/
13562 13551  void
13563 13552  hat_thread_exit(kthread_t *thd)
13564 13553  {
13565 13554          uint_t pgsz_cnum;
13566 13555          uint_t pstate_save;
13567 13556  
13568 13557          ASSERT(thd->t_procp->p_as == &kas);
13569 13558  
13570 13559          pgsz_cnum = KCONTEXT;
13571 13560  #ifdef sun4u
13572 13561          pgsz_cnum |= (ksfmmup->sfmmu_cext << CTXREG_EXT_SHIFT);
13573 13562  #endif
13574 13563  
13575 13564          /*
13576 13565           * Note that sfmmu_load_mmustate() is currently a no-op for
13577 13566           * kernel threads. We need to disable interrupts here,
13578 13567           * simply because otherwise sfmmu_load_mmustate() would panic
13579 13568           * if the caller does not disable interrupts.
13580 13569           */
13581 13570          pstate_save = sfmmu_disable_intrs();
13582 13571  
13583 13572          /* Compatibility Note: hw takes care of MMU_SCONTEXT1 */
13584 13573          sfmmu_setctx_sec(pgsz_cnum);
13585 13574          sfmmu_load_mmustate(ksfmmup);
13586 13575          sfmmu_enable_intrs(pstate_save);
13587 13576  }
13588 13577  
13589 13578  
13590 13579  /*
13591 13580   * SRD support
13592 13581   */
13593 13582  #define SRD_HASH_FUNCTION(vp)   (((((uintptr_t)(vp)) >> 4) ^ \
13594 13583                                      (((uintptr_t)(vp)) >> 11)) & \
13595 13584                                      srd_hashmask)
13596 13585  
13597 13586  /*
13598 13587   * Attach the process to the srd struct associated with the exec vnode
13599 13588   * from which the process is started.
13600 13589   */
13601 13590  void
13602 13591  hat_join_srd(struct hat *sfmmup, vnode_t *evp)
13603 13592  {
13604 13593          uint_t hash = SRD_HASH_FUNCTION(evp);
13605 13594          sf_srd_t *srdp;
13606 13595          sf_srd_t *newsrdp;
13607 13596  
13608 13597          ASSERT(sfmmup != ksfmmup);
13609 13598          ASSERT(sfmmup->sfmmu_srdp == NULL);
13610 13599  
13611 13600          if (!shctx_on) {
13612 13601                  return;
13613 13602          }
13614 13603  
13615 13604          VN_HOLD(evp);
13616 13605  
13617 13606          if (srd_buckets[hash].srdb_srdp != NULL) {
13618 13607                  mutex_enter(&srd_buckets[hash].srdb_lock);
13619 13608                  for (srdp = srd_buckets[hash].srdb_srdp; srdp != NULL;
13620 13609                      srdp = srdp->srd_hash) {
13621 13610                          if (srdp->srd_evp == evp) {
13622 13611                                  ASSERT(srdp->srd_refcnt >= 0);
13623 13612                                  sfmmup->sfmmu_srdp = srdp;
13624 13613                                  atomic_inc_32(
13625 13614                                      (volatile uint_t *)&srdp->srd_refcnt);
13626 13615                                  mutex_exit(&srd_buckets[hash].srdb_lock);
13627 13616                                  return;
13628 13617                          }
13629 13618                  }
13630 13619                  mutex_exit(&srd_buckets[hash].srdb_lock);
13631 13620          }
13632 13621          newsrdp = kmem_cache_alloc(srd_cache, KM_SLEEP);
13633 13622          ASSERT(newsrdp->srd_next_ismrid == 0 && newsrdp->srd_next_hmerid == 0);
13634 13623  
13635 13624          newsrdp->srd_evp = evp;
13636 13625          newsrdp->srd_refcnt = 1;
13637 13626          newsrdp->srd_hmergnfree = NULL;
13638 13627          newsrdp->srd_ismrgnfree = NULL;
13639 13628  
13640 13629          mutex_enter(&srd_buckets[hash].srdb_lock);
13641 13630          for (srdp = srd_buckets[hash].srdb_srdp; srdp != NULL;
13642 13631              srdp = srdp->srd_hash) {
13643 13632                  if (srdp->srd_evp == evp) {
13644 13633                          ASSERT(srdp->srd_refcnt >= 0);
13645 13634                          sfmmup->sfmmu_srdp = srdp;
13646 13635                          atomic_inc_32((volatile uint_t *)&srdp->srd_refcnt);
13647 13636                          mutex_exit(&srd_buckets[hash].srdb_lock);
13648 13637                          kmem_cache_free(srd_cache, newsrdp);
13649 13638                          return;
13650 13639                  }
13651 13640          }
13652 13641          newsrdp->srd_hash = srd_buckets[hash].srdb_srdp;
13653 13642          srd_buckets[hash].srdb_srdp = newsrdp;
13654 13643          sfmmup->sfmmu_srdp = newsrdp;
13655 13644  
13656 13645          mutex_exit(&srd_buckets[hash].srdb_lock);
13657 13646  
13658 13647  }
13659 13648  
13660 13649  static void
13661 13650  sfmmu_leave_srd(sfmmu_t *sfmmup)
13662 13651  {
13663 13652          vnode_t *evp;
13664 13653          sf_srd_t *srdp = sfmmup->sfmmu_srdp;
13665 13654          uint_t hash;
13666 13655          sf_srd_t **prev_srdpp;
13667 13656          sf_region_t *rgnp;
13668 13657          sf_region_t *nrgnp;
13669 13658  #ifdef DEBUG
13670 13659          int rgns = 0;
13671 13660  #endif
13672 13661          int i;
13673 13662  
13674 13663          ASSERT(sfmmup != ksfmmup);
13675 13664          ASSERT(srdp != NULL);
13676 13665          ASSERT(srdp->srd_refcnt > 0);
13677 13666          ASSERT(sfmmup->sfmmu_scdp == NULL);
13678 13667          ASSERT(sfmmup->sfmmu_free == 1);
13679 13668  
13680 13669          sfmmup->sfmmu_srdp = NULL;
13681 13670          evp = srdp->srd_evp;
13682 13671          ASSERT(evp != NULL);
13683 13672          if (atomic_dec_32_nv((volatile uint_t *)&srdp->srd_refcnt)) {
13684 13673                  VN_RELE(evp);
13685 13674                  return;
13686 13675          }
13687 13676  
13688 13677          hash = SRD_HASH_FUNCTION(evp);
13689 13678          mutex_enter(&srd_buckets[hash].srdb_lock);
13690 13679          for (prev_srdpp = &srd_buckets[hash].srdb_srdp;
13691 13680              (srdp = *prev_srdpp) != NULL; prev_srdpp = &srdp->srd_hash) {
13692 13681                  if (srdp->srd_evp == evp) {
13693 13682                          break;
13694 13683                  }
13695 13684          }
13696 13685          if (srdp == NULL || srdp->srd_refcnt) {
13697 13686                  mutex_exit(&srd_buckets[hash].srdb_lock);
13698 13687                  VN_RELE(evp);
13699 13688                  return;
13700 13689          }
13701 13690          *prev_srdpp = srdp->srd_hash;
13702 13691          mutex_exit(&srd_buckets[hash].srdb_lock);
13703 13692  
13704 13693          ASSERT(srdp->srd_refcnt == 0);
13705 13694          VN_RELE(evp);
13706 13695  
13707 13696  #ifdef DEBUG
13708 13697          for (i = 0; i < SFMMU_MAX_REGION_BUCKETS; i++) {
13709 13698                  ASSERT(srdp->srd_rgnhash[i] == NULL);
13710 13699          }
13711 13700  #endif /* DEBUG */
13712 13701  
13713 13702          /* free each hme region in the srd */
13714 13703          for (rgnp = srdp->srd_hmergnfree; rgnp != NULL; rgnp = nrgnp) {
13715 13704                  nrgnp = rgnp->rgn_next;
13716 13705                  ASSERT(rgnp->rgn_id < srdp->srd_next_hmerid);
13717 13706                  ASSERT(rgnp->rgn_refcnt == 0);
13718 13707                  ASSERT(rgnp->rgn_sfmmu_head == NULL);
13719 13708                  ASSERT(rgnp->rgn_flags & SFMMU_REGION_FREE);
13720 13709                  ASSERT(rgnp->rgn_hmeflags == 0);
13721 13710                  ASSERT(srdp->srd_hmergnp[rgnp->rgn_id] == rgnp);
13722 13711  #ifdef DEBUG
13723 13712                  for (i = 0; i < MMU_PAGE_SIZES; i++) {
13724 13713                          ASSERT(rgnp->rgn_ttecnt[i] == 0);
13725 13714                  }
13726 13715                  rgns++;
13727 13716  #endif /* DEBUG */
13728 13717                  kmem_cache_free(region_cache, rgnp);
13729 13718          }
13730 13719          ASSERT(rgns == srdp->srd_next_hmerid);
13731 13720  
13732 13721  #ifdef DEBUG
13733 13722          rgns = 0;
13734 13723  #endif
13735 13724          /* free each ism region in the srd */
13736 13725          for (rgnp = srdp->srd_ismrgnfree; rgnp != NULL; rgnp = nrgnp) {
13737 13726                  nrgnp = rgnp->rgn_next;
13738 13727                  ASSERT(rgnp->rgn_id < srdp->srd_next_ismrid);
13739 13728                  ASSERT(rgnp->rgn_refcnt == 0);
13740 13729                  ASSERT(rgnp->rgn_sfmmu_head == NULL);
13741 13730                  ASSERT(rgnp->rgn_flags & SFMMU_REGION_FREE);
13742 13731                  ASSERT(srdp->srd_ismrgnp[rgnp->rgn_id] == rgnp);
13743 13732  #ifdef DEBUG
13744 13733                  for (i = 0; i < MMU_PAGE_SIZES; i++) {
13745 13734                          ASSERT(rgnp->rgn_ttecnt[i] == 0);
13746 13735                  }
13747 13736                  rgns++;
13748 13737  #endif /* DEBUG */
13749 13738                  kmem_cache_free(region_cache, rgnp);
13750 13739          }
13751 13740          ASSERT(rgns == srdp->srd_next_ismrid);
13752 13741          ASSERT(srdp->srd_ismbusyrgns == 0);
13753 13742          ASSERT(srdp->srd_hmebusyrgns == 0);
13754 13743  
13755 13744          srdp->srd_next_ismrid = 0;
13756 13745          srdp->srd_next_hmerid = 0;
13757 13746  
13758 13747          bzero((void *)srdp->srd_ismrgnp,
13759 13748              sizeof (sf_region_t *) * SFMMU_MAX_ISM_REGIONS);
13760 13749          bzero((void *)srdp->srd_hmergnp,
13761 13750              sizeof (sf_region_t *) * SFMMU_MAX_HME_REGIONS);
13762 13751  
13763 13752          ASSERT(srdp->srd_scdp == NULL);
13764 13753          kmem_cache_free(srd_cache, srdp);
13765 13754  }
13766 13755  
13767 13756  /* ARGSUSED */
13768 13757  static int
13769 13758  sfmmu_srdcache_constructor(void *buf, void *cdrarg, int kmflags)
13770 13759  {
13771 13760          sf_srd_t *srdp = (sf_srd_t *)buf;
13772 13761          bzero(buf, sizeof (*srdp));
13773 13762  
13774 13763          mutex_init(&srdp->srd_mutex, NULL, MUTEX_DEFAULT, NULL);
13775 13764          mutex_init(&srdp->srd_scd_mutex, NULL, MUTEX_DEFAULT, NULL);
13776 13765          return (0);
13777 13766  }
13778 13767  
13779 13768  /* ARGSUSED */
13780 13769  static void
13781 13770  sfmmu_srdcache_destructor(void *buf, void *cdrarg)
13782 13771  {
13783 13772          sf_srd_t *srdp = (sf_srd_t *)buf;
13784 13773  
13785 13774          mutex_destroy(&srdp->srd_mutex);
13786 13775          mutex_destroy(&srdp->srd_scd_mutex);
13787 13776  }
13788 13777  
13789 13778  /*
13790 13779   * The caller makes sure hat_join_region()/hat_leave_region() can't be called
13791 13780   * at the same time for the same process and address range. This is ensured by
13792 13781   * the fact that address space is locked as writer when a process joins the
13793 13782   * regions. Therefore there's no need to hold an srd lock during the entire
13794 13783   * execution of hat_join_region()/hat_leave_region().
13795 13784   */
13796 13785  
13797 13786  #define RGN_HASH_FUNCTION(obj)  (((((uintptr_t)(obj)) >> 4) ^ \
13798 13787                                      (((uintptr_t)(obj)) >> 11)) & \
13799 13788                                          srd_rgn_hashmask)
13800 13789  /*
13801 13790   * This routine implements the shared context functionality required when
13802 13791   * attaching a segment to an address space. It must be called from
13803 13792   * hat_share() for D(ISM) segments and from segvn_create() for segments
13804 13793   * with the MAP_PRIVATE and MAP_TEXT flags set. It returns a region_cookie
13805 13794   * which is saved in the private segment data for hme segments and
13806 13795   * the ism_map structure for ism segments.
13807 13796   */
13808 13797  hat_region_cookie_t
13809 13798  hat_join_region(struct hat *sfmmup,
13810 13799          caddr_t r_saddr,
13811 13800          size_t r_size,
13812 13801          void *r_obj,
13813 13802          u_offset_t r_objoff,
13814 13803          uchar_t r_perm,
13815 13804          uchar_t r_pgszc,
13816 13805          hat_rgn_cb_func_t r_cb_function,
13817 13806          uint_t flags)
13818 13807  {
13819 13808          sf_srd_t *srdp = sfmmup->sfmmu_srdp;
13820 13809          uint_t rhash;
13821 13810          uint_t rid;
13822 13811          hatlock_t *hatlockp;
13823 13812          sf_region_t *rgnp;
13824 13813          sf_region_t *new_rgnp = NULL;
13825 13814          int i;
13826 13815          uint16_t *nextidp;
13827 13816          sf_region_t **freelistp;
13828 13817          int maxids;
13829 13818          sf_region_t **rarrp;
13830 13819          uint16_t *busyrgnsp;
13831 13820          ulong_t rttecnt;
13832 13821          uchar_t tteflag;
13833 13822          uchar_t r_type = flags & HAT_REGION_TYPE_MASK;
13834 13823          int text = (r_type == HAT_REGION_TEXT);
13835 13824  
13836 13825          if (srdp == NULL || r_size == 0) {
13837 13826                  return (HAT_INVALID_REGION_COOKIE);
13838 13827          }
13839 13828  
13840 13829          ASSERT(sfmmup != ksfmmup);
13841 13830          ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as));
13842 13831          ASSERT(srdp->srd_refcnt > 0);
13843 13832          ASSERT(!(flags & ~HAT_REGION_TYPE_MASK));
13844 13833          ASSERT(flags == HAT_REGION_TEXT || flags == HAT_REGION_ISM);
13845 13834          ASSERT(r_pgszc < mmu_page_sizes);
13846 13835          if (!IS_P2ALIGNED(r_saddr, TTEBYTES(r_pgszc)) ||
13847 13836              !IS_P2ALIGNED(r_size, TTEBYTES(r_pgszc))) {
13848 13837                  panic("hat_join_region: region addr or size is not aligned\n");
13849 13838          }
13850 13839  
13851 13840  
13852 13841          r_type = (r_type == HAT_REGION_ISM) ? SFMMU_REGION_ISM :
13853 13842              SFMMU_REGION_HME;
13854 13843          /*
13855 13844           * Currently we only support shared hmes for the read-only main text
13856 13845           * region.
13857 13846           */
13858 13847          if (r_type == SFMMU_REGION_HME && ((r_obj != srdp->srd_evp) ||
13859 13848              (r_perm & PROT_WRITE))) {
13860 13849                  return (HAT_INVALID_REGION_COOKIE);
13861 13850          }
13862 13851  
13863 13852          rhash = RGN_HASH_FUNCTION(r_obj);
13864 13853  
13865 13854          if (r_type == SFMMU_REGION_ISM) {
13866 13855                  nextidp = &srdp->srd_next_ismrid;
13867 13856                  freelistp = &srdp->srd_ismrgnfree;
13868 13857                  maxids = SFMMU_MAX_ISM_REGIONS;
13869 13858                  rarrp = srdp->srd_ismrgnp;
13870 13859                  busyrgnsp = &srdp->srd_ismbusyrgns;
13871 13860          } else {
13872 13861                  nextidp = &srdp->srd_next_hmerid;
13873 13862                  freelistp = &srdp->srd_hmergnfree;
13874 13863                  maxids = SFMMU_MAX_HME_REGIONS;
13875 13864                  rarrp = srdp->srd_hmergnp;
13876 13865                  busyrgnsp = &srdp->srd_hmebusyrgns;
13877 13866          }
13878 13867  
13879 13868          mutex_enter(&srdp->srd_mutex);
13880 13869  
13881 13870          for (rgnp = srdp->srd_rgnhash[rhash]; rgnp != NULL;
13882 13871              rgnp = rgnp->rgn_hash) {
13883 13872                  if (rgnp->rgn_saddr == r_saddr && rgnp->rgn_size == r_size &&
13884 13873                      rgnp->rgn_obj == r_obj && rgnp->rgn_objoff == r_objoff &&
13885 13874                      rgnp->rgn_perm == r_perm && rgnp->rgn_pgszc == r_pgszc) {
13886 13875                          break;
13887 13876                  }
13888 13877          }
13889 13878  
13890 13879  rfound:
13891 13880          if (rgnp != NULL) {
13892 13881                  ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == r_type);
13893 13882                  ASSERT(rgnp->rgn_cb_function == r_cb_function);
13894 13883                  ASSERT(rgnp->rgn_refcnt >= 0);
13895 13884                  rid = rgnp->rgn_id;
13896 13885                  ASSERT(rid < maxids);
13897 13886                  ASSERT(rarrp[rid] == rgnp);
13898 13887                  ASSERT(rid < *nextidp);
13899 13888                  atomic_inc_32((volatile uint_t *)&rgnp->rgn_refcnt);
13900 13889                  mutex_exit(&srdp->srd_mutex);
13901 13890                  if (new_rgnp != NULL) {
13902 13891                          kmem_cache_free(region_cache, new_rgnp);
13903 13892                  }
13904 13893                  if (r_type == SFMMU_REGION_HME) {
13905 13894                          int myjoin =
13906 13895                              (sfmmup == astosfmmu(curthread->t_procp->p_as));
13907 13896  
13908 13897                          sfmmu_link_to_hmeregion(sfmmup, rgnp);
13909 13898                          /*
13910 13899                           * The bitmap should be updated after linking the sfmmu on
13911 13900                           * the region list so that pageunload() doesn't skip the
13912 13901                           * TSB/TLB flush. As soon as the bitmap is updated, another
13913 13902                           * thread in this process can start accessing
13914 13903                           * this region.
13915 13904                           */
13916 13905                          /*
13917 13906                           * Normally ttecnt accounting is done as part of
13918 13907                           * pagefault handling. But a process may not take any
13919 13908                           * pagefaults on shared hmeblks created by some other
13920 13909                           * process. To compensate for this, assume that the
13921 13910                           * entire region will end up faulted in using
13922 13911                           * the region's pagesize.
13923 13912                           *
13924 13913                           */
13925 13914                          if (r_pgszc > TTE8K) {
13926 13915                                  tteflag = 1 << r_pgszc;
13927 13916                                  if (disable_large_pages & tteflag) {
13928 13917                                          tteflag = 0;
13929 13918                                  }
13930 13919                          } else {
13931 13920                                  tteflag = 0;
13932 13921                          }
13933 13922                          if (tteflag && !(sfmmup->sfmmu_rtteflags & tteflag)) {
13934 13923                                  hatlockp = sfmmu_hat_enter(sfmmup);
13935 13924                                  sfmmup->sfmmu_rtteflags |= tteflag;
13936 13925                                  sfmmu_hat_exit(hatlockp);
13937 13926                          }
13938 13927                          hatlockp = sfmmu_hat_enter(sfmmup);
13939 13928  
13940 13929                          /*
13941 13930                           * Preallocate 1/4 of ttecnt's in 8K TSB for >= 4M
13942 13931                           * region to allow for large page allocation failure.
13943 13932                           */
13944 13933                          if (r_pgszc >= TTE4M) {
13945 13934                                  sfmmup->sfmmu_tsb0_4minflcnt +=
13946 13935                                      r_size >> (TTE_PAGE_SHIFT(TTE8K) + 2);
13947 13936                          }
13948 13937  
13949 13938                          /* update sfmmu_ttecnt with the shme rgn ttecnt */
13950 13939                          rttecnt = r_size >> TTE_PAGE_SHIFT(r_pgszc);
13951 13940                          atomic_add_long(&sfmmup->sfmmu_ttecnt[r_pgszc],
13952 13941                              rttecnt);
13953 13942  
13954 13943                          if (text && r_pgszc >= TTE4M &&
13955 13944                              (tteflag || ((disable_large_pages >> TTE4M) &
13956 13945                              ((1 << (r_pgszc - TTE4M + 1)) - 1))) &&
13957 13946                              !SFMMU_FLAGS_ISSET(sfmmup, HAT_4MTEXT_FLAG)) {
13958 13947                                  SFMMU_FLAGS_SET(sfmmup, HAT_4MTEXT_FLAG);
13959 13948                          }
13960 13949  
13961 13950                          sfmmu_hat_exit(hatlockp);
13962 13951                          /*
13963 13952                           * On Panther we need to make sure TLB is programmed
13964 13953                           * to accept 32M/256M pages.  Call
13965 13954                           * sfmmu_check_page_sizes() now to make sure TLB is
13966 13955                           * setup before making hmeregions visible to other
13967 13956                           * threads.
13968 13957                           */
13969 13958                          sfmmu_check_page_sizes(sfmmup, 1);
13970 13959                          hatlockp = sfmmu_hat_enter(sfmmup);
13971 13960                          SF_RGNMAP_ADD(sfmmup->sfmmu_hmeregion_map, rid);
13972 13961  
13973 13962                          /*
13974 13963                           * If the context is invalid, the tsb miss exception code
13975 13964                           * will call sfmmu_check_page_sizes() and update the tsbmiss
13976 13965                           * area later.
13977 13966                           */
13978 13967                          kpreempt_disable();
13979 13968                          if (myjoin &&
13980 13969                              (sfmmup->sfmmu_ctxs[CPU_MMU_IDX(CPU)].cnum
13981 13970                              != INVALID_CONTEXT)) {
13982 13971                                  struct tsbmiss *tsbmp;
13983 13972  
13984 13973                                  tsbmp = &tsbmiss_area[CPU->cpu_id];
13985 13974                                  ASSERT(sfmmup == tsbmp->usfmmup);
13986 13975                                  BT_SET(tsbmp->shmermap, rid);
13987 13976                                  if (r_pgszc > TTE64K) {
13988 13977                                          tsbmp->uhat_rtteflags |= tteflag;
13989 13978                                  }
13990 13979  
13991 13980                          }
13992 13981                          kpreempt_enable();
13993 13982  
13994 13983                          sfmmu_hat_exit(hatlockp);
13995 13984                          ASSERT((hat_region_cookie_t)((uint64_t)rid) !=
13996 13985                              HAT_INVALID_REGION_COOKIE);
13997 13986                  } else {
13998 13987                          hatlockp = sfmmu_hat_enter(sfmmup);
13999 13988                          SF_RGNMAP_ADD(sfmmup->sfmmu_ismregion_map, rid);
14000 13989                          sfmmu_hat_exit(hatlockp);
14001 13990                  }
14002 13991                  ASSERT(rid < maxids);
14003 13992  
14004 13993                  if (r_type == SFMMU_REGION_ISM) {
14005 13994                          sfmmu_find_scd(sfmmup);
14006 13995                  }
14007 13996                  return ((hat_region_cookie_t)((uint64_t)rid));
14008 13997          }
14009 13998  
14010 13999          ASSERT(new_rgnp == NULL);
14011 14000  
14012 14001          if (*busyrgnsp >= maxids) {
14013 14002                  mutex_exit(&srdp->srd_mutex);
14014 14003                  return (HAT_INVALID_REGION_COOKIE);
14015 14004          }
14016 14005  
14017 14006          ASSERT(MUTEX_HELD(&srdp->srd_mutex));
14018 14007          if (*freelistp != NULL) {
14019 14008                  rgnp = *freelistp;
14020 14009                  *freelistp = rgnp->rgn_next;
14021 14010                  ASSERT(rgnp->rgn_id < *nextidp);
14022 14011                  ASSERT(rgnp->rgn_id < maxids);
14023 14012                  ASSERT(rgnp->rgn_flags & SFMMU_REGION_FREE);
14024 14013                  ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK)
14025 14014                      == r_type);
14026 14015                  ASSERT(rarrp[rgnp->rgn_id] == rgnp);
14027 14016                  ASSERT(rgnp->rgn_hmeflags == 0);
14028 14017          } else {
14029 14018                  /*
14030 14019                   * release local locks before memory allocation.
14031 14020                   */
14032 14021                  mutex_exit(&srdp->srd_mutex);
14033 14022  
14034 14023                  new_rgnp = kmem_cache_alloc(region_cache, KM_SLEEP);
14035 14024  
14036 14025                  mutex_enter(&srdp->srd_mutex);
14037 14026                  for (rgnp = srdp->srd_rgnhash[rhash]; rgnp != NULL;
14038 14027                      rgnp = rgnp->rgn_hash) {
14039 14028                          if (rgnp->rgn_saddr == r_saddr &&
14040 14029                              rgnp->rgn_size == r_size &&
14041 14030                              rgnp->rgn_obj == r_obj &&
14042 14031                              rgnp->rgn_objoff == r_objoff &&
14043 14032                              rgnp->rgn_perm == r_perm &&
14044 14033                              rgnp->rgn_pgszc == r_pgszc) {
14045 14034                                  break;
14046 14035                          }
14047 14036                  }
14048 14037                  if (rgnp != NULL) {
14049 14038                          goto rfound;
14050 14039                  }
14051 14040  
14052 14041                  if (*nextidp >= maxids) {
14053 14042                          mutex_exit(&srdp->srd_mutex);
14054 14043                          goto fail;
14055 14044                  }
14056 14045                  rgnp = new_rgnp;
14057 14046                  new_rgnp = NULL;
14058 14047                  rgnp->rgn_id = (*nextidp)++;
14059 14048                  ASSERT(rgnp->rgn_id < maxids);
14060 14049                  ASSERT(rarrp[rgnp->rgn_id] == NULL);
14061 14050                  rarrp[rgnp->rgn_id] = rgnp;
14062 14051          }
14063 14052  
14064 14053          ASSERT(rgnp->rgn_sfmmu_head == NULL);
14065 14054          ASSERT(rgnp->rgn_hmeflags == 0);
14066 14055  #ifdef DEBUG
14067 14056          for (i = 0; i < MMU_PAGE_SIZES; i++) {
14068 14057                  ASSERT(rgnp->rgn_ttecnt[i] == 0);
14069 14058          }
14070 14059  #endif
14071 14060          rgnp->rgn_saddr = r_saddr;
14072 14061          rgnp->rgn_size = r_size;
14073 14062          rgnp->rgn_obj = r_obj;
14074 14063          rgnp->rgn_objoff = r_objoff;
14075 14064          rgnp->rgn_perm = r_perm;
14076 14065          rgnp->rgn_pgszc = r_pgszc;
14077 14066          rgnp->rgn_flags = r_type;
14078 14067          rgnp->rgn_refcnt = 0;
14079 14068          rgnp->rgn_cb_function = r_cb_function;
14080 14069          rgnp->rgn_hash = srdp->srd_rgnhash[rhash];
14081 14070          srdp->srd_rgnhash[rhash] = rgnp;
14082 14071          (*busyrgnsp)++;
14083 14072          ASSERT(*busyrgnsp <= maxids);
14084 14073          goto rfound;
14085 14074  
14086 14075  fail:
14087 14076          ASSERT(new_rgnp != NULL);
14088 14077          kmem_cache_free(region_cache, new_rgnp);
14089 14078          return (HAT_INVALID_REGION_COOKIE);
14090 14079  }
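             /*
              * Caller-side sketch (illustrative; the arguments and the
              * shared_text flag are hypothetical, not lifted from segvn): a
              * text segment joins its region, saves the returned cookie in its
              * private data and uses HAT_INVALID_REGION_COOKIE to detect the
              * cases where no shared region was set up:
              *
              *        cookie = hat_join_region(hat, seg_addr, seg_size, (void *)vp,
              *            off, prot, pgszc, NULL, HAT_REGION_TEXT);
              *        if (cookie == HAT_INVALID_REGION_COOKIE)
              *                shared_text = 0;
              */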
14091 14080  
14092 14081  /*
14093 14082   * This function implements the shared context functionality required
14094 14083   * when detaching a segment from an address space. It must be called
14095 14084   * from hat_unshare() for all D(ISM) segments and from segvn_unmap(),
14096 14085   * for segments with a valid region_cookie.
14097 14086   * It will also be called from all seg_vn routines which change a
14098 14087   * segment's attributes such as segvn_setprot(), segvn_setpagesize(),
14099 14088   * segvn_clrszc() & segvn_advise(), as well as in the case of COW fault
14100 14089   * from segvn_fault().
14101 14090   */
14102 14091  void
14103 14092  hat_leave_region(struct hat *sfmmup, hat_region_cookie_t rcookie, uint_t flags)
14104 14093  {
14105 14094          sf_srd_t *srdp = sfmmup->sfmmu_srdp;
14106 14095          sf_scd_t *scdp;
14107 14096          uint_t rhash;
14108 14097          uint_t rid = (uint_t)((uint64_t)rcookie);
14109 14098          hatlock_t *hatlockp = NULL;
14110 14099          sf_region_t *rgnp;
14111 14100          sf_region_t **prev_rgnpp;
14112 14101          sf_region_t *cur_rgnp;
14113 14102          void *r_obj;
14114 14103          int i;
14115 14104          caddr_t r_saddr;
14116 14105          caddr_t r_eaddr;
14117 14106          size_t  r_size;
14118 14107          uchar_t r_pgszc;
14119 14108          uchar_t r_type = flags & HAT_REGION_TYPE_MASK;
14120 14109  
14121 14110          ASSERT(sfmmup != ksfmmup);
14122 14111          ASSERT(srdp != NULL);
14123 14112          ASSERT(srdp->srd_refcnt > 0);
14124 14113          ASSERT(!(flags & ~HAT_REGION_TYPE_MASK));
14125 14114          ASSERT(flags == HAT_REGION_TEXT || flags == HAT_REGION_ISM);
14126 14115          ASSERT(!sfmmup->sfmmu_free || sfmmup->sfmmu_scdp == NULL);
14127 14116  
14128 14117          r_type = (r_type == HAT_REGION_ISM) ? SFMMU_REGION_ISM :
14129 14118              SFMMU_REGION_HME;
14130 14119  
14131 14120          if (r_type == SFMMU_REGION_ISM) {
14132 14121                  ASSERT(SFMMU_IS_ISMRID_VALID(rid));
14133 14122                  ASSERT(rid < SFMMU_MAX_ISM_REGIONS);
14134 14123                  rgnp = srdp->srd_ismrgnp[rid];
14135 14124          } else {
14136 14125                  ASSERT(SFMMU_IS_SHMERID_VALID(rid));
14137 14126                  ASSERT(rid < SFMMU_MAX_HME_REGIONS);
14138 14127                  rgnp = srdp->srd_hmergnp[rid];
14139 14128          }
14140 14129          ASSERT(rgnp != NULL);
14141 14130          ASSERT(rgnp->rgn_id == rid);
14142 14131          ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == r_type);
14143 14132          ASSERT(!(rgnp->rgn_flags & SFMMU_REGION_FREE));
14144 14133          ASSERT(AS_LOCK_HELD(sfmmup->sfmmu_as));
14145 14134  
14146 14135          if (sfmmup->sfmmu_free) {
14147 14136                  ulong_t rttecnt;
14148 14137                  r_pgszc = rgnp->rgn_pgszc;
14149 14138                  r_size = rgnp->rgn_size;
14150 14139  
14151 14140                  ASSERT(sfmmup->sfmmu_scdp == NULL);
14152 14141                  if (r_type == SFMMU_REGION_ISM) {
14153 14142                          SF_RGNMAP_DEL(sfmmup->sfmmu_ismregion_map, rid);
14154 14143                  } else {
14155 14144                          /* update shme rgns ttecnt in sfmmu_ttecnt */
14156 14145                          rttecnt = r_size >> TTE_PAGE_SHIFT(r_pgszc);
14157 14146                          ASSERT(sfmmup->sfmmu_ttecnt[r_pgszc] >= rttecnt);
14158 14147  
14159 14148                          atomic_add_long(&sfmmup->sfmmu_ttecnt[r_pgszc],
14160 14149                              -rttecnt);
14161 14150  
14162 14151                          SF_RGNMAP_DEL(sfmmup->sfmmu_hmeregion_map, rid);
14163 14152                  }
14164 14153          } else if (r_type == SFMMU_REGION_ISM) {
14165 14154                  hatlockp = sfmmu_hat_enter(sfmmup);
14166 14155                  ASSERT(rid < srdp->srd_next_ismrid);
14167 14156                  SF_RGNMAP_DEL(sfmmup->sfmmu_ismregion_map, rid);
14168 14157                  scdp = sfmmup->sfmmu_scdp;
14169 14158                  if (scdp != NULL &&
14170 14159                      SF_RGNMAP_TEST(scdp->scd_ismregion_map, rid)) {
14171 14160                          sfmmu_leave_scd(sfmmup, r_type);
14172 14161                          ASSERT(sfmmu_hat_lock_held(sfmmup));
14173 14162                  }
14174 14163                  sfmmu_hat_exit(hatlockp);
14175 14164          } else {
14176 14165                  ulong_t rttecnt;
14177 14166                  r_pgszc = rgnp->rgn_pgszc;
14178 14167                  r_saddr = rgnp->rgn_saddr;
14179 14168                  r_size = rgnp->rgn_size;
14180 14169                  r_eaddr = r_saddr + r_size;
14181 14170  
14182 14171                  ASSERT(r_type == SFMMU_REGION_HME);
14183 14172                  hatlockp = sfmmu_hat_enter(sfmmup);
14184 14173                  ASSERT(rid < srdp->srd_next_hmerid);
14185 14174                  SF_RGNMAP_DEL(sfmmup->sfmmu_hmeregion_map, rid);
14186 14175  
14187 14176                  /*
14188 14177                   * If the region is part of an SCD, call sfmmu_leave_scd().
14189 14178                   * Otherwise, if the process is not exiting and has a valid
14190 14179                   * context, just drop the context on the floor to lose stale
14191 14180                   * TLB entries and force an update of the tsb miss area to
14192 14181                   * reflect the new region map.  After that, clean our TSB entries.
14193 14182                   */
14194 14183                  scdp = sfmmup->sfmmu_scdp;
14195 14184                  if (scdp != NULL &&
14196 14185                      SF_RGNMAP_TEST(scdp->scd_hmeregion_map, rid)) {
14197 14186                          sfmmu_leave_scd(sfmmup, r_type);
14198 14187                          ASSERT(sfmmu_hat_lock_held(sfmmup));
14199 14188                  }
14200 14189                  sfmmu_invalidate_ctx(sfmmup);
14201 14190  
14202 14191                  i = TTE8K;
14203 14192                  while (i < mmu_page_sizes) {
14204 14193                          if (rgnp->rgn_ttecnt[i] != 0) {
14205 14194                                  sfmmu_unload_tsb_range(sfmmup, r_saddr,
14206 14195                                      r_eaddr, i);
14207 14196                                  if (i < TTE4M) {
14208 14197                                          i = TTE4M;
14209 14198                                          continue;
14210 14199                                  } else {
14211 14200                                          break;
14212 14201                                  }
14213 14202                          }
14214 14203                          i++;
14215 14204                  }
14216 14205                  /* Remove the preallocated 1/4 8k ttecnt for 4M regions. */
14217 14206                  if (r_pgszc >= TTE4M) {
14218 14207                          rttecnt = r_size >> (TTE_PAGE_SHIFT(TTE8K) + 2);
14219 14208                          ASSERT(sfmmup->sfmmu_tsb0_4minflcnt >=
14220 14209                              rttecnt);
14221 14210                          sfmmup->sfmmu_tsb0_4minflcnt -= rttecnt;
14222 14211                  }
14223 14212  
14224 14213                  /* update shme rgns ttecnt in sfmmu_ttecnt */
14225 14214                  rttecnt = r_size >> TTE_PAGE_SHIFT(r_pgszc);
14226 14215                  ASSERT(sfmmup->sfmmu_ttecnt[r_pgszc] >= rttecnt);
14227 14216                  atomic_add_long(&sfmmup->sfmmu_ttecnt[r_pgszc], -rttecnt);
14228 14217  
14229 14218                  sfmmu_hat_exit(hatlockp);
14230 14219                  if (scdp != NULL && sfmmup->sfmmu_scdp == NULL) {
14231 14220                          /* sfmmup left the scd, grow private tsb */
14232 14221                          sfmmu_check_page_sizes(sfmmup, 1);
14233 14222                  } else {
14234 14223                          sfmmu_check_page_sizes(sfmmup, 0);
14235 14224                  }
14236 14225          }
14237 14226  
14238 14227          if (r_type == SFMMU_REGION_HME) {
14239 14228                  sfmmu_unlink_from_hmeregion(sfmmup, rgnp);
14240 14229          }
14241 14230  
14242 14231          r_obj = rgnp->rgn_obj;
14243 14232          if (atomic_dec_32_nv((volatile uint_t *)&rgnp->rgn_refcnt)) {
14244 14233                  return;
14245 14234          }
14246 14235  
14247 14236          /*
14248 14237           * looks like nobody uses this region anymore. Free it.
14249 14238           */
14250 14239          rhash = RGN_HASH_FUNCTION(r_obj);
14251 14240          mutex_enter(&srdp->srd_mutex);
14252 14241          for (prev_rgnpp = &srdp->srd_rgnhash[rhash];
14253 14242              (cur_rgnp = *prev_rgnpp) != NULL;
14254 14243              prev_rgnpp = &cur_rgnp->rgn_hash) {
14255 14244                  if (cur_rgnp == rgnp && cur_rgnp->rgn_refcnt == 0) {
14256 14245                          break;
14257 14246                  }
14258 14247          }
14259 14248  
14260 14249          if (cur_rgnp == NULL) {
14261 14250                  mutex_exit(&srdp->srd_mutex);
14262 14251                  return;
14263 14252          }
14264 14253  
14265 14254          ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == r_type);
14266 14255          *prev_rgnpp = rgnp->rgn_hash;
14267 14256          if (r_type == SFMMU_REGION_ISM) {
14268 14257                  rgnp->rgn_flags |= SFMMU_REGION_FREE;
14269 14258                  ASSERT(rid < srdp->srd_next_ismrid);
14270 14259                  rgnp->rgn_next = srdp->srd_ismrgnfree;
14271 14260                  srdp->srd_ismrgnfree = rgnp;
14272 14261                  ASSERT(srdp->srd_ismbusyrgns > 0);
14273 14262                  srdp->srd_ismbusyrgns--;
14274 14263                  mutex_exit(&srdp->srd_mutex);
14275 14264                  return;
14276 14265          }
14277 14266          mutex_exit(&srdp->srd_mutex);
14278 14267  
14279 14268          /*
14280 14269           * Destroy region's hmeblks.
14281 14270           */
14282 14271          sfmmu_unload_hmeregion(srdp, rgnp);
14283 14272  
14284 14273          rgnp->rgn_hmeflags = 0;
14285 14274  
14286 14275          ASSERT(rgnp->rgn_sfmmu_head == NULL);
14287 14276          ASSERT(rgnp->rgn_id == rid);
14288 14277          for (i = 0; i < MMU_PAGE_SIZES; i++) {
14289 14278                  rgnp->rgn_ttecnt[i] = 0;
14290 14279          }
14291 14280          rgnp->rgn_flags |= SFMMU_REGION_FREE;
14292 14281          mutex_enter(&srdp->srd_mutex);
14293 14282          ASSERT(rid < srdp->srd_next_hmerid);
14294 14283          rgnp->rgn_next = srdp->srd_hmergnfree;
14295 14284          srdp->srd_hmergnfree = rgnp;
14296 14285          ASSERT(srdp->srd_hmebusyrgns > 0);
14297 14286          srdp->srd_hmebusyrgns--;
14298 14287          mutex_exit(&srdp->srd_mutex);
14299 14288  }
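
A minimal standalone sketch of the release pattern at the tail of hat_leave_region() above: drop the region's reference count and, only when it reaches zero, walk the hash bucket with a pointer-to-pointer so the matching element can be unlinked without special-casing the bucket head, then park it on a free list. The names below (struct region, region_put, hashbucket, freelist) are hypothetical, and the locking/atomics of the real code (srd_mutex, atomic_dec_32_nv()) are omitted for brevity.

        #include <stddef.h>

        struct region {
                struct region   *next;          /* hash-chain link */
                unsigned int    refcnt;
        };

        static struct region *hashbucket;       /* one bucket, for brevity */
        static struct region *freelist;

        static void
        region_put(struct region *rp)
        {
                struct region **prevp;

                if (--rp->refcnt != 0)          /* still referenced elsewhere */
                        return;

                /* Unlink from the hash chain, then push onto the free list. */
                for (prevp = &hashbucket; *prevp != NULL;
                    prevp = &(*prevp)->next) {
                        if (*prevp == rp) {
                                *prevp = rp->next;
                                rp->next = freelist;
                                freelist = rp;
                                return;
                        }
                }
        }
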
14300 14289  
14301 14290  /*
14302 14291   * For now only called for hmeblk regions and not for ISM regions.
14303 14292   */
14304 14293  void
14305 14294  hat_dup_region(struct hat *sfmmup, hat_region_cookie_t rcookie)
14306 14295  {
14307 14296          sf_srd_t *srdp = sfmmup->sfmmu_srdp;
14308 14297          uint_t rid = (uint_t)((uint64_t)rcookie);
14309 14298          sf_region_t *rgnp;
14310 14299          sf_rgn_link_t *rlink;
14311 14300          sf_rgn_link_t *hrlink;
14312 14301          ulong_t rttecnt;
14313 14302  
14314 14303          ASSERT(sfmmup != ksfmmup);
14315 14304          ASSERT(srdp != NULL);
14316 14305          ASSERT(srdp->srd_refcnt > 0);
14317 14306  
14318 14307          ASSERT(rid < srdp->srd_next_hmerid);
14319 14308          ASSERT(SFMMU_IS_SHMERID_VALID(rid));
14320 14309          ASSERT(rid < SFMMU_MAX_HME_REGIONS);
14321 14310  
14322 14311          rgnp = srdp->srd_hmergnp[rid];
14323 14312          ASSERT(rgnp->rgn_refcnt > 0);
14324 14313          ASSERT(rgnp->rgn_id == rid);
14325 14314          ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == SFMMU_REGION_HME);
14326 14315          ASSERT(!(rgnp->rgn_flags & SFMMU_REGION_FREE));
14327 14316  
14328 14317          atomic_inc_32((volatile uint_t *)&rgnp->rgn_refcnt);
14329 14318  
14330 14319          /* LINTED: constant in conditional context */
14331 14320          SFMMU_HMERID2RLINKP(sfmmup, rid, rlink, 1, 0);
14332 14321          ASSERT(rlink != NULL);
14333 14322          mutex_enter(&rgnp->rgn_mutex);
14334 14323          ASSERT(rgnp->rgn_sfmmu_head != NULL);
14335 14324          /* LINTED: constant in conditional context */
14336 14325          SFMMU_HMERID2RLINKP(rgnp->rgn_sfmmu_head, rid, hrlink, 0, 0);
14337 14326          ASSERT(hrlink != NULL);
14338 14327          ASSERT(hrlink->prev == NULL);
14339 14328          rlink->next = rgnp->rgn_sfmmu_head;
14340 14329          rlink->prev = NULL;
14341 14330          hrlink->prev = sfmmup;
14342 14331          /*
14343 14332           * make sure rlink's next field is correct
14344 14333           * before making this link visible.
14345 14334           */
14346 14335          membar_stst();
14347 14336          rgnp->rgn_sfmmu_head = sfmmup;
14348 14337          mutex_exit(&rgnp->rgn_mutex);
14349 14338  
14350 14339          /* update sfmmu_ttecnt with the shme rgn ttecnt */
14351 14340          rttecnt = rgnp->rgn_size >> TTE_PAGE_SHIFT(rgnp->rgn_pgszc);
14352 14341          atomic_add_long(&sfmmup->sfmmu_ttecnt[rgnp->rgn_pgszc], rttecnt);
14353 14342          /* update tsb0 inflation count */
14354 14343          if (rgnp->rgn_pgszc >= TTE4M) {
14355 14344                  sfmmup->sfmmu_tsb0_4minflcnt +=
14356 14345                      rgnp->rgn_size >> (TTE_PAGE_SHIFT(TTE8K) + 2);
14357 14346          }
14358 14347          /*
14359 14348           * Update regionid bitmask without hat lock since no other thread
14360 14349           * can update this region bitmask right now.
14361 14350           */
14362 14351          SF_RGNMAP_ADD(sfmmup->sfmmu_hmeregion_map, rid);
14363 14352  }
14364 14353  
14365 14354  /* ARGSUSED */
14366 14355  static int
14367 14356  sfmmu_rgncache_constructor(void *buf, void *cdrarg, int kmflags)
14368 14357  {
14369 14358          sf_region_t *rgnp = (sf_region_t *)buf;
14370 14359          bzero(buf, sizeof (*rgnp));
14371 14360  
14372 14361          mutex_init(&rgnp->rgn_mutex, NULL, MUTEX_DEFAULT, NULL);
14373 14362  
14374 14363          return (0);
14375 14364  }
14376 14365  
14377 14366  /* ARGSUSED */
14378 14367  static void
14379 14368  sfmmu_rgncache_destructor(void *buf, void *cdrarg)
14380 14369  {
14381 14370          sf_region_t *rgnp = (sf_region_t *)buf;
14382 14371          mutex_destroy(&rgnp->rgn_mutex);
14383 14372  }
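
The constructor/destructor pair above initializes only the expensive-to-rebuild part of a cached object (its mutex) when a buffer enters the cache and tears it down when the buffer finally leaves. A minimal portable sketch of the same convention, using pthreads instead of the kernel's kmem/mutex interfaces; the names struct obj, obj_construct and obj_destruct are hypothetical.

        #include <pthread.h>
        #include <string.h>

        struct obj {
                pthread_mutex_t lock;
                int             state;
        };

        /* Runs once when a buffer enters the cache: zero it, set up the lock. */
        static int
        obj_construct(void *buf)
        {
                struct obj *op = buf;

                memset(op, 0, sizeof (*op));
                return (pthread_mutex_init(&op->lock, NULL));
        }

        /* Runs once when the buffer finally leaves the cache. */
        static void
        obj_destruct(void *buf)
        {
                struct obj *op = buf;

                (void) pthread_mutex_destroy(&op->lock);
        }
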
14384 14373  
14385 14374  static int
14386 14375  sfrgnmap_isnull(sf_region_map_t *map)
14387 14376  {
14388 14377          int i;
14389 14378  
14390 14379          for (i = 0; i < SFMMU_RGNMAP_WORDS; i++) {
14391 14380                  if (map->bitmap[i] != 0) {
14392 14381                          return (0);
14393 14382                  }
14394 14383          }
14395 14384          return (1);
14396 14385  }
14397 14386  
14398 14387  static int
14399 14388  sfhmergnmap_isnull(sf_hmeregion_map_t *map)
14400 14389  {
14401 14390          int i;
14402 14391  
14403 14392          for (i = 0; i < SFMMU_HMERGNMAP_WORDS; i++) {
14404 14393                  if (map->bitmap[i] != 0) {
14405 14394                          return (0);
14406 14395                  }
14407 14396          }
14408 14397          return (1);
14409 14398  }
14410 14399  
14411 14400  #ifdef DEBUG
14412 14401  static void
14413 14402  check_scd_sfmmu_list(sfmmu_t **headp, sfmmu_t *sfmmup, int onlist)
14414 14403  {
14415 14404          sfmmu_t *sp;
14416 14405          sf_srd_t *srdp = sfmmup->sfmmu_srdp;
14417 14406  
14418 14407          for (sp = *headp; sp != NULL; sp = sp->sfmmu_scd_link.next) {
14419 14408                  ASSERT(srdp == sp->sfmmu_srdp);
14420 14409                  if (sp == sfmmup) {
14421 14410                          if (onlist) {
14422 14411                                  return;
14423 14412                          } else {
14424 14413                                  panic("shctx: sfmmu 0x%p found on scd "
14425 14414                                      "list 0x%p", (void *)sfmmup,
14426 14415                                      (void *)*headp);
14427 14416                          }
14428 14417                  }
14429 14418          }
14430 14419          if (onlist) {
14431 14420                  panic("shctx: sfmmu 0x%p not found on scd list 0x%p",
14432 14421                      (void *)sfmmup, (void *)*headp);
14433 14422          } else {
14434 14423                  return;
14435 14424          }
14436 14425  }
14437 14426  #else /* DEBUG */
14438 14427  #define check_scd_sfmmu_list(headp, sfmmup, onlist)
14439 14428  #endif /* DEBUG */
14440 14429  
14441 14430  /*
14442 14431   * Removes an sfmmu from the SCD sfmmu list.
14443 14432   */
14444 14433  static void
14445 14434  sfmmu_from_scd_list(sfmmu_t **headp, sfmmu_t *sfmmup)
14446 14435  {
14447 14436          ASSERT(sfmmup->sfmmu_srdp != NULL);
14448 14437          check_scd_sfmmu_list(headp, sfmmup, 1);
14449 14438          if (sfmmup->sfmmu_scd_link.prev != NULL) {
14450 14439                  ASSERT(*headp != sfmmup);
14451 14440                  sfmmup->sfmmu_scd_link.prev->sfmmu_scd_link.next =
14452 14441                      sfmmup->sfmmu_scd_link.next;
14453 14442          } else {
14454 14443                  ASSERT(*headp == sfmmup);
14455 14444                  *headp = sfmmup->sfmmu_scd_link.next;
14456 14445          }
14457 14446          if (sfmmup->sfmmu_scd_link.next != NULL) {
14458 14447                  sfmmup->sfmmu_scd_link.next->sfmmu_scd_link.prev =
14459 14448                      sfmmup->sfmmu_scd_link.prev;
14460 14449          }
14461 14450  }
14462 14451  
14463 14452  
14464 14453  /*
14465 14454   * Adds an sfmmu to the start of the queue.
14466 14455   */
14467 14456  static void
14468 14457  sfmmu_to_scd_list(sfmmu_t **headp, sfmmu_t *sfmmup)
14469 14458  {
14470 14459          check_scd_sfmmu_list(headp, sfmmup, 0);
14471 14460          sfmmup->sfmmu_scd_link.prev = NULL;
14472 14461          sfmmup->sfmmu_scd_link.next = *headp;
14473 14462          if (*headp != NULL)
14474 14463                  (*headp)->sfmmu_scd_link.prev = sfmmup;
14475 14464          *headp = sfmmup;
14476 14465  }
14477 14466  
14478 14467  /*
14479 14468   * Remove an scd from the queue.
14480 14469   */
14481 14470  static void
14482 14471  sfmmu_remove_scd(sf_scd_t **headp, sf_scd_t *scdp)
14483 14472  {
14484 14473          if (scdp->scd_prev != NULL) {
14485 14474                  ASSERT(*headp != scdp);
14486 14475                  scdp->scd_prev->scd_next = scdp->scd_next;
14487 14476          } else {
14488 14477                  ASSERT(*headp == scdp);
14489 14478                  *headp = scdp->scd_next;
14490 14479          }
14491 14480  
14492 14481          if (scdp->scd_next != NULL) {
14493 14482                  scdp->scd_next->scd_prev = scdp->scd_prev;
14494 14483          }
14495 14484  }
14496 14485  
14497 14486  /*
14498 14487   * Add an scd to the start of the queue.
14499 14488   */
14500 14489  static void
14501 14490  sfmmu_add_scd(sf_scd_t **headp, sf_scd_t *scdp)
14502 14491  {
14503 14492          scdp->scd_prev = NULL;
14504 14493          scdp->scd_next = *headp;
14505 14494          if (*headp != NULL) {
14506 14495                  (*headp)->scd_prev = scdp;
14507 14496          }
14508 14497          *headp = scdp;
14509 14498  }
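
sfmmu_from_scd_list(), sfmmu_to_scd_list(), sfmmu_remove_scd() and sfmmu_add_scd() above all manipulate intrusive doubly-linked lists addressed only through a head pointer, so removal must distinguish the head element (fix *headp) from an interior one (fix prev->next). A minimal generic sketch of that pattern; struct node, list_add_head and list_remove are hypothetical names.

        #include <stddef.h>

        struct node {
                struct node *prev;
                struct node *next;
        };

        /* Insert at the head of the list. */
        static void
        list_add_head(struct node **headp, struct node *np)
        {
                np->prev = NULL;
                np->next = *headp;
                if (*headp != NULL)
                        (*headp)->prev = np;
                *headp = np;
        }

        /* Remove from anywhere in the list. */
        static void
        list_remove(struct node **headp, struct node *np)
        {
                if (np->prev != NULL)
                        np->prev->next = np->next;
                else
                        *headp = np->next;      /* np was the head */
                if (np->next != NULL)
                        np->next->prev = np->prev;
        }
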
14510 14499  
14511 14500  static int
14512 14501  sfmmu_alloc_scd_tsbs(sf_srd_t *srdp, sf_scd_t *scdp)
14513 14502  {
14514 14503          uint_t rid;
14515 14504          uint_t i;
14516 14505          uint_t j;
14517 14506          ulong_t w;
14518 14507          sf_region_t *rgnp;
14519 14508          ulong_t tte8k_cnt = 0;
14520 14509          ulong_t tte4m_cnt = 0;
14521 14510          uint_t tsb_szc;
14522 14511          sfmmu_t *scsfmmup = scdp->scd_sfmmup;
14523 14512          sfmmu_t *ism_hatid;
14524 14513          struct tsb_info *newtsb;
14525 14514          int szc;
14526 14515  
14527 14516          ASSERT(srdp != NULL);
14528 14517  
14529 14518          for (i = 0; i < SFMMU_RGNMAP_WORDS; i++) {
14530 14519                  if ((w = scdp->scd_region_map.bitmap[i]) == 0) {
14531 14520                          continue;
14532 14521                  }
14533 14522                  j = 0;
14534 14523                  while (w) {
14535 14524                          if (!(w & 0x1)) {
14536 14525                                  j++;
14537 14526                                  w >>= 1;
14538 14527                                  continue;
14539 14528                          }
14540 14529                          rid = (i << BT_ULSHIFT) | j;
14541 14530                          j++;
14542 14531                          w >>= 1;
14543 14532  
14544 14533                          if (rid < SFMMU_MAX_HME_REGIONS) {
14545 14534                                  rgnp = srdp->srd_hmergnp[rid];
14546 14535                                  ASSERT(rgnp->rgn_id == rid);
14547 14536                                  ASSERT(rgnp->rgn_refcnt > 0);
14548 14537  
14549 14538                                  if (rgnp->rgn_pgszc < TTE4M) {
14550 14539                                          tte8k_cnt += rgnp->rgn_size >>
14551 14540                                              TTE_PAGE_SHIFT(TTE8K);
14552 14541                                  } else {
14553 14542                                          ASSERT(rgnp->rgn_pgszc >= TTE4M);
14554 14543                                          tte4m_cnt += rgnp->rgn_size >>
14555 14544                                              TTE_PAGE_SHIFT(TTE4M);
14556 14545                                          /*
14557 14546                                           * Inflate SCD tsb0 by preallocating
14558 14547                                           * 1/4 8k ttecnt for 4M regions to
14559 14548                                           * allow for lgpg alloc failure.
14560 14549                                           */
14561 14550                                          tte8k_cnt += rgnp->rgn_size >>
14562 14551                                              (TTE_PAGE_SHIFT(TTE8K) + 2);
14563 14552                                  }
14564 14553                          } else {
14565 14554                                  rid -= SFMMU_MAX_HME_REGIONS;
14566 14555                                  rgnp = srdp->srd_ismrgnp[rid];
14567 14556                                  ASSERT(rgnp->rgn_id == rid);
14568 14557                                  ASSERT(rgnp->rgn_refcnt > 0);
14569 14558  
14570 14559                                  ism_hatid = (sfmmu_t *)rgnp->rgn_obj;
14571 14560                                  ASSERT(ism_hatid->sfmmu_ismhat);
14572 14561  
14573 14562                                  for (szc = 0; szc < TTE4M; szc++) {
14574 14563                                          tte8k_cnt +=
14575 14564                                              ism_hatid->sfmmu_ttecnt[szc] <<
14576 14565                                              TTE_BSZS_SHIFT(szc);
14577 14566                                  }
14578 14567  
14579 14568                                  ASSERT(rgnp->rgn_pgszc >= TTE4M);
14580 14569                                  if (rgnp->rgn_pgszc >= TTE4M) {
14581 14570                                          tte4m_cnt += rgnp->rgn_size >>
14582 14571                                              TTE_PAGE_SHIFT(TTE4M);
14583 14572                                  }
14584 14573                          }
14585 14574                  }
14586 14575          }
14587 14576  
14588 14577          tsb_szc = SELECT_TSB_SIZECODE(tte8k_cnt);
14589 14578  
14590 14579          /* Allocate both the SCD TSBs here. */
14591 14580          if (sfmmu_tsbinfo_alloc(&scsfmmup->sfmmu_tsb,
14592 14581              tsb_szc, TSB8K|TSB64K|TSB512K, TSB_ALLOC, scsfmmup) &&
14593 14582              (tsb_szc <= TSB_4M_SZCODE ||
14594 14583              sfmmu_tsbinfo_alloc(&scsfmmup->sfmmu_tsb,
14595 14584              TSB_4M_SZCODE, TSB8K|TSB64K|TSB512K,
14596 14585              TSB_ALLOC, scsfmmup))) {
14597 14586  
14598 14587                  SFMMU_STAT(sf_scd_1sttsb_allocfail);
14599 14588                  return (TSB_ALLOCFAIL);
14600 14589          } else {
14601 14590                  scsfmmup->sfmmu_tsb->tsb_flags |= TSB_SHAREDCTX;
14602 14591  
14603 14592                  if (tte4m_cnt) {
14604 14593                          tsb_szc = SELECT_TSB_SIZECODE(tte4m_cnt);
14605 14594                          if (sfmmu_tsbinfo_alloc(&newtsb, tsb_szc,
14606 14595                              TSB4M|TSB32M|TSB256M, TSB_ALLOC, scsfmmup) &&
14607 14596                              (tsb_szc <= TSB_4M_SZCODE ||
14608 14597                              sfmmu_tsbinfo_alloc(&newtsb, TSB_4M_SZCODE,
14609 14598                              TSB4M|TSB32M|TSB256M,
14610 14599                              TSB_ALLOC, scsfmmup))) {
14611 14600                                  /*
14612 14601                                   * If we fail to allocate the 2nd shared tsb,
14613 14602                                   * just free the 1st tsb, return failure.
14614 14603                                   */
14615 14604                                  sfmmu_tsbinfo_free(scsfmmup->sfmmu_tsb);
14616 14605                                  SFMMU_STAT(sf_scd_2ndtsb_allocfail);
14617 14606                                  return (TSB_ALLOCFAIL);
14618 14607                          } else {
14619 14608                                  ASSERT(scsfmmup->sfmmu_tsb->tsb_next == NULL);
14620 14609                                  newtsb->tsb_flags |= TSB_SHAREDCTX;
14621 14610                                  scsfmmup->sfmmu_tsb->tsb_next = newtsb;
14622 14611                                  SFMMU_STAT(sf_scd_2ndtsb_alloc);
14623 14612                          }
14624 14613                  }
14625 14614                  SFMMU_STAT(sf_scd_1sttsb_alloc);
14626 14615          }
14627 14616          return (TSB_SUCCESS);
14628 14617  }
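
The loop at the top of sfmmu_alloc_scd_tsbs() above (and again in sfmmu_link_scd_to_regions()/sfmmu_unlink_scd_from_regions() below) walks the region bitmap word by word, shifting each word right and reconstructing the region id as (word_index << BT_ULSHIFT) | bit_index. A standalone sketch of that bit walk, assuming 64-bit words; WORDS, BT_ULSHIFT's value and walk_rids are assumptions made for the example.

        #include <stdio.h>

        #define WORDS           4       /* hypothetical map size */
        #define BT_ULSHIFT      6       /* log2(bits per 64-bit word) */

        static void
        walk_rids(const unsigned long long *bitmap)
        {
                unsigned int i, j, rid;
                unsigned long long w;

                for (i = 0; i < WORDS; i++) {
                        if ((w = bitmap[i]) == 0)
                                continue;       /* no ids in this word */
                        j = 0;
                        while (w) {
                                if (!(w & 1)) {
                                        j++;
                                        w >>= 1;
                                        continue;
                                }
                                rid = (i << BT_ULSHIFT) | j;
                                j++;
                                w >>= 1;
                                printf("region id %u is set\n", rid);
                        }
                }
        }
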
14629 14618  
14630 14619  static void
14631 14620  sfmmu_free_scd_tsbs(sfmmu_t *scd_sfmmu)
14632 14621  {
14633 14622          while (scd_sfmmu->sfmmu_tsb != NULL) {
14634 14623                  struct tsb_info *next = scd_sfmmu->sfmmu_tsb->tsb_next;
14635 14624                  sfmmu_tsbinfo_free(scd_sfmmu->sfmmu_tsb);
14636 14625                  scd_sfmmu->sfmmu_tsb = next;
14637 14626          }
14638 14627  }
14639 14628  
14640 14629  /*
14641 14630   * Link the sfmmu onto the hme region list.
14642 14631   */
14643 14632  void
14644 14633  sfmmu_link_to_hmeregion(sfmmu_t *sfmmup, sf_region_t *rgnp)
14645 14634  {
14646 14635          uint_t rid;
14647 14636          sf_rgn_link_t *rlink;
14648 14637          sfmmu_t *head;
14649 14638          sf_rgn_link_t *hrlink;
14650 14639  
14651 14640          rid = rgnp->rgn_id;
14652 14641          ASSERT(SFMMU_IS_SHMERID_VALID(rid));
14653 14642  
14654 14643          /* LINTED: constant in conditional context */
14655 14644          SFMMU_HMERID2RLINKP(sfmmup, rid, rlink, 1, 1);
14656 14645          ASSERT(rlink != NULL);
14657 14646          mutex_enter(&rgnp->rgn_mutex);
14658 14647          if ((head = rgnp->rgn_sfmmu_head) == NULL) {
14659 14648                  rlink->next = NULL;
14660 14649                  rlink->prev = NULL;
14661 14650                  /*
14662 14651                   * make sure rlink's next field is NULL
14663 14652                   * before making this link visible.
14664 14653                   */
14665 14654                  membar_stst();
14666 14655                  rgnp->rgn_sfmmu_head = sfmmup;
14667 14656          } else {
14668 14657                  /* LINTED: constant in conditional context */
14669 14658                  SFMMU_HMERID2RLINKP(head, rid, hrlink, 0, 0);
14670 14659                  ASSERT(hrlink != NULL);
14671 14660                  ASSERT(hrlink->prev == NULL);
14672 14661                  rlink->next = head;
14673 14662                  rlink->prev = NULL;
14674 14663                  hrlink->prev = sfmmup;
14675 14664                  /*
14676 14665                   * make sure rlink's next field is correct
14677 14666                   * before making this link visible.
14678 14667                   */
14679 14668                  membar_stst();
14680 14669                  rgnp->rgn_sfmmu_head = sfmmup;
14681 14670          }
14682 14671          mutex_exit(&rgnp->rgn_mutex);
14683 14672  }
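
Both sfmmu_link_to_hmeregion() above and hat_dup_region() earlier fill in the new rlink fields first, issue membar_stst(), and only then store the new list head, so a concurrent reader following rgn_sfmmu_head never observes a half-initialized link (writers still serialize on rgn_mutex). A portable C11 analogue of that publish step using a release store, with hypothetical names (struct elem, list_head, publish).

        #include <stdatomic.h>
        #include <stddef.h>

        struct elem {
                struct elem *next;
                int          payload;
        };

        static _Atomic(struct elem *) list_head;

        /* Writers are assumed to be serialized externally (cf. rgn_mutex). */
        static void
        publish(struct elem *ep, int val)
        {
                ep->payload = val;
                ep->next = atomic_load_explicit(&list_head,
                    memory_order_relaxed);
                /* Release: the initializing stores above become visible first. */
                atomic_store_explicit(&list_head, ep, memory_order_release);
        }
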
14684 14673  
14685 14674  /*
14686 14675   * Unlink the sfmmu from the hme region list.
14687 14676   */
14688 14677  void
14689 14678  sfmmu_unlink_from_hmeregion(sfmmu_t *sfmmup, sf_region_t *rgnp)
14690 14679  {
14691 14680          uint_t rid;
14692 14681          sf_rgn_link_t *rlink;
14693 14682  
14694 14683          rid = rgnp->rgn_id;
14695 14684          ASSERT(SFMMU_IS_SHMERID_VALID(rid));
14696 14685  
14697 14686          /* LINTED: constant in conditional context */
14698 14687          SFMMU_HMERID2RLINKP(sfmmup, rid, rlink, 0, 0);
14699 14688          ASSERT(rlink != NULL);
14700 14689          mutex_enter(&rgnp->rgn_mutex);
14701 14690          if (rgnp->rgn_sfmmu_head == sfmmup) {
14702 14691                  sfmmu_t *next = rlink->next;
14703 14692                  rgnp->rgn_sfmmu_head = next;
14704 14693                  /*
14705 14694                   * if we are stopped by xc_attention() after this
14706 14695                   * point the forward link walking in
14707 14696                   * sfmmu_rgntlb_demap() will work correctly since the
14708 14697                   * head correctly points to the next element.
14709 14698                   */
14710 14699                  membar_stst();
14711 14700                  rlink->next = NULL;
14712 14701                  ASSERT(rlink->prev == NULL);
14713 14702                  if (next != NULL) {
14714 14703                          sf_rgn_link_t *nrlink;
14715 14704                          /* LINTED: constant in conditional context */
14716 14705                          SFMMU_HMERID2RLINKP(next, rid, nrlink, 0, 0);
14717 14706                          ASSERT(nrlink != NULL);
14718 14707                          ASSERT(nrlink->prev == sfmmup);
14719 14708                          nrlink->prev = NULL;
14720 14709                  }
14721 14710          } else {
14722 14711                  sfmmu_t *next = rlink->next;
14723 14712                  sfmmu_t *prev = rlink->prev;
14724 14713                  sf_rgn_link_t *prlink;
14725 14714  
14726 14715                  ASSERT(prev != NULL);
14727 14716                  /* LINTED: constant in conditional context */
14728 14717                  SFMMU_HMERID2RLINKP(prev, rid, prlink, 0, 0);
14729 14718                  ASSERT(prlink != NULL);
14730 14719                  ASSERT(prlink->next == sfmmup);
14731 14720                  prlink->next = next;
14732 14721                  /*
14733 14722                   * if we are stopped by xc_attention()
14734 14723                   * after this point the forward link walking
14735 14724                   * will work correctly since the prev element
14736 14725                   * correctly points to the next element.
14737 14726                   */
14738 14727                  membar_stst();
14739 14728                  rlink->next = NULL;
14740 14729                  rlink->prev = NULL;
14741 14730                  if (next != NULL) {
14742 14731                          sf_rgn_link_t *nrlink;
14743 14732                          /* LINTED: constant in conditional context */
14744 14733                          SFMMU_HMERID2RLINKP(next, rid, nrlink, 0, 0);
14745 14734                          ASSERT(nrlink != NULL);
14746 14735                          ASSERT(nrlink->prev == sfmmup);
14747 14736                          nrlink->prev = prev;
14748 14737                  }
14749 14738          }
14750 14739          mutex_exit(&rgnp->rgn_mutex);
14751 14740  }
14752 14741  
14753 14742  /*
14754 14743   * Link scd sfmmu onto ism or hme region list for each region in the
14755 14744   * scd region map.
14756 14745   */
14757 14746  void
14758 14747  sfmmu_link_scd_to_regions(sf_srd_t *srdp, sf_scd_t *scdp)
14759 14748  {
14760 14749          uint_t rid;
14761 14750          uint_t i;
14762 14751          uint_t j;
14763 14752          ulong_t w;
14764 14753          sf_region_t *rgnp;
14765 14754          sfmmu_t *scsfmmup;
14766 14755  
14767 14756          scsfmmup = scdp->scd_sfmmup;
14768 14757          ASSERT(scsfmmup->sfmmu_scdhat);
14769 14758          for (i = 0; i < SFMMU_RGNMAP_WORDS; i++) {
14770 14759                  if ((w = scdp->scd_region_map.bitmap[i]) == 0) {
14771 14760                          continue;
14772 14761                  }
14773 14762                  j = 0;
14774 14763                  while (w) {
14775 14764                          if (!(w & 0x1)) {
14776 14765                                  j++;
14777 14766                                  w >>= 1;
14778 14767                                  continue;
14779 14768                          }
14780 14769                          rid = (i << BT_ULSHIFT) | j;
14781 14770                          j++;
14782 14771                          w >>= 1;
14783 14772  
14784 14773                          if (rid < SFMMU_MAX_HME_REGIONS) {
14785 14774                                  rgnp = srdp->srd_hmergnp[rid];
14786 14775                                  ASSERT(rgnp->rgn_id == rid);
14787 14776                                  ASSERT(rgnp->rgn_refcnt > 0);
14788 14777                                  sfmmu_link_to_hmeregion(scsfmmup, rgnp);
14789 14778                          } else {
14790 14779                                  sfmmu_t *ism_hatid = NULL;
14791 14780                                  ism_ment_t *ism_ment;
14792 14781                                  rid -= SFMMU_MAX_HME_REGIONS;
14793 14782                                  rgnp = srdp->srd_ismrgnp[rid];
14794 14783                                  ASSERT(rgnp->rgn_id == rid);
14795 14784                                  ASSERT(rgnp->rgn_refcnt > 0);
14796 14785  
14797 14786                                  ism_hatid = (sfmmu_t *)rgnp->rgn_obj;
14798 14787                                  ASSERT(ism_hatid->sfmmu_ismhat);
14799 14788                                  ism_ment = &scdp->scd_ism_links[rid];
14800 14789                                  ism_ment->iment_hat = scsfmmup;
14801 14790                                  ism_ment->iment_base_va = rgnp->rgn_saddr;
14802 14791                                  mutex_enter(&ism_mlist_lock);
14803 14792                                  iment_add(ism_ment, ism_hatid);
14804 14793                                  mutex_exit(&ism_mlist_lock);
14805 14794  
14806 14795                          }
14807 14796                  }
14808 14797          }
14809 14798  }
14810 14799  /*
14811 14800   * Unlink scd sfmmu from ism or hme region list for each region in the
14812 14801   * scd region map.
14813 14802   */
14814 14803  void
14815 14804  sfmmu_unlink_scd_from_regions(sf_srd_t *srdp, sf_scd_t *scdp)
14816 14805  {
14817 14806          uint_t rid;
14818 14807          uint_t i;
14819 14808          uint_t j;
14820 14809          ulong_t w;
14821 14810          sf_region_t *rgnp;
14822 14811          sfmmu_t *scsfmmup;
14823 14812  
14824 14813          scsfmmup = scdp->scd_sfmmup;
14825 14814          for (i = 0; i < SFMMU_RGNMAP_WORDS; i++) {
14826 14815                  if ((w = scdp->scd_region_map.bitmap[i]) == 0) {
14827 14816                          continue;
14828 14817                  }
14829 14818                  j = 0;
14830 14819                  while (w) {
14831 14820                          if (!(w & 0x1)) {
14832 14821                                  j++;
14833 14822                                  w >>= 1;
14834 14823                                  continue;
14835 14824                          }
14836 14825                          rid = (i << BT_ULSHIFT) | j;
14837 14826                          j++;
14838 14827                          w >>= 1;
14839 14828  
14840 14829                          if (rid < SFMMU_MAX_HME_REGIONS) {
14841 14830                                  rgnp = srdp->srd_hmergnp[rid];
14842 14831                                  ASSERT(rgnp->rgn_id == rid);
14843 14832                                  ASSERT(rgnp->rgn_refcnt > 0);
14844 14833                                  sfmmu_unlink_from_hmeregion(scsfmmup,
14845 14834                                      rgnp);
14846 14835  
14847 14836                          } else {
14848 14837                                  sfmmu_t *ism_hatid = NULL;
14849 14838                                  ism_ment_t *ism_ment;
14850 14839                                  rid -= SFMMU_MAX_HME_REGIONS;
14851 14840                                  rgnp = srdp->srd_ismrgnp[rid];
14852 14841                                  ASSERT(rgnp->rgn_id == rid);
14853 14842                                  ASSERT(rgnp->rgn_refcnt > 0);
14854 14843  
14855 14844                                  ism_hatid = (sfmmu_t *)rgnp->rgn_obj;
14856 14845                                  ASSERT(ism_hatid->sfmmu_ismhat);
14857 14846                                  ism_ment = &scdp->scd_ism_links[rid];
14858 14847                                  ASSERT(ism_ment->iment_hat == scdp->scd_sfmmup);
14859 14848                                  ASSERT(ism_ment->iment_base_va ==
14860 14849                                      rgnp->rgn_saddr);
14861 14850                                  mutex_enter(&ism_mlist_lock);
14862 14851                                  iment_sub(ism_ment, ism_hatid);
14863 14852                                  mutex_exit(&ism_mlist_lock);
14864 14853  
14865 14854                          }
14866 14855                  }
14867 14856          }
14868 14857  }
14869 14858  /*
14870 14859   * Allocates and initialises a new SCD structure, this is called with
14871 14860   * the srd_scd_mutex held and returns with the reference count
14872 14861   * initialised to 1.
14873 14862   */
14874 14863  static sf_scd_t *
14875 14864  sfmmu_alloc_scd(sf_srd_t *srdp, sf_region_map_t *new_map)
14876 14865  {
14877 14866          sf_scd_t *new_scdp;
14878 14867          sfmmu_t *scsfmmup;
14879 14868          int i;
14880 14869  
14881 14870          ASSERT(MUTEX_HELD(&srdp->srd_scd_mutex));
14882 14871          new_scdp = kmem_cache_alloc(scd_cache, KM_SLEEP);
14883 14872  
14884 14873          scsfmmup = kmem_cache_alloc(sfmmuid_cache, KM_SLEEP);
14885 14874          new_scdp->scd_sfmmup = scsfmmup;
14886 14875          scsfmmup->sfmmu_srdp = srdp;
14887 14876          scsfmmup->sfmmu_scdp = new_scdp;
14888 14877          scsfmmup->sfmmu_tsb0_4minflcnt = 0;
14889 14878          scsfmmup->sfmmu_scdhat = 1;
14890 14879          CPUSET_ALL(scsfmmup->sfmmu_cpusran);
14891 14880          bzero(scsfmmup->sfmmu_hmeregion_links, SFMMU_L1_HMERLINKS_SIZE);
14892 14881  
14893 14882          ASSERT(max_mmu_ctxdoms > 0);
14894 14883          for (i = 0; i < max_mmu_ctxdoms; i++) {
14895 14884                  scsfmmup->sfmmu_ctxs[i].cnum = INVALID_CONTEXT;
14896 14885                  scsfmmup->sfmmu_ctxs[i].gnum = 0;
14897 14886          }
14898 14887  
14899 14888          for (i = 0; i < MMU_PAGE_SIZES; i++) {
14900 14889                  new_scdp->scd_rttecnt[i] = 0;
14901 14890          }
14902 14891  
14903 14892          new_scdp->scd_region_map = *new_map;
14904 14893          new_scdp->scd_refcnt = 1;
14905 14894          if (sfmmu_alloc_scd_tsbs(srdp, new_scdp) != TSB_SUCCESS) {
14906 14895                  kmem_cache_free(scd_cache, new_scdp);
14907 14896                  kmem_cache_free(sfmmuid_cache, scsfmmup);
14908 14897                  return (NULL);
14909 14898          }
14910 14899          if (&mmu_init_scd) {
14911 14900                  mmu_init_scd(new_scdp);
14912 14901          }
14913 14902          return (new_scdp);
14914 14903  }
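
sfmmu_alloc_scd() above allocates the SCD and its shared hat from two kmem caches and then the SCD TSBs; if the TSB allocation fails it frees both cache allocations before returning NULL, so callers never see a half-constructed SCD. A minimal sketch of the same cleanup-on-partial-failure shape using malloc/free, with hypothetical names (struct widget, widget_alloc).

        #include <stdlib.h>

        struct widget {
                char *namebuf;
                char *databuf;
        };

        /*
         * Allocate all parts of the object; on any failure undo what was
         * already allocated so the caller never sees a partial widget.
         */
        static struct widget *
        widget_alloc(size_t namelen, size_t datalen)
        {
                struct widget *wp = calloc(1, sizeof (*wp));

                if (wp == NULL)
                        return (NULL);
                if ((wp->namebuf = malloc(namelen)) == NULL ||
                    (wp->databuf = malloc(datalen)) == NULL) {
                        free(wp->namebuf);      /* free(NULL) is a no-op */
                        free(wp);
                        return (NULL);
                }
                return (wp);
        }
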
14915 14904  
14916 14905  /*
14917 14906   * The first phase of a process joining an SCD. The hat structure is
14918 14907   * linked to the SCD queue and then the HAT_JOIN_SCD sfmmu flag is set
14919 14908   * and a cross-call with context invalidation is used to cause the
14920 14909   * remaining work to be carried out in the sfmmu_tsbmiss_exception()
14921 14910   * routine.
14922 14911   */
14923 14912  static void
14924 14913  sfmmu_join_scd(sf_scd_t *scdp, sfmmu_t *sfmmup)
14925 14914  {
14926 14915          hatlock_t *hatlockp;
14927 14916          sf_srd_t *srdp = sfmmup->sfmmu_srdp;
14928 14917          int i;
14929 14918          sf_scd_t *old_scdp;
14930 14919  
14931 14920          ASSERT(srdp != NULL);
14932 14921          ASSERT(scdp != NULL);
14933 14922          ASSERT(scdp->scd_refcnt > 0);
14934 14923          ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as));
14935 14924  
14936 14925          if ((old_scdp = sfmmup->sfmmu_scdp) != NULL) {
14937 14926                  ASSERT(old_scdp != scdp);
14938 14927  
14939 14928                  mutex_enter(&old_scdp->scd_mutex);
14940 14929                  sfmmu_from_scd_list(&old_scdp->scd_sf_list, sfmmup);
14941 14930                  mutex_exit(&old_scdp->scd_mutex);
14942 14931                  /*
14943 14932                   * sfmmup leaves the old scd. Update sfmmu_ttecnt to
14944 14933                   * include the shme rgn ttecnt for rgns that
14945 14934                   * were in the old SCD
14946 14935                   */
14947 14936                  for (i = 0; i < mmu_page_sizes; i++) {
14948 14937                          ASSERT(sfmmup->sfmmu_scdrttecnt[i] ==
14949 14938                              old_scdp->scd_rttecnt[i]);
14950 14939                          atomic_add_long(&sfmmup->sfmmu_ttecnt[i],
14951 14940                              sfmmup->sfmmu_scdrttecnt[i]);
14952 14941                  }
14953 14942          }
14954 14943  
14955 14944          /*
14956 14945           * Move sfmmu to the scd lists.
14957 14946           */
14958 14947          mutex_enter(&scdp->scd_mutex);
14959 14948          sfmmu_to_scd_list(&scdp->scd_sf_list, sfmmup);
14960 14949          mutex_exit(&scdp->scd_mutex);
14961 14950          SF_SCD_INCR_REF(scdp);
14962 14951  
14963 14952          hatlockp = sfmmu_hat_enter(sfmmup);
14964 14953          /*
14965 14954           * For a multi-thread process, we must stop
14966 14955           * all the other threads before joining the scd.
14967 14956           */
14968 14957  
14969 14958          SFMMU_FLAGS_SET(sfmmup, HAT_JOIN_SCD);
14970 14959  
14971 14960          sfmmu_invalidate_ctx(sfmmup);
14972 14961          sfmmup->sfmmu_scdp = scdp;
14973 14962  
14974 14963          /*
14975 14964           * Copy scd_rttecnt into sfmmup's sfmmu_scdrttecnt, and update
14976 14965           * sfmmu_ttecnt to not include the rgn ttecnt just joined in SCD.
14977 14966           */
14978 14967          for (i = 0; i < mmu_page_sizes; i++) {
14979 14968                  sfmmup->sfmmu_scdrttecnt[i] = scdp->scd_rttecnt[i];
14980 14969                  ASSERT(sfmmup->sfmmu_ttecnt[i] >= scdp->scd_rttecnt[i]);
14981 14970                  atomic_add_long(&sfmmup->sfmmu_ttecnt[i],
14982 14971                      -sfmmup->sfmmu_scdrttecnt[i]);
14983 14972          }
14984 14973          /* update tsb0 inflation count */
14985 14974          if (old_scdp != NULL) {
14986 14975                  sfmmup->sfmmu_tsb0_4minflcnt +=
14987 14976                      old_scdp->scd_sfmmup->sfmmu_tsb0_4minflcnt;
14988 14977          }
14989 14978          ASSERT(sfmmup->sfmmu_tsb0_4minflcnt >=
14990 14979              scdp->scd_sfmmup->sfmmu_tsb0_4minflcnt);
14991 14980          sfmmup->sfmmu_tsb0_4minflcnt -= scdp->scd_sfmmup->sfmmu_tsb0_4minflcnt;
14992 14981  
14993 14982          sfmmu_hat_exit(hatlockp);
14994 14983  
14995 14984          if (old_scdp != NULL) {
14996 14985                  SF_SCD_DECR_REF(srdp, old_scdp);
14997 14986          }
14998 14987  
14999 14988  }
15000 14989  
15001 14990  /*
15002 14991   * This routine is called by a process to become part of an SCD. It is called
15003 14992   * from sfmmu_tsbmiss_exception() once most of the initial work has been
15004 14993   * done by sfmmu_join_scd(). This routine must not drop the hat lock.
15005 14994   */
15006 14995  static void
15007 14996  sfmmu_finish_join_scd(sfmmu_t *sfmmup)
15008 14997  {
15009 14998          struct tsb_info *tsbinfop;
15010 14999  
15011 15000          ASSERT(sfmmu_hat_lock_held(sfmmup));
15012 15001          ASSERT(sfmmup->sfmmu_scdp != NULL);
15013 15002          ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_JOIN_SCD));
15014 15003          ASSERT(!SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY));
15015 15004          ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_ALLCTX_INVALID));
15016 15005  
15017 15006          for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL;
15018 15007              tsbinfop = tsbinfop->tsb_next) {
15019 15008                  if (tsbinfop->tsb_flags & TSB_SWAPPED) {
15020 15009                          continue;
15021 15010                  }
15022 15011                  ASSERT(!(tsbinfop->tsb_flags & TSB_RELOC_FLAG));
15023 15012  
15024 15013                  sfmmu_inv_tsb(tsbinfop->tsb_va,
15025 15014                      TSB_BYTES(tsbinfop->tsb_szc));
15026 15015          }
15027 15016  
15028 15017          /* Set HAT_CTX1_FLAG for all SCD ISMs */
15029 15018          sfmmu_ism_hatflags(sfmmup, 1);
15030 15019  
15031 15020          SFMMU_STAT(sf_join_scd);
15032 15021  }
15033 15022  
15034 15023  /*
15035 15024   * This routine checks whether there is an SCD which matches the
15036 15025   * process's region map; if not, a new SCD may be created.
15037 15026   */
15038 15027  static void
15039 15028  sfmmu_find_scd(sfmmu_t *sfmmup)
15040 15029  {
15041 15030          sf_srd_t *srdp = sfmmup->sfmmu_srdp;
15042 15031          sf_scd_t *scdp, *new_scdp;
15043 15032          int ret;
15044 15033  
15045 15034          ASSERT(srdp != NULL);
15046 15035          ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as));
15047 15036  
15048 15037          mutex_enter(&srdp->srd_scd_mutex);
15049 15038          for (scdp = srdp->srd_scdp; scdp != NULL;
15050 15039              scdp = scdp->scd_next) {
15051 15040                  SF_RGNMAP_EQUAL(&scdp->scd_region_map,
15052 15041                      &sfmmup->sfmmu_region_map, ret);
15053 15042                  if (ret == 1) {
15054 15043                          SF_SCD_INCR_REF(scdp);
15055 15044                          mutex_exit(&srdp->srd_scd_mutex);
15056 15045                          sfmmu_join_scd(scdp, sfmmup);
15057 15046                          ASSERT(scdp->scd_refcnt >= 2);
15058 15047                          atomic_dec_32((volatile uint32_t *)&scdp->scd_refcnt);
15059 15048                          return;
15060 15049                  } else {
15061 15050                          /*
15062 15051                           * If the sfmmu region map is a subset of the scd
15063 15052                           * region map, then the assumption is that this process
15064 15053                           * will continue attaching to ISM segments until the
15065 15054                           * region maps are equal.
15066 15055                           */
15067 15056                          SF_RGNMAP_IS_SUBSET(&scdp->scd_region_map,
15068 15057                              &sfmmup->sfmmu_region_map, ret);
15069 15058                          if (ret == 1) {
15070 15059                                  mutex_exit(&srdp->srd_scd_mutex);
15071 15060                                  return;
15072 15061                          }
15073 15062                  }
15074 15063          }
15075 15064  
15076 15065          ASSERT(scdp == NULL);
15077 15066          /*
15078 15067           * No matching SCD has been found, create a new one.
15079 15068           */
15080 15069          if ((new_scdp = sfmmu_alloc_scd(srdp, &sfmmup->sfmmu_region_map)) ==
15081 15070              NULL) {
15082 15071                  mutex_exit(&srdp->srd_scd_mutex);
15083 15072                  return;
15084 15073          }
15085 15074  
15086 15075          /*
15087 15076           * sfmmu_alloc_scd() returns with a ref count of 1 on the scd.
15088 15077           */
15089 15078  
15090 15079          /* Set scd_rttecnt for shme rgns in SCD */
15091 15080          sfmmu_set_scd_rttecnt(srdp, new_scdp);
15092 15081  
15093 15082          /*
15094 15083           * Link scd onto srd_scdp list and scd sfmmu onto region/iment lists.
15095 15084           */
15096 15085          sfmmu_link_scd_to_regions(srdp, new_scdp);
15097 15086          sfmmu_add_scd(&srdp->srd_scdp, new_scdp);
15098 15087          SFMMU_STAT_ADD(sf_create_scd, 1);
15099 15088  
15100 15089          mutex_exit(&srdp->srd_scd_mutex);
15101 15090          sfmmu_join_scd(new_scdp, sfmmup);
15102 15091          ASSERT(new_scdp->scd_refcnt >= 2);
15103 15092          atomic_dec_32((volatile uint32_t *)&new_scdp->scd_refcnt);
15104 15093  }
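
Leaving aside the subset case, sfmmu_find_scd() above is a find-or-create under a mutex: scan the SCD list for an entry whose region map matches, take a reference if found, otherwise allocate a new one (which sfmmu_alloc_scd() hands back with a reference count of 1) and link it in before dropping the lock. A compressed pthread sketch of that shape, with a scalar key standing in for the region map and hypothetical names (struct shared, shared_hold).

        #include <pthread.h>
        #include <stdlib.h>

        struct shared {
                struct shared *next;
                unsigned long  key;     /* stands in for the region map */
                unsigned int   refcnt;
        };

        static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
        static struct shared *shared_list;

        static struct shared *
        shared_hold(unsigned long key)
        {
                struct shared *sp;

                pthread_mutex_lock(&list_lock);
                for (sp = shared_list; sp != NULL; sp = sp->next) {
                        if (sp->key == key) {
                                sp->refcnt++;   /* existing match: take a ref */
                                pthread_mutex_unlock(&list_lock);
                                return (sp);
                        }
                }
                if ((sp = calloc(1, sizeof (*sp))) != NULL) {
                        sp->key = key;
                        sp->refcnt = 1;         /* created with one reference */
                        sp->next = shared_list;
                        shared_list = sp;
                }
                pthread_mutex_unlock(&list_lock);
                return (sp);
        }
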
15105 15094  
15106 15095  /*
15107 15096   * This routine is called by a process to remove itself from an SCD. It is
15108 15097   * either called when the process has detached from a segment or from
15109 15098   * hat_free_start() as a result of calling exit.
15110 15099   */
15111 15100  static void
15112 15101  sfmmu_leave_scd(sfmmu_t *sfmmup, uchar_t r_type)
15113 15102  {
15114 15103          sf_scd_t *scdp = sfmmup->sfmmu_scdp;
15115 15104          sf_srd_t *srdp =  sfmmup->sfmmu_srdp;
15116 15105          hatlock_t *hatlockp = TSB_HASH(sfmmup);
15117 15106          int i;
15118 15107  
15119 15108          ASSERT(scdp != NULL);
15120 15109          ASSERT(srdp != NULL);
15121 15110  
15122 15111          if (sfmmup->sfmmu_free) {
15123 15112                  /*
15124 15113                   * If the process is part of an SCD the sfmmu is unlinked
15125 15114                   * from scd_sf_list.
15126 15115                   */
15127 15116                  mutex_enter(&scdp->scd_mutex);
15128 15117                  sfmmu_from_scd_list(&scdp->scd_sf_list, sfmmup);
15129 15118                  mutex_exit(&scdp->scd_mutex);
15130 15119                  /*
15131 15120                   * Update sfmmu_ttecnt to include the rgn ttecnt for rgns that
15132 15121                   * are about to leave the SCD
15133 15122                   */
15134 15123                  for (i = 0; i < mmu_page_sizes; i++) {
15135 15124                          ASSERT(sfmmup->sfmmu_scdrttecnt[i] ==
15136 15125                              scdp->scd_rttecnt[i]);
15137 15126                          atomic_add_long(&sfmmup->sfmmu_ttecnt[i],
15138 15127                              sfmmup->sfmmu_scdrttecnt[i]);
15139 15128                          sfmmup->sfmmu_scdrttecnt[i] = 0;
15140 15129                  }
15141 15130                  sfmmup->sfmmu_scdp = NULL;
15142 15131  
15143 15132                  SF_SCD_DECR_REF(srdp, scdp);
15144 15133                  return;
15145 15134          }
15146 15135  
15147 15136          ASSERT(r_type != SFMMU_REGION_ISM ||
15148 15137              SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY));
15149 15138          ASSERT(scdp->scd_refcnt);
15150 15139          ASSERT(!sfmmup->sfmmu_free);
15151 15140          ASSERT(sfmmu_hat_lock_held(sfmmup));
15152 15141          ASSERT(AS_LOCK_HELD(sfmmup->sfmmu_as));
15153 15142  
15154 15143          /*
15155 15144           * Wait for ISM maps to be updated.
15156 15145           */
15157 15146          if (r_type != SFMMU_REGION_ISM) {
15158 15147                  while (SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY) &&
15159 15148                      sfmmup->sfmmu_scdp != NULL) {
15160 15149                          cv_wait(&sfmmup->sfmmu_tsb_cv,
15161 15150                              HATLOCK_MUTEXP(hatlockp));
15162 15151                  }
15163 15152  
15164 15153                  if (sfmmup->sfmmu_scdp == NULL) {
15165 15154                          sfmmu_hat_exit(hatlockp);
15166 15155                          return;
15167 15156                  }
15168 15157                  SFMMU_FLAGS_SET(sfmmup, HAT_ISMBUSY);
15169 15158          }
15170 15159  
15171 15160          if (SFMMU_FLAGS_ISSET(sfmmup, HAT_JOIN_SCD)) {
15172 15161                  SFMMU_FLAGS_CLEAR(sfmmup, HAT_JOIN_SCD);
15173 15162                  /*
15174 15163                   * Since HAT_JOIN_SCD was set our context
15175 15164                   * is still invalid.
15176 15165                   */
15177 15166          } else {
15178 15167                  /*
15179 15168                   * For a multi-thread process, we must stop
15180 15169                   * all the other threads before leaving the scd.
15181 15170                   */
15182 15171  
15183 15172                  sfmmu_invalidate_ctx(sfmmup);
15184 15173          }
15185 15174  
15186 15175          /* Clear all the rid's for ISM, delete flags, etc */
15187 15176          ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY));
15188 15177          sfmmu_ism_hatflags(sfmmup, 0);
15189 15178  
15190 15179          /*
15191 15180           * Update sfmmu_ttecnt to include the rgn ttecnt for rgns that
15192 15181           * are in SCD before this sfmmup leaves the SCD.
15193 15182           */
15194 15183          for (i = 0; i < mmu_page_sizes; i++) {
15195 15184                  ASSERT(sfmmup->sfmmu_scdrttecnt[i] ==
15196 15185                      scdp->scd_rttecnt[i]);
15197 15186                  atomic_add_long(&sfmmup->sfmmu_ttecnt[i],
15198 15187                      sfmmup->sfmmu_scdrttecnt[i]);
15199 15188                  sfmmup->sfmmu_scdrttecnt[i] = 0;
15200 15189                  /* update ismttecnt to include SCD ism before hat leaves SCD */
15201 15190                  sfmmup->sfmmu_ismttecnt[i] += sfmmup->sfmmu_scdismttecnt[i];
15202 15191                  sfmmup->sfmmu_scdismttecnt[i] = 0;
15203 15192          }
15204 15193          /* update tsb0 inflation count */
15205 15194          sfmmup->sfmmu_tsb0_4minflcnt += scdp->scd_sfmmup->sfmmu_tsb0_4minflcnt;
15206 15195  
15207 15196          if (r_type != SFMMU_REGION_ISM) {
15208 15197                  SFMMU_FLAGS_CLEAR(sfmmup, HAT_ISMBUSY);
15209 15198          }
15210 15199          sfmmup->sfmmu_scdp = NULL;
15211 15200  
15212 15201          sfmmu_hat_exit(hatlockp);
15213 15202  
15214 15203          /*
15215 15204           * Unlink sfmmu from scd_sf_list; this can be done without holding
15216 15205           * the hat lock as we hold the sfmmu_as lock which prevents
15217 15206           * hat_join_region from adding this thread to the scd again. Other
15218 15207           * threads check if sfmmu_scdp is NULL under hat lock and if it's NULL
15219 15208           * they won't get here, since sfmmu_leave_scd() clears sfmmu_scdp
15220 15209           * while holding the hat lock.
15221 15210           */
15222 15211          mutex_enter(&scdp->scd_mutex);
15223 15212          sfmmu_from_scd_list(&scdp->scd_sf_list, sfmmup);
15224 15213          mutex_exit(&scdp->scd_mutex);
15225 15214          SFMMU_STAT(sf_leave_scd);
15226 15215  
15227 15216          SF_SCD_DECR_REF(srdp, scdp);
15228 15217          hatlockp = sfmmu_hat_enter(sfmmup);
15229 15218  
15230 15219  }
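
The ISM synchronization in sfmmu_leave_scd() above is the classic condition-wait loop: while HAT_ISMBUSY is set (and the hat is still in an SCD), cv_wait() on sfmmu_tsb_cv under the hat lock and re-check the condition after every wakeup. A pthread equivalent of that loop, with hypothetical names (hat_lock, hat_cv, ism_busy, wait_until_not_busy).

        #include <pthread.h>
        #include <stdbool.h>

        static pthread_mutex_t hat_lock = PTHREAD_MUTEX_INITIALIZER;
        static pthread_cond_t  hat_cv   = PTHREAD_COND_INITIALIZER;
        static bool ism_busy;

        /* Call with hat_lock held; re-check the flag after each wakeup. */
        static void
        wait_until_not_busy(void)
        {
                while (ism_busy)
                        pthread_cond_wait(&hat_cv, &hat_lock);
        }
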
15231 15220  
15232 15221  /*
15233 15222   * Unlink and free up an SCD structure with a reference count of 0.
15234 15223   */
15235 15224  static void
15236 15225  sfmmu_destroy_scd(sf_srd_t *srdp, sf_scd_t *scdp, sf_region_map_t *scd_rmap)
15237 15226  {
15238 15227          sfmmu_t *scsfmmup;
15239 15228          sf_scd_t *sp;
15240 15229          hatlock_t *shatlockp;
15241 15230          int i, ret;
15242 15231  
15243 15232          mutex_enter(&srdp->srd_scd_mutex);
15244 15233          for (sp = srdp->srd_scdp; sp != NULL; sp = sp->scd_next) {
15245 15234                  if (sp == scdp)
15246 15235                          break;
15247 15236          }
15248 15237          if (sp == NULL || sp->scd_refcnt) {
15249 15238                  mutex_exit(&srdp->srd_scd_mutex);
15250 15239                  return;
15251 15240          }
15252 15241  
15253 15242          /*
15254 15243           * It is possible that the scd has been freed and reallocated with a
15255 15244           * different region map while we've been waiting for the srd_scd_mutex.
15256 15245           */
15257 15246          SF_RGNMAP_EQUAL(scd_rmap, &sp->scd_region_map, ret);
15258 15247          if (ret != 1) {
15259 15248                  mutex_exit(&srdp->srd_scd_mutex);
15260 15249                  return;
15261 15250          }
15262 15251  
15263 15252          ASSERT(scdp->scd_sf_list == NULL);
15264 15253          /*
15265 15254           * Unlink scd from srd_scdp list.
15266 15255           */
15267 15256          sfmmu_remove_scd(&srdp->srd_scdp, scdp);
15268 15257          mutex_exit(&srdp->srd_scd_mutex);
15269 15258  
15270 15259          sfmmu_unlink_scd_from_regions(srdp, scdp);
15271 15260  
15272 15261          /* Clear shared context tsb and release ctx */
15273 15262          scsfmmup = scdp->scd_sfmmup;
15274 15263  
15275 15264          /*
15276 15265           * create a barrier so that scd will not be destroyed
15277 15266           * if another thread still holds the same shared hat lock.
15278 15267           * E.g., sfmmu_tsbmiss_exception() needs to acquire the
15279 15268           * shared hat lock before checking the shared tsb reloc flag.
15280 15269           */
15281 15270          shatlockp = sfmmu_hat_enter(scsfmmup);
15282 15271          sfmmu_hat_exit(shatlockp);
15283 15272  
15284 15273          sfmmu_free_scd_tsbs(scsfmmup);
15285 15274  
15286 15275          for (i = 0; i < SFMMU_L1_HMERLINKS; i++) {
15287 15276                  if (scsfmmup->sfmmu_hmeregion_links[i] != NULL) {
15288 15277                          kmem_free(scsfmmup->sfmmu_hmeregion_links[i],
15289 15278                              SFMMU_L2_HMERLINKS_SIZE);
15290 15279                          scsfmmup->sfmmu_hmeregion_links[i] = NULL;
15291 15280                  }
15292 15281          }
15293 15282          kmem_cache_free(sfmmuid_cache, scsfmmup);
15294 15283          kmem_cache_free(scd_cache, scdp);
15295 15284          SFMMU_STAT(sf_destroy_scd);
15296 15285  }
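
sfmmu_destroy_scd() above is entered without srd_scd_mutex held, so after reacquiring the lock it re-verifies that the SCD is still on the list, still has a zero reference count, and still carries the region map the caller captured, because the structure may have been freed and recycled in the meantime. A minimal sketch of that recheck-before-destroy pattern, using a generation number in place of the region-map comparison; struct cached and cached_try_destroy are hypothetical.

        #include <pthread.h>
        #include <stdlib.h>

        struct cached {
                struct cached *next;
                unsigned int   refcnt;
                unsigned long  gen;     /* identity the caller captured earlier */
        };

        static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;
        static struct cached *cache_list;

        static int
        cached_try_destroy(struct cached *cp, unsigned long expected_gen)
        {
                struct cached **prevp, *sp;

                pthread_mutex_lock(&cache_lock);
                for (prevp = &cache_list; (sp = *prevp) != NULL;
                    prevp = &sp->next) {
                        if (sp == cp)
                                break;
                }
                /* Gone, re-referenced, or recycled for another identity: bail. */
                if (sp == NULL || sp->refcnt != 0 || sp->gen != expected_gen) {
                        pthread_mutex_unlock(&cache_lock);
                        return (0);
                }
                *prevp = sp->next;      /* unlink while still holding the lock */
                pthread_mutex_unlock(&cache_lock);
                free(sp);
                return (1);
        }
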
15297 15286  
15298 15287  /*
15299 15288   * Sets or clears the HAT_CTX1_FLAG for each ISM segment whose region id is
15300 15289   * set in the SCD's ISM region map (scd_ismregion_map). This flag indicates to
15301 15290   * the tsbmiss handler that mappings for these segments should be loaded using
15302 15291   * the shared context.
15303 15292   */
15304 15293  static void
15305 15294  sfmmu_ism_hatflags(sfmmu_t *sfmmup, int addflag)
15306 15295  {
15307 15296          sf_scd_t *scdp = sfmmup->sfmmu_scdp;
15308 15297          ism_blk_t *ism_blkp;
15309 15298          ism_map_t *ism_map;
15310 15299          int i, rid;
15311 15300  
15312 15301          ASSERT(sfmmup->sfmmu_iblk != NULL);
15313 15302          ASSERT(scdp != NULL);
15314 15303          /*
15315 15304           * Note that the caller either set the HAT_ISMBUSY flag or checked
15316 15305           * under hat lock that HAT_ISMBUSY was not set by another thread.
15317 15306           */
15318 15307          ASSERT(sfmmu_hat_lock_held(sfmmup));
15319 15308  
15320 15309          ism_blkp = sfmmup->sfmmu_iblk;
15321 15310          while (ism_blkp != NULL) {
15322 15311                  ism_map = ism_blkp->iblk_maps;
15323 15312                  for (i = 0; i < ISM_MAP_SLOTS && ism_map[i].imap_ismhat; i++) {
15324 15313                          rid = ism_map[i].imap_rid;
15325 15314                          if (rid == SFMMU_INVALID_ISMRID) {
15326 15315                                  continue;
15327 15316                          }
15328 15317                          ASSERT(rid >= 0 && rid < SFMMU_MAX_ISM_REGIONS);
15329 15318                          if (SF_RGNMAP_TEST(scdp->scd_ismregion_map, rid) &&
15330 15319                              addflag) {
15331 15320                                  ism_map[i].imap_hatflags |=
15332 15321                                      HAT_CTX1_FLAG;
15333 15322                          } else {
15334 15323                                  ism_map[i].imap_hatflags &=
15335 15324                                      ~HAT_CTX1_FLAG;
15336 15325                          }
15337 15326                  }
15338 15327                  ism_blkp = ism_blkp->iblk_next;
15339 15328          }
15340 15329  }
15341 15330  
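The loop in sfmmu_ism_hatflags() boils down to a bitmap-membership test driving a per-entry flag. A stripped-down, self-contained version of that pattern is sketched below; the types and macros (map_ent, RMAP_TEST, CTX1_FLAG) are made up for illustration and are not the kernel's ism_map_t or SF_RGNMAP_TEST:

#include <stdint.h>

#define RMAP_TEST(map, rid)     (((map)[(rid) >> 5] >> ((rid) & 31)) & 1)
#define CTX1_FLAG               0x1

struct map_ent {
        uint32_t rid;           /* region id of this entry */
        uint32_t flags;         /* per-entry flag word */
};

/*
 * Set CTX1_FLAG on every entry whose region id is present in the bitmap
 * when "add" is nonzero; otherwise clear it, mirroring the add/clear
 * behaviour of the function above.
 */
static void
set_ctx1_flags(struct map_ent *ent, int nent, const uint32_t *rmap, int add)
{
        for (int i = 0; i < nent; i++) {
                if (add && RMAP_TEST(rmap, ent[i].rid))
                        ent[i].flags |= CTX1_FLAG;
                else
                        ent[i].flags &= ~CTX1_FLAG;
        }
}
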
15342 15331  static int
15343 15332  sfmmu_srd_lock_held(sf_srd_t *srdp)
15344 15333  {
15345 15334          return (MUTEX_HELD(&srdp->srd_mutex));
15346 15335  }
15347 15336  
15348 15337  /* ARGSUSED */
15349 15338  static int
15350 15339  sfmmu_scdcache_constructor(void *buf, void *cdrarg, int kmflags)
15351 15340  {
15352 15341          sf_scd_t *scdp = (sf_scd_t *)buf;
15353 15342  
15354 15343          bzero(buf, sizeof (sf_scd_t));
15355 15344          mutex_init(&scdp->scd_mutex, NULL, MUTEX_DEFAULT, NULL);
15356 15345          return (0);
15357 15346  }
15358 15347  
15359 15348  /* ARGSUSED */
15360 15349  static void
15361 15350  sfmmu_scdcache_destructor(void *buf, void *cdrarg)
15362 15351  {
15363 15352          sf_scd_t *scdp = (sf_scd_t *)buf;
15364 15353  
15365 15354          mutex_destroy(&scdp->scd_mutex);
15366 15355  }
15367 15356  
15368 15357  /*
15369 15358   * The listp parameter is a pointer to a list of hmeblks which are partially
15370 15359   * freed as a result of calling sfmmu_hblk_hash_rm(); the last phase of the
15371 15360   * freeing process is to cross-call all cpus to ensure that there are no
15372 15361   * remaining cached references.
15373 15362   *
15374 15363   * If the local generation number is less than the global then we can free
15375 15364   * hmeblks which are already on the pending queue as another cpu has completed
15376 15365   * the cross-call.
15377 15366   *
15378 15367   * We cross-call to make sure that there are no threads on other cpus accessing
15379 15368   * these hmeblks and then complete the process of freeing them if any of the
15380 15369   * following conditions holds (sketched in the example after this function):
15381 15370   *      The total number of pending hmeblks is greater than the threshold
15382 15371   *      The reserve list has fewer than HBLK_RESERVE_CNT hmeblks
15383 15372   *      At least 1 second has elapsed since the last time we cross-called
15384 15373   *
15385 15374   * Otherwise, we add the hmeblks to the per-cpu pending queue.
15386 15375   */
15387 15376  static void
15388 15377  sfmmu_hblks_list_purge(struct hme_blk **listp, int dontfree)
15389 15378  {
15390 15379          struct hme_blk *hblkp, *pr_hblkp = NULL;
15391 15380          int             count = 0;
15392 15381          cpuset_t        cpuset = cpu_ready_set;
15393 15382          cpu_hme_pend_t  *cpuhp;
15394 15383          timestruc_t     now;
15395 15384          int             one_second_expired = 0;
15396 15385  
15397 15386          gethrestime_lasttick(&now);
15398 15387  
15399 15388          for (hblkp = *listp; hblkp != NULL; hblkp = hblkp->hblk_next) {
15400 15389                  ASSERT(hblkp->hblk_shw_bit == 0);
15401 15390                  ASSERT(hblkp->hblk_shared == 0);
15402 15391                  count++;
15403 15392                  pr_hblkp = hblkp;
15404 15393          }
15405 15394  
15406 15395          cpuhp = &cpu_hme_pend[CPU->cpu_seqid];
15407 15396          mutex_enter(&cpuhp->chp_mutex);
15408 15397  
15409 15398          if ((cpuhp->chp_count + count) == 0) {
15410 15399                  mutex_exit(&cpuhp->chp_mutex);
15411 15400                  return;
15412 15401          }
15413 15402  
15414 15403          if ((now.tv_sec - cpuhp->chp_timestamp) > 1) {
15415 15404                  one_second_expired  = 1;
15416 15405          }
15417 15406  
15418 15407          if (!dontfree && (freehblkcnt < HBLK_RESERVE_CNT ||
15419 15408              (cpuhp->chp_count + count) > cpu_hme_pend_thresh ||
15420 15409              one_second_expired)) {
15421 15410                  /* Append global list to local */
15422 15411                  if (pr_hblkp == NULL) {
15423 15412                          *listp = cpuhp->chp_listp;
15424 15413                  } else {
15425 15414                          pr_hblkp->hblk_next = cpuhp->chp_listp;
15426 15415                  }
15427 15416                  cpuhp->chp_listp = NULL;
15428 15417                  cpuhp->chp_count = 0;
15429 15418                  cpuhp->chp_timestamp = now.tv_sec;
15430 15419                  mutex_exit(&cpuhp->chp_mutex);
15431 15420  
15432 15421                  kpreempt_disable();
15433 15422                  CPUSET_DEL(cpuset, CPU->cpu_id);
15434 15423                  xt_sync(cpuset);
15435 15424                  xt_sync(cpuset);
15436 15425                  kpreempt_enable();
15437 15426  
15438 15427                  /*
15439 15428                   * At this stage we know that no trap handlers on other
15440 15429                   * cpus can have references to hmeblks on the list.
15441 15430                   */
15442 15431                  sfmmu_hblk_free(listp);
15443 15432          } else if (*listp != NULL) {
15444 15433                  pr_hblkp->hblk_next = cpuhp->chp_listp;
15445 15434                  cpuhp->chp_listp = *listp;
15446 15435                  cpuhp->chp_count += count;
15447 15436                  *listp = NULL;
15448 15437                  mutex_exit(&cpuhp->chp_mutex);
15449 15438          } else {
15450 15439                  mutex_exit(&cpuhp->chp_mutex);
15451 15440          }
15452 15441  }
15453 15442  
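The flush policy described in the block comment above sfmmu_hblks_list_purge() can be read in isolation as: reclaim the per-cpu batch only when reserves are low, the batch is large, or it has aged for more than a second. A hedged, self-contained sketch of just that decision follows; the names are hypothetical, and the real code operates on cpu_hme_pend_t and follows the flush with xt_sync() cross-calls:

#include <time.h>

struct pendq {
        int     count;          /* hmeblks currently queued on this cpu */
        time_t  last_flush;     /* seconds; when we last cross-called */
};

/*
 * Returns nonzero when the newly arriving items should trigger a flush
 * (cross-call + free) instead of simply being appended to the queue.
 */
static int
should_flush(const struct pendq *pq, int newcnt, int reserve_cnt,
    int reserve_min, int thresh, time_t now)
{
        return (reserve_cnt < reserve_min ||
            pq->count + newcnt > thresh ||
            now - pq->last_flush > 1);
}
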
15454 15443  /*
15455 15444   * Add an hmeblk to the hash list.
15456 15445   */
15457 15446  void
15458 15447  sfmmu_hblk_hash_add(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp,
15459 15448          uint64_t hblkpa)
15460 15449  {
15461 15450          ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
15462 15451  #ifdef  DEBUG
15463 15452          if (hmebp->hmeblkp == NULL) {
15464 15453                  ASSERT(hmebp->hmeh_nextpa == HMEBLK_ENDPA);
15465 15454          }
15466 15455  #endif /* DEBUG */
15467 15456  
15468 15457          hmeblkp->hblk_nextpa = hmebp->hmeh_nextpa;
15469 15458          /*
15470 15459           * Since the TSB miss handler now does not lock the hash chain before
15471 15460           * Since the TSB miss handler no longer locks the hash chain before
15472 15461           * walking it, make sure that the hmeblk's nextpa is globally visible
15473 15462           * pointer in the hash bucket.
15474 15463           */
15475 15464          membar_producer();
15476 15465          hmebp->hmeh_nextpa = hblkpa;
15477 15466          hmeblkp->hblk_next = hmebp->hmeblkp;
15478 15467          hmebp->hmeblkp = hmeblkp;
15479 15468  
15480 15469  }
15481 15470  
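sfmmu_hblk_hash_add() relies on ordering alone to stay safe against lock-free readers: the new hmeblk's own next pointer is written first, membar_producer() orders that store, and only then is the bucket head updated to make the block reachable. The same publish pattern is sketched below with C11 atomics; the node and list types are hypothetical, and the release store stands in for membar_producer():

#include <stdatomic.h>

struct node {
        struct node     *next;
        int             val;
};

/*
 * Initialize the node completely, then make it reachable with a single
 * release store so that a reader which sees the new head also sees the
 * node's contents and its next pointer.
 */
static void
publish(_Atomic(struct node *) *headp, struct node *np, int val)
{
        np->val = val;
        np->next = atomic_load_explicit(headp, memory_order_relaxed);
        atomic_store_explicit(headp, np, memory_order_release);
}
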
15482 15471  /*
15483 15472   * This function is the first part of a 2 part process to remove an hmeblk
15484 15473   * from the hash chain. In this phase we unlink the hmeblk from the hash chain
15485 15474   * but leave the next physical pointer unchanged. The hmeblk is then linked onto
15486 15475   * a per-cpu pending list using the virtual address pointer.
15487 15476   *
15488 15477   * TSB miss trap handlers that start after this phase will no longer see
15489 15478   * this hmeblk. TSB miss handlers that still cache this hmeblk in a register
15490 15479   * can still use it for further chain traversal because we haven't yet modified
15491 15480   * the next physical pointer or freed it.
15492 15481   *
15493 15482   * In the second phase of hmeblk removal we'll issue a barrier xcall before
15494 15483   * we reuse or free this hmeblk. This will make sure all lingering references to
15495 15484   * the hmeblk after the first phase disappear before we finally reclaim it.
15496 15485   * This scheme eliminates the need for TSB miss handlers to lock hmeblk chains
15497 15486   * during their traversal.
15498 15487   *
15499 15488   * The hmehash_mutex must be held when calling this function.
15500 15489   *
15501 15490   * Input:
15502 15491   *       hmebp - hme hash bucket pointer
15503 15492   *       hmeblkp - address of hmeblk to be removed
15504 15493   *       pr_hblk - virtual address of previous hmeblkp
15505 15494   *       listp - pointer to list of hmeblks linked by virtual address
15506 15495   *       free_now flag - indicates that a complete removal from the hash chains
15507 15496   *                       is necessary.
15508 15497   *
15509 15498   * Using the free_now flag is inefficient, as a cross-call is required to remove
15510 15499   * a single hmeblk from the hash chain, but it is necessary when hmeblks are in
15511 15500   * short supply. (A sketch of the two-phase scheme follows the function.)
15512 15501   */
15513 15502  void
15514 15503  sfmmu_hblk_hash_rm(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp,
15515 15504      struct hme_blk *pr_hblk, struct hme_blk **listp,
15516 15505      int free_now)
15517 15506  {
15518 15507          int shw_size, vshift;
15519 15508          struct hme_blk *shw_hblkp;
15520 15509          uint_t          shw_mask, newshw_mask;
15521 15510          caddr_t         vaddr;
15522 15511          int             size;
15523 15512          cpuset_t cpuset = cpu_ready_set;
15524 15513  
15525 15514          ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
15526 15515  
15527 15516          if (hmebp->hmeblkp == hmeblkp) {
15528 15517                  hmebp->hmeh_nextpa = hmeblkp->hblk_nextpa;
15529 15518                  hmebp->hmeblkp = hmeblkp->hblk_next;
15530 15519          } else {
15531 15520                  pr_hblk->hblk_nextpa = hmeblkp->hblk_nextpa;
15532 15521                  pr_hblk->hblk_next = hmeblkp->hblk_next;
15533 15522          }
15534 15523  
15535 15524          size = get_hblk_ttesz(hmeblkp);
15536 15525          shw_hblkp = hmeblkp->hblk_shadow;
15537 15526          if (shw_hblkp) {
15538 15527                  ASSERT(hblktosfmmu(hmeblkp) != KHATID);
15539 15528                  ASSERT(!hmeblkp->hblk_shared);
15540 15529  #ifdef  DEBUG
15541 15530                  if (mmu_page_sizes == max_mmu_page_sizes) {
15542 15531                          ASSERT(size < TTE256M);
15543 15532                  } else {
15544 15533                          ASSERT(size < TTE4M);
15545 15534                  }
15546 15535  #endif /* DEBUG */
15547 15536  
15548 15537                  shw_size = get_hblk_ttesz(shw_hblkp);
15549 15538                  vaddr = (caddr_t)get_hblk_base(hmeblkp);
15550 15539                  vshift = vaddr_to_vshift(shw_hblkp->hblk_tag, vaddr, shw_size);
15551 15540                  ASSERT(vshift < 8);
15552 15541                  /*
15553 15542                   * Atomically clear shadow mask bit
15554 15543                   */
15555 15544                  do {
15556 15545                          shw_mask = shw_hblkp->hblk_shw_mask;
15557 15546                          ASSERT(shw_mask & (1 << vshift));
15558 15547                          newshw_mask = shw_mask & ~(1 << vshift);
15559 15548                          newshw_mask = atomic_cas_32(&shw_hblkp->hblk_shw_mask,
15560 15549                              shw_mask, newshw_mask);
15561 15550                  } while (newshw_mask != shw_mask);
15562 15551                  hmeblkp->hblk_shadow = NULL;
15563 15552          }
15564 15553          hmeblkp->hblk_shw_bit = 0;
15565 15554  
15566 15555          if (hmeblkp->hblk_shared) {
15567 15556  #ifdef  DEBUG
15568 15557                  sf_srd_t        *srdp;
15569 15558                  sf_region_t     *rgnp;
15570 15559                  uint_t          rid;
15571 15560  
15572 15561                  srdp = hblktosrd(hmeblkp);
15573 15562                  ASSERT(srdp != NULL && srdp->srd_refcnt != 0);
15574 15563                  rid = hmeblkp->hblk_tag.htag_rid;
15575 15564                  ASSERT(SFMMU_IS_SHMERID_VALID(rid));
15576 15565                  ASSERT(rid < SFMMU_MAX_HME_REGIONS);
15577 15566                  rgnp = srdp->srd_hmergnp[rid];
15578 15567                  ASSERT(rgnp != NULL);
15579 15568                  SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid);
15580 15569  #endif /* DEBUG */
15581 15570                  hmeblkp->hblk_shared = 0;
15582 15571          }
15583 15572          if (free_now) {
15584 15573                  kpreempt_disable();
15585 15574                  CPUSET_DEL(cpuset, CPU->cpu_id);
15586 15575                  xt_sync(cpuset);
15587 15576                  xt_sync(cpuset);
15588 15577                  kpreempt_enable();
15589 15578  
15590 15579                  hmeblkp->hblk_nextpa = HMEBLK_ENDPA;
15591 15580                  hmeblkp->hblk_next = NULL;
15592 15581          } else {
15593 15582                  /* Append hmeblkp to listp for processing later. */
15594 15583                  hmeblkp->hblk_next = *listp;
15595 15584                  *listp = hmeblkp;
15596 15585          }
15597 15586  }
15598 15587  
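The two-phase removal that sfmmu_hblk_hash_rm() implements (unlink now, barrier and reclaim later) reduces to the skeleton below. The names are hypothetical; in the kernel code the still-readable link is the physical hblk_nextpa, the pending link reuses hblk_next, and the phase-2 barrier is the pair of xt_sync() cross-calls issued by sfmmu_hblks_list_purge():

struct hnode {
        struct hnode    *hash_next;     /* link followed by lock-free readers */
        struct hnode    *pend_next;     /* link used only for the pending list */
};

/*
 * Phase 1: hide the node from new readers by repointing its predecessor,
 * but leave hash_next intact so readers already holding the node can keep
 * walking. The node is parked on a pending list; phase 2 (after a barrier
 * that drains all readers) may finally free or reuse it.
 */
static void
retire_phase1(struct hnode **prevp, struct hnode *np, struct hnode **pendingp)
{
        *prevp = np->hash_next;
        np->pend_next = *pendingp;
        *pendingp = np;
}
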
15599 15588  /*
15600 15589   * This routine is called when memory is in short supply and returns a free
15601 15590   * hmeblk of the requested size from the cpu pending lists.
15602 15591   */
15603 15592  static struct hme_blk *
15604 15593  sfmmu_check_pending_hblks(int size)
15605 15594  {
15606 15595          int i;
15607 15596          struct hme_blk *hmeblkp = NULL, *last_hmeblkp;
15608 15597          int found_hmeblk;
15609 15598          cpuset_t cpuset = cpu_ready_set;
15610 15599          cpu_hme_pend_t *cpuhp;
15611 15600  
15612 15601          /* Flush cpu hblk pending queues */
15613 15602          for (i = 0; i < NCPU; i++) {
15614 15603                  cpuhp = &cpu_hme_pend[i];
15615 15604                  if (cpuhp->chp_listp != NULL)  {
15616 15605                          mutex_enter(&cpuhp->chp_mutex);
15617 15606                          if (cpuhp->chp_listp == NULL)  {
15618 15607                                  mutex_exit(&cpuhp->chp_mutex);
15619 15608                                  continue;
15620 15609                          }
15621 15610                          found_hmeblk = 0;
15622 15611                          last_hmeblkp = NULL;
15623 15612                          for (hmeblkp = cpuhp->chp_listp; hmeblkp != NULL;
15624 15613                              hmeblkp = hmeblkp->hblk_next) {
15625 15614                                  if (get_hblk_ttesz(hmeblkp) == size) {
15626 15615                                          if (last_hmeblkp == NULL) {
15627 15616                                                  cpuhp->chp_listp =
15628 15617                                                      hmeblkp->hblk_next;
15629 15618                                          } else {
15630 15619                                                  last_hmeblkp->hblk_next =
15631 15620                                                      hmeblkp->hblk_next;
15632 15621                                          }
15633 15622                                          ASSERT(cpuhp->chp_count > 0);
15634 15623                                          cpuhp->chp_count--;
15635 15624                                          found_hmeblk = 1;
15636 15625                                          break;
15637 15626                                  } else {
15638 15627                                          last_hmeblkp = hmeblkp;
15639 15628                                  }
15640 15629                          }
15641 15630                          mutex_exit(&cpuhp->chp_mutex);
15642 15631  
15643 15632                          if (found_hmeblk) {
15644 15633                                  kpreempt_disable();
15645 15634                                  CPUSET_DEL(cpuset, CPU->cpu_id);
15646 15635                                  xt_sync(cpuset);
15647 15636                                  xt_sync(cpuset);
15648 15637                                  kpreempt_enable();
15649 15638                                  return (hmeblkp);
15650 15639                          }
15651 15640                  }
15652 15641          }
15653 15642          return (NULL);
15654 15643  }
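
sfmmu_check_pending_hblks() peeks at each cpu's chp_listp without the mutex and re-checks under chp_mutex only when the peek looked promising, which keeps the common empty-queue case cheap. A minimal pthread-based sketch of that double-check pattern (hypothetical queue types, not the kernel cpu_hme_pend_t):

#include <pthread.h>
#include <stddef.h>

struct qnode {
        struct qnode    *next;
};

struct pcpu_q {
        pthread_mutex_t lock;
        struct qnode    *head;
};

/*
 * Cheap unlocked peek first; take the lock and re-check only when the
 * queue appeared non-empty, then pop one node if that is still true.
 */
static struct qnode *
try_steal(struct pcpu_q *q)
{
        struct qnode *item = NULL;

        if (q->head == NULL)
                return (NULL);
        (void) pthread_mutex_lock(&q->lock);
        if ((item = q->head) != NULL)
                q->head = item->next;
        (void) pthread_mutex_unlock(&q->lock);
        return (item);
}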
  
    | 
      ↓ open down ↓ | 
    9436 lines elided | 
    
      ↑ open up ↑ | 
  