OS-3088 need a lighter-weight page invalidation mechanism for zone memcap
OS-881 To work around OS-580, add support to invalidate mappings from only a single process
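
For orientation, OS-881's "single process" requirement can be pictured as a filter over a page's mapping list: walk the hments under the mapping list lock and touch only the entries whose htable belongs to the hat of interest, leaving every other process's mapping of the page alone. The sketch below is illustrative only and is not code from this webrev; the function name zone_memcap_page_inval_sketch is made up, the actual unmap/TLB-shootdown step is elided, and it simply leans on the mapping-list primitives (x86_hm_enter/x86_hm_exit, hment_walk) that the i86pc HAT already uses.

	/*
	 * Illustrative sketch only (not part of this change): invalidate a
	 * page's mappings for a single hat, skipping every other process.
	 */
	static void
	zone_memcap_page_inval_sketch(page_t *pp, hat_t *curhat)
	{
		hment_t		*hm = NULL;
		htable_t	*ht;
		uint_t		entry;

		x86_hm_enter(pp);		/* hold the mapping list lock */
		while ((hm = hment_walk(pp, &ht, &entry, hm)) != NULL) {
			if (ht->ht_hat != curhat)
				continue;	/* another process: leave it mapped */
			/*
			 * The real work (clearing the PTE, retiring the hment,
			 * and restricting the TLB shootdown to curhat's CPUs)
			 * is elided here; see the existing page unload code in
			 * this file.
			 */
		}
		x86_hm_exit(pp);
	}

Filtering on ht_hat is what keeps the operation cheap for zone memcap enforcement: only the offending process's translations (and only its CPUs) need to be disturbed, which is presumably the "lighter weight" behavior OS-3088 asks for.
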
          --- old/usr/src/uts/i86pc/vm/hat_i86.c
          +++ new/usr/src/uts/i86pc/vm/hat_i86.c
   1    1  /*
   2    2   * CDDL HEADER START
   3    3   *
   4    4   * The contents of this file are subject to the terms of the
   5    5   * Common Development and Distribution License (the "License").
   6    6   * You may not use this file except in compliance with the License.
   7    7   *
   8    8   * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9    9   * or http://www.opensolaris.org/os/licensing.
  10   10   * See the License for the specific language governing permissions
  11   11   * and limitations under the License.
  12   12   *
  13   13   * When distributing Covered Code, include this CDDL HEADER in each
  14   14   * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15   15   * If applicable, add the following below this CDDL HEADER, with the
  16   16   * fields enclosed by brackets "[]" replaced with your own identifying
  17   17   * information: Portions Copyright [yyyy] [name of copyright owner]
  18   18   *
  19   19   * CDDL HEADER END
  20   20   */
  21   21  /*
  22   22   * Copyright (c) 1992, 2010, Oracle and/or its affiliates. All rights reserved.
  23   23   */
  24   24  /*
  25   25   * Copyright (c) 2010, Intel Corporation.
  26   26   * All rights reserved.
  27   27   */
  28   28  /*
  29   29   * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
  30   30   * Copyright (c) 2014, 2015 by Delphix. All rights reserved.
       31 + * Copyright 2014 Joyent, Inc.  All rights reserved.
  31   32   */
  32   33  
  33   34  /*
  34   35   * VM - Hardware Address Translation management for i386 and amd64
  35   36   *
  36   37   * Implementation of the interfaces described in <common/vm/hat.h>
  37   38   *
  38   39   * Nearly all the details of how the hardware is managed should not be
  39   40   * visible outside this layer except for misc. machine specific functions
  40   41   * that work in conjunction with this code.
  41   42   *
  42   43   * Routines used only inside of i86pc/vm start with hati_ for HAT Internal.
  43   44   */
  44   45  
  45   46  #include <sys/machparam.h>
  46   47  #include <sys/machsystm.h>
  47   48  #include <sys/mman.h>
  48   49  #include <sys/types.h>
  49   50  #include <sys/systm.h>
  50   51  #include <sys/cpuvar.h>
  51   52  #include <sys/thread.h>
  52   53  #include <sys/proc.h>
  53   54  #include <sys/cpu.h>
  54   55  #include <sys/kmem.h>
  55   56  #include <sys/disp.h>
  56   57  #include <sys/shm.h>
  57   58  #include <sys/sysmacros.h>
  58   59  #include <sys/machparam.h>
  59   60  #include <sys/vmem.h>
  60   61  #include <sys/vmsystm.h>
  61   62  #include <sys/promif.h>
  62   63  #include <sys/var.h>
  63   64  #include <sys/x86_archext.h>
  64   65  #include <sys/atomic.h>
  65   66  #include <sys/bitmap.h>
  66   67  #include <sys/controlregs.h>
  67   68  #include <sys/bootconf.h>
  68   69  #include <sys/bootsvcs.h>
  69   70  #include <sys/bootinfo.h>
  70   71  #include <sys/archsystm.h>
  71   72  
  72   73  #include <vm/seg_kmem.h>
  73   74  #include <vm/hat_i86.h>
  74   75  #include <vm/as.h>
  75   76  #include <vm/seg.h>
  76   77  #include <vm/page.h>
  77   78  #include <vm/seg_kp.h>
  78   79  #include <vm/seg_kpm.h>
  79   80  #include <vm/vm_dep.h>
  80   81  #ifdef __xpv
  81   82  #include <sys/hypervisor.h>
  82   83  #endif
  83   84  #include <vm/kboot_mmu.h>
  84   85  #include <vm/seg_spt.h>
  85   86  
  86   87  #include <sys/cmn_err.h>
  87   88  
  88   89  /*
  89   90   * Basic parameters for hat operation.
  90   91   */
  91   92  struct hat_mmu_info mmu;
  92   93  
  93   94  /*
  94   95   * The page that is the kernel's top level pagetable.
  95   96   *
  96   97   * For 32 bit PAE support on i86pc, the kernel hat will use the 1st 4 entries
  97   98   * on this 4K page for its top level page table. The remaining groups of
  98   99   * 4 entries are used for per processor copies of user VLP pagetables for
  99  100   * running threads.  See hat_switch() and reload_pae32() for details.
 100  101   *
 101  102   * vlp_page[0..3] - level==2 PTEs for kernel HAT
 102  103   * vlp_page[4..7] - level==2 PTEs for user thread on cpu 0
  103  104   * vlp_page[8..11] - level==2 PTEs for user thread on cpu 1
 104  105   * etc...
 105  106   */
 106  107  static x86pte_t *vlp_page;
 107  108  
 108  109  /*
 109  110   * forward declaration of internal utility routines
 110  111   */
 111  112  static x86pte_t hati_update_pte(htable_t *ht, uint_t entry, x86pte_t expected,
 112  113          x86pte_t new);
 113  114  
 114  115  /*
 115  116   * The kernel address space exists in all HATs. To implement this the
 116  117   * kernel reserves a fixed number of entries in the topmost level(s) of page
 117  118   * tables. The values are setup during startup and then copied to every user
 118  119   * hat created by hat_alloc(). This means that kernelbase must be:
 119  120   *
 120  121   *        4Meg aligned for 32 bit kernels
 121  122   *      512Gig aligned for x86_64 64 bit kernel
 122  123   *
 123  124   * The hat_kernel_range_ts describe what needs to be copied from kernel hat
 124  125   * to each user hat.
 125  126   */
 126  127  typedef struct hat_kernel_range {
 127  128          level_t         hkr_level;
 128  129          uintptr_t       hkr_start_va;
 129  130          uintptr_t       hkr_end_va;     /* zero means to end of memory */
 130  131  } hat_kernel_range_t;
 131  132  #define NUM_KERNEL_RANGE 2
 132  133  static hat_kernel_range_t kernel_ranges[NUM_KERNEL_RANGE];
 133  134  static int num_kernel_ranges;
 134  135  
 135  136  uint_t use_boot_reserve = 1;    /* cleared after early boot process */
 136  137  uint_t can_steal_post_boot = 0; /* set late in boot to enable stealing */
 137  138  
 138  139  /*
 139  140   * enable_1gpg: controls 1g page support for user applications.
 140  141   * By default, 1g pages are exported to user applications. enable_1gpg can
 141  142   * be set to 0 to not export.
 142  143   */
 143  144  int     enable_1gpg = 1;
 144  145  
 145  146  /*
  146  147   * AMD shanghai processors provide better management of 1gb ptes in their tlbs.
 147  148   * By default, 1g page support will be disabled for pre-shanghai AMD
 148  149   * processors that don't have optimal tlb support for the 1g page size.
 149  150   * chk_optimal_1gtlb can be set to 0 to force 1g page support on sub-optimal
 150  151   * processors.
 151  152   */
 152  153  int     chk_optimal_1gtlb = 1;
 153  154  
 154  155  
 155  156  #ifdef DEBUG
 156  157  uint_t  map1gcnt;
 157  158  #endif
 158  159  
 159  160  
 160  161  /*
 161  162   * A cpuset for all cpus. This is used for kernel address cross calls, since
 162  163   * the kernel addresses apply to all cpus.
 163  164   */
 164  165  cpuset_t khat_cpuset;
 165  166  
 166  167  /*
 167  168   * management stuff for hat structures
 168  169   */
 169  170  kmutex_t        hat_list_lock;
 170  171  kcondvar_t      hat_list_cv;
 171  172  kmem_cache_t    *hat_cache;
 172  173  kmem_cache_t    *hat_hash_cache;
 173  174  kmem_cache_t    *vlp_hash_cache;
 174  175  
 175  176  /*
 176  177   * Simple statistics
 177  178   */
 178  179  struct hatstats hatstat;
 179  180  
 180  181  /*
 181  182   * Some earlier hypervisor versions do not emulate cmpxchg of PTEs
 182  183   * correctly.  For such hypervisors we must set PT_USER for kernel
 183  184   * entries ourselves (normally the emulation would set PT_USER for
 184  185   * kernel entries and PT_USER|PT_GLOBAL for user entries).  pt_kern is
 185  186   * thus set appropriately.  Note that dboot/kbm is OK, as only the full
 186  187   * HAT uses cmpxchg() and the other paths (hypercall etc.) were never
 187  188   * incorrect.
 188  189   */
 189  190  int pt_kern;
 190  191  
 191  192  /*
 192  193   * useful stuff for atomic access/clearing/setting REF/MOD/RO bits in page_t's.
 193  194   */
 194  195  extern void atomic_orb(uchar_t *addr, uchar_t val);
 195  196  extern void atomic_andb(uchar_t *addr, uchar_t val);
 196  197  
 197  198  #ifndef __xpv
 198  199  extern pfn_t memseg_get_start(struct memseg *);
 199  200  #endif
 200  201  
 201  202  #define PP_GETRM(pp, rmmask)    (pp->p_nrm & rmmask)
 202  203  #define PP_ISMOD(pp)            PP_GETRM(pp, P_MOD)
 203  204  #define PP_ISREF(pp)            PP_GETRM(pp, P_REF)
 204  205  #define PP_ISRO(pp)             PP_GETRM(pp, P_RO)
 205  206  
 206  207  #define PP_SETRM(pp, rm)        atomic_orb(&(pp->p_nrm), rm)
 207  208  #define PP_SETMOD(pp)           PP_SETRM(pp, P_MOD)
 208  209  #define PP_SETREF(pp)           PP_SETRM(pp, P_REF)
 209  210  #define PP_SETRO(pp)            PP_SETRM(pp, P_RO)
 210  211  
 211  212  #define PP_CLRRM(pp, rm)        atomic_andb(&(pp->p_nrm), ~(rm))
 212  213  #define PP_CLRMOD(pp)           PP_CLRRM(pp, P_MOD)
 213  214  #define PP_CLRREF(pp)           PP_CLRRM(pp, P_REF)
 214  215  #define PP_CLRRO(pp)            PP_CLRRM(pp, P_RO)
 215  216  #define PP_CLRALL(pp)           PP_CLRRM(pp, P_MOD | P_REF | P_RO)
 216  217  
 217  218  /*
 218  219   * kmem cache constructor for struct hat
 219  220   */
 220  221  /*ARGSUSED*/
 221  222  static int
 222  223  hati_constructor(void *buf, void *handle, int kmflags)
 223  224  {
 224  225          hat_t   *hat = buf;
 225  226  
 226  227          mutex_init(&hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL);
 227  228          bzero(hat->hat_pages_mapped,
 228  229              sizeof (pgcnt_t) * (mmu.max_page_level + 1));
 229  230          hat->hat_ism_pgcnt = 0;
 230  231          hat->hat_stats = 0;
 231  232          hat->hat_flags = 0;
 232  233          CPUSET_ZERO(hat->hat_cpus);
 233  234          hat->hat_htable = NULL;
 234  235          hat->hat_ht_hash = NULL;
 235  236          return (0);
 236  237  }
 237  238  
 238  239  /*
 239  240   * Allocate a hat structure for as. We also create the top level
 240  241   * htable and initialize it to contain the kernel hat entries.
 241  242   */
 242  243  hat_t *
 243  244  hat_alloc(struct as *as)
 244  245  {
 245  246          hat_t                   *hat;
 246  247          htable_t                *ht;    /* top level htable */
 247  248          uint_t                  use_vlp;
 248  249          uint_t                  r;
 249  250          hat_kernel_range_t      *rp;
 250  251          uintptr_t               va;
 251  252          uintptr_t               eva;
 252  253          uint_t                  start;
 253  254          uint_t                  cnt;
 254  255          htable_t                *src;
 255  256  
 256  257          /*
 257  258           * Once we start creating user process HATs we can enable
 258  259           * the htable_steal() code.
 259  260           */
 260  261          if (can_steal_post_boot == 0)
 261  262                  can_steal_post_boot = 1;
 262  263  
 263  264          ASSERT(AS_WRITE_HELD(as));
 264  265          hat = kmem_cache_alloc(hat_cache, KM_SLEEP);
 265  266          hat->hat_as = as;
 266  267          mutex_init(&hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL);
 267  268          ASSERT(hat->hat_flags == 0);
 268  269  
 269  270  #if defined(__xpv)
 270  271          /*
 271  272           * No VLP stuff on the hypervisor due to the 64-bit split top level
 272  273           * page tables.  On 32-bit it's not needed as the hypervisor takes
 273  274           * care of copying the top level PTEs to a below 4Gig page.
 274  275           */
 275  276          use_vlp = 0;
 276  277  #else   /* __xpv */
  277  278          /* 32 bit processes use a VLP style hat when running with PAE */
 278  279  #if defined(__amd64)
 279  280          use_vlp = (ttoproc(curthread)->p_model == DATAMODEL_ILP32);
 280  281  #elif defined(__i386)
 281  282          use_vlp = mmu.pae_hat;
 282  283  #endif
 283  284  #endif  /* __xpv */
 284  285          if (use_vlp) {
 285  286                  hat->hat_flags = HAT_VLP;
 286  287                  bzero(hat->hat_vlp_ptes, VLP_SIZE);
 287  288          }
 288  289  
 289  290          /*
 290  291           * Allocate the htable hash
 291  292           */
 292  293          if ((hat->hat_flags & HAT_VLP)) {
 293  294                  hat->hat_num_hash = mmu.vlp_hash_cnt;
 294  295                  hat->hat_ht_hash = kmem_cache_alloc(vlp_hash_cache, KM_SLEEP);
 295  296          } else {
 296  297                  hat->hat_num_hash = mmu.hash_cnt;
 297  298                  hat->hat_ht_hash = kmem_cache_alloc(hat_hash_cache, KM_SLEEP);
 298  299          }
 299  300          bzero(hat->hat_ht_hash, hat->hat_num_hash * sizeof (htable_t *));
 300  301  
 301  302          /*
 302  303           * Initialize Kernel HAT entries at the top of the top level page
 303  304           * tables for the new hat.
 304  305           */
 305  306          hat->hat_htable = NULL;
 306  307          hat->hat_ht_cached = NULL;
 307  308          XPV_DISALLOW_MIGRATE();
 308  309          ht = htable_create(hat, (uintptr_t)0, TOP_LEVEL(hat), NULL);
 309  310          hat->hat_htable = ht;
 310  311  
 311  312  #if defined(__amd64)
 312  313          if (hat->hat_flags & HAT_VLP)
 313  314                  goto init_done;
 314  315  #endif
 315  316  
 316  317          for (r = 0; r < num_kernel_ranges; ++r) {
 317  318                  rp = &kernel_ranges[r];
 318  319                  for (va = rp->hkr_start_va; va != rp->hkr_end_va;
 319  320                      va += cnt * LEVEL_SIZE(rp->hkr_level)) {
 320  321  
 321  322                          if (rp->hkr_level == TOP_LEVEL(hat))
 322  323                                  ht = hat->hat_htable;
 323  324                          else
 324  325                                  ht = htable_create(hat, va, rp->hkr_level,
 325  326                                      NULL);
 326  327  
 327  328                          start = htable_va2entry(va, ht);
 328  329                          cnt = HTABLE_NUM_PTES(ht) - start;
 329  330                          eva = va +
 330  331                              ((uintptr_t)cnt << LEVEL_SHIFT(rp->hkr_level));
 331  332                          if (rp->hkr_end_va != 0 &&
 332  333                              (eva > rp->hkr_end_va || eva == 0))
 333  334                                  cnt = htable_va2entry(rp->hkr_end_va, ht) -
 334  335                                      start;
 335  336  
 336  337  #if defined(__i386) && !defined(__xpv)
 337  338                          if (ht->ht_flags & HTABLE_VLP) {
 338  339                                  bcopy(&vlp_page[start],
 339  340                                      &hat->hat_vlp_ptes[start],
 340  341                                      cnt * sizeof (x86pte_t));
 341  342                                  continue;
 342  343                          }
 343  344  #endif
 344  345                          src = htable_lookup(kas.a_hat, va, rp->hkr_level);
 345  346                          ASSERT(src != NULL);
 346  347                          x86pte_copy(src, ht, start, cnt);
 347  348                          htable_release(src);
 348  349                  }
 349  350          }
 350  351  
 351  352  init_done:
 352  353  
 353  354  #if defined(__xpv)
 354  355          /*
 355  356           * Pin top level page tables after initializing them
 356  357           */
 357  358          xen_pin(hat->hat_htable->ht_pfn, mmu.max_level);
 358  359  #if defined(__amd64)
 359  360          xen_pin(hat->hat_user_ptable, mmu.max_level);
 360  361  #endif
 361  362  #endif
 362  363          XPV_ALLOW_MIGRATE();
 363  364  
 364  365          /*
 365  366           * Put it at the start of the global list of all hats (used by stealing)
 366  367           *
 367  368           * kas.a_hat is not in the list but is instead used to find the
 368  369           * first and last items in the list.
 369  370           *
 370  371           * - kas.a_hat->hat_next points to the start of the user hats.
 371  372           *   The list ends where hat->hat_next == NULL
 372  373           *
 373  374           * - kas.a_hat->hat_prev points to the last of the user hats.
 374  375           *   The list begins where hat->hat_prev == NULL
 375  376           */
 376  377          mutex_enter(&hat_list_lock);
 377  378          hat->hat_prev = NULL;
 378  379          hat->hat_next = kas.a_hat->hat_next;
 379  380          if (hat->hat_next)
 380  381                  hat->hat_next->hat_prev = hat;
 381  382          else
 382  383                  kas.a_hat->hat_prev = hat;
 383  384          kas.a_hat->hat_next = hat;
 384  385          mutex_exit(&hat_list_lock);
 385  386  
 386  387          return (hat);
 387  388  }
 388  389  
 389  390  /*
 390  391   * process has finished executing but as has not been cleaned up yet.
 391  392   */
 392  393  /*ARGSUSED*/
 393  394  void
 394  395  hat_free_start(hat_t *hat)
 395  396  {
 396  397          ASSERT(AS_WRITE_HELD(hat->hat_as));
 397  398  
 398  399          /*
 399  400           * If the hat is currently a stealing victim, wait for the stealing
 400  401           * to finish.  Once we mark it as HAT_FREEING, htable_steal()
 401  402           * won't look at its pagetables anymore.
 402  403           */
 403  404          mutex_enter(&hat_list_lock);
 404  405          while (hat->hat_flags & HAT_VICTIM)
 405  406                  cv_wait(&hat_list_cv, &hat_list_lock);
 406  407          hat->hat_flags |= HAT_FREEING;
 407  408          mutex_exit(&hat_list_lock);
 408  409  }
 409  410  
 410  411  /*
 411  412   * An address space is being destroyed, so we destroy the associated hat.
 412  413   */
 413  414  void
 414  415  hat_free_end(hat_t *hat)
 415  416  {
 416  417          kmem_cache_t *cache;
 417  418  
 418  419          ASSERT(hat->hat_flags & HAT_FREEING);
 419  420  
 420  421          /*
 421  422           * must not be running on the given hat
 422  423           */
 423  424          ASSERT(CPU->cpu_current_hat != hat);
 424  425  
 425  426          /*
 426  427           * Remove it from the list of HATs
 427  428           */
 428  429          mutex_enter(&hat_list_lock);
 429  430          if (hat->hat_prev)
 430  431                  hat->hat_prev->hat_next = hat->hat_next;
 431  432          else
 432  433                  kas.a_hat->hat_next = hat->hat_next;
 433  434          if (hat->hat_next)
 434  435                  hat->hat_next->hat_prev = hat->hat_prev;
 435  436          else
 436  437                  kas.a_hat->hat_prev = hat->hat_prev;
 437  438          mutex_exit(&hat_list_lock);
 438  439          hat->hat_next = hat->hat_prev = NULL;
 439  440  
 440  441  #if defined(__xpv)
 441  442          /*
 442  443           * On the hypervisor, unpin top level page table(s)
 443  444           */
 444  445          xen_unpin(hat->hat_htable->ht_pfn);
 445  446  #if defined(__amd64)
 446  447          xen_unpin(hat->hat_user_ptable);
 447  448  #endif
 448  449  #endif
 449  450  
 450  451          /*
 451  452           * Make a pass through the htables freeing them all up.
 452  453           */
 453  454          htable_purge_hat(hat);
 454  455  
 455  456          /*
 456  457           * Decide which kmem cache the hash table came from, then free it.
 457  458           */
 458  459          if (hat->hat_flags & HAT_VLP)
 459  460                  cache = vlp_hash_cache;
 460  461          else
 461  462                  cache = hat_hash_cache;
 462  463          kmem_cache_free(cache, hat->hat_ht_hash);
 463  464          hat->hat_ht_hash = NULL;
 464  465  
 465  466          hat->hat_flags = 0;
 466  467          kmem_cache_free(hat_cache, hat);
 467  468  }
 468  469  
 469  470  /*
 470  471   * round kernelbase down to a supported value to use for _userlimit
 471  472   *
 472  473   * userlimit must be aligned down to an entry in the top level htable.
 473  474   * The one exception is for 32 bit HAT's running PAE.
 474  475   */
 475  476  uintptr_t
 476  477  hat_kernelbase(uintptr_t va)
 477  478  {
 478  479  #if defined(__i386)
 479  480          va &= LEVEL_MASK(1);
 480  481  #endif
 481  482          if (IN_VA_HOLE(va))
 482  483                  panic("_userlimit %p will fall in VA hole\n", (void *)va);
 483  484          return (va);
 484  485  }
 485  486  
 486  487  /*
  487  488   * Determine the largest page size (highest pagetable level) supported.
 488  489   */
 489  490  static void
 490  491  set_max_page_level()
 491  492  {
 492  493          level_t lvl;
 493  494  
 494  495          if (!kbm_largepage_support) {
 495  496                  lvl = 0;
 496  497          } else {
 497  498                  if (is_x86_feature(x86_featureset, X86FSET_1GPG)) {
 498  499                          lvl = 2;
 499  500                          if (chk_optimal_1gtlb &&
 500  501                              cpuid_opteron_erratum(CPU, 6671130)) {
 501  502                                  lvl = 1;
 502  503                          }
 503  504                          if (plat_mnode_xcheck(LEVEL_SIZE(2) >>
 504  505                              LEVEL_SHIFT(0))) {
 505  506                                  lvl = 1;
 506  507                          }
 507  508                  } else {
 508  509                          lvl = 1;
 509  510                  }
 510  511          }
 511  512          mmu.max_page_level = lvl;
 512  513  
 513  514          if ((lvl == 2) && (enable_1gpg == 0))
 514  515                  mmu.umax_page_level = 1;
 515  516          else
 516  517                  mmu.umax_page_level = lvl;
 517  518  }
 518  519  
 519  520  /*
 520  521   * Initialize hat data structures based on processor MMU information.
 521  522   */
 522  523  void
 523  524  mmu_init(void)
 524  525  {
 525  526          uint_t max_htables;
 526  527          uint_t pa_bits;
 527  528          uint_t va_bits;
 528  529          int i;
 529  530  
 530  531          /*
 531  532           * If CPU enabled the page table global bit, use it for the kernel
 532  533           * This is bit 7 in CR4 (PGE - Page Global Enable).
 533  534           */
 534  535          if (is_x86_feature(x86_featureset, X86FSET_PGE) &&
 535  536              (getcr4() & CR4_PGE) != 0)
 536  537                  mmu.pt_global = PT_GLOBAL;
 537  538  
 538  539          /*
 539  540           * Detect NX and PAE usage.
 540  541           */
 541  542          mmu.pae_hat = kbm_pae_support;
 542  543          if (kbm_nx_support)
 543  544                  mmu.pt_nx = PT_NX;
 544  545          else
 545  546                  mmu.pt_nx = 0;
 546  547  
 547  548          /*
 548  549           * Use CPU info to set various MMU parameters
 549  550           */
 550  551          cpuid_get_addrsize(CPU, &pa_bits, &va_bits);
 551  552  
 552  553          if (va_bits < sizeof (void *) * NBBY) {
 553  554                  mmu.hole_start = (1ul << (va_bits - 1));
 554  555                  mmu.hole_end = 0ul - mmu.hole_start - 1;
 555  556          } else {
 556  557                  mmu.hole_end = 0;
 557  558                  mmu.hole_start = mmu.hole_end - 1;
 558  559          }
 559  560  #if defined(OPTERON_ERRATUM_121)
 560  561          /*
 561  562           * If erratum 121 has already been detected at this time, hole_start
 562  563           * contains the value to be subtracted from mmu.hole_start.
 563  564           */
 564  565          ASSERT(hole_start == 0 || opteron_erratum_121 != 0);
 565  566          hole_start = mmu.hole_start - hole_start;
 566  567  #else
 567  568          hole_start = mmu.hole_start;
 568  569  #endif
 569  570          hole_end = mmu.hole_end;
 570  571  
 571  572          mmu.highest_pfn = mmu_btop((1ull << pa_bits) - 1);
 572  573          if (mmu.pae_hat == 0 && pa_bits > 32)
 573  574                  mmu.highest_pfn = PFN_4G - 1;
 574  575  
 575  576          if (mmu.pae_hat) {
 576  577                  mmu.pte_size = 8;       /* 8 byte PTEs */
 577  578                  mmu.pte_size_shift = 3;
 578  579          } else {
 579  580                  mmu.pte_size = 4;       /* 4 byte PTEs */
 580  581                  mmu.pte_size_shift = 2;
 581  582          }
 582  583  
 583  584          if (mmu.pae_hat && !is_x86_feature(x86_featureset, X86FSET_PAE))
 584  585                  panic("Processor does not support PAE");
 585  586  
 586  587          if (!is_x86_feature(x86_featureset, X86FSET_CX8))
 587  588                  panic("Processor does not support cmpxchg8b instruction");
 588  589  
 589  590  #if defined(__amd64)
 590  591  
 591  592          mmu.num_level = 4;
 592  593          mmu.max_level = 3;
 593  594          mmu.ptes_per_table = 512;
 594  595          mmu.top_level_count = 512;
 595  596  
 596  597          mmu.level_shift[0] = 12;
 597  598          mmu.level_shift[1] = 21;
 598  599          mmu.level_shift[2] = 30;
 599  600          mmu.level_shift[3] = 39;
 600  601  
 601  602  #elif defined(__i386)
 602  603  
 603  604          if (mmu.pae_hat) {
 604  605                  mmu.num_level = 3;
 605  606                  mmu.max_level = 2;
 606  607                  mmu.ptes_per_table = 512;
 607  608                  mmu.top_level_count = 4;
 608  609  
 609  610                  mmu.level_shift[0] = 12;
 610  611                  mmu.level_shift[1] = 21;
 611  612                  mmu.level_shift[2] = 30;
 612  613  
 613  614          } else {
 614  615                  mmu.num_level = 2;
 615  616                  mmu.max_level = 1;
 616  617                  mmu.ptes_per_table = 1024;
 617  618                  mmu.top_level_count = 1024;
 618  619  
 619  620                  mmu.level_shift[0] = 12;
 620  621                  mmu.level_shift[1] = 22;
 621  622          }
 622  623  
 623  624  #endif  /* __i386 */
 624  625  
 625  626          for (i = 0; i < mmu.num_level; ++i) {
 626  627                  mmu.level_size[i] = 1UL << mmu.level_shift[i];
 627  628                  mmu.level_offset[i] = mmu.level_size[i] - 1;
 628  629                  mmu.level_mask[i] = ~mmu.level_offset[i];
 629  630          }
 630  631  
 631  632          set_max_page_level();
 632  633  
 633  634          mmu_page_sizes = mmu.max_page_level + 1;
 634  635          mmu_exported_page_sizes = mmu.umax_page_level + 1;
 635  636  
 636  637          /* restrict legacy applications from using pagesizes 1g and above */
 637  638          mmu_legacy_page_sizes =
 638  639              (mmu_exported_page_sizes > 2) ? 2 : mmu_exported_page_sizes;
 639  640  
 640  641  
 641  642          for (i = 0; i <= mmu.max_page_level; ++i) {
 642  643                  mmu.pte_bits[i] = PT_VALID | pt_kern;
 643  644                  if (i > 0)
 644  645                          mmu.pte_bits[i] |= PT_PAGESIZE;
 645  646          }
 646  647  
 647  648          /*
 648  649           * NOTE Legacy 32 bit PAE mode only has the P_VALID bit at top level.
 649  650           */
 650  651          for (i = 1; i < mmu.num_level; ++i)
 651  652                  mmu.ptp_bits[i] = PT_PTPBITS;
 652  653  
 653  654  #if defined(__i386)
 654  655          mmu.ptp_bits[2] = PT_VALID;
 655  656  #endif
 656  657  
 657  658          /*
 658  659           * Compute how many hash table entries to have per process for htables.
 659  660           * We start with 1 page's worth of entries.
 660  661           *
  661  662           * If physical memory is small, reduce the amount needed to cover it.
 662  663           */
 663  664          max_htables = physmax / mmu.ptes_per_table;
 664  665          mmu.hash_cnt = MMU_PAGESIZE / sizeof (htable_t *);
 665  666          while (mmu.hash_cnt > 16 && mmu.hash_cnt >= max_htables)
 666  667                  mmu.hash_cnt >>= 1;
 667  668          mmu.vlp_hash_cnt = mmu.hash_cnt;
 668  669  
 669  670  #if defined(__amd64)
 670  671          /*
 671  672           * If running in 64 bits and physical memory is large,
 672  673           * increase the size of the cache to cover all of memory for
 673  674           * a 64 bit process.
 674  675           */
 675  676  #define HASH_MAX_LENGTH 4
 676  677          while (mmu.hash_cnt * HASH_MAX_LENGTH < max_htables)
 677  678                  mmu.hash_cnt <<= 1;
 678  679  #endif
 679  680  }
 680  681  
 681  682  
 682  683  /*
 683  684   * initialize hat data structures
 684  685   */
 685  686  void
 686  687  hat_init()
 687  688  {
 688  689  #if defined(__i386)
 689  690          /*
 690  691           * _userlimit must be aligned correctly
 691  692           */
 692  693          if ((_userlimit & LEVEL_MASK(1)) != _userlimit) {
 693  694                  prom_printf("hat_init(): _userlimit=%p, not aligned at %p\n",
 694  695                      (void *)_userlimit, (void *)LEVEL_SIZE(1));
 695  696                  halt("hat_init(): Unable to continue");
 696  697          }
 697  698  #endif
 698  699  
 699  700          cv_init(&hat_list_cv, NULL, CV_DEFAULT, NULL);
 700  701  
 701  702          /*
 702  703           * initialize kmem caches
 703  704           */
 704  705          htable_init();
 705  706          hment_init();
 706  707  
 707  708          hat_cache = kmem_cache_create("hat_t",
 708  709              sizeof (hat_t), 0, hati_constructor, NULL, NULL,
 709  710              NULL, 0, 0);
 710  711  
 711  712          hat_hash_cache = kmem_cache_create("HatHash",
 712  713              mmu.hash_cnt * sizeof (htable_t *), 0, NULL, NULL, NULL,
 713  714              NULL, 0, 0);
 714  715  
 715  716          /*
  716  717           * VLP hats can use a smaller hash table size on large memory machines
 717  718           */
 718  719          if (mmu.hash_cnt == mmu.vlp_hash_cnt) {
 719  720                  vlp_hash_cache = hat_hash_cache;
 720  721          } else {
 721  722                  vlp_hash_cache = kmem_cache_create("HatVlpHash",
 722  723                      mmu.vlp_hash_cnt * sizeof (htable_t *), 0, NULL, NULL, NULL,
 723  724                      NULL, 0, 0);
 724  725          }
 725  726  
 726  727          /*
 727  728           * Set up the kernel's hat
 728  729           */
 729  730          AS_LOCK_ENTER(&kas, RW_WRITER);
 730  731          kas.a_hat = kmem_cache_alloc(hat_cache, KM_NOSLEEP);
 731  732          mutex_init(&kas.a_hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL);
 732  733          kas.a_hat->hat_as = &kas;
 733  734          kas.a_hat->hat_flags = 0;
 734  735          AS_LOCK_EXIT(&kas);
 735  736  
 736  737          CPUSET_ZERO(khat_cpuset);
 737  738          CPUSET_ADD(khat_cpuset, CPU->cpu_id);
 738  739  
 739  740          /*
  740  741           * The kernel hat's next pointer serves as the head of the hat list.
 741  742           * The kernel hat's prev pointer tracks the last hat on the list for
 742  743           * htable_steal() to use.
 743  744           */
 744  745          kas.a_hat->hat_next = NULL;
 745  746          kas.a_hat->hat_prev = NULL;
 746  747  
 747  748          /*
 748  749           * Allocate an htable hash bucket for the kernel
 749  750           * XX64 - tune for 64 bit procs
 750  751           */
 751  752          kas.a_hat->hat_num_hash = mmu.hash_cnt;
 752  753          kas.a_hat->hat_ht_hash = kmem_cache_alloc(hat_hash_cache, KM_NOSLEEP);
 753  754          bzero(kas.a_hat->hat_ht_hash, mmu.hash_cnt * sizeof (htable_t *));
 754  755  
 755  756          /*
 756  757           * zero out the top level and cached htable pointers
 757  758           */
 758  759          kas.a_hat->hat_ht_cached = NULL;
 759  760          kas.a_hat->hat_htable = NULL;
 760  761  
 761  762          /*
 762  763           * Pre-allocate hrm_hashtab before enabling the collection of
 763  764           * refmod statistics.  Allocating on the fly would mean us
 764  765           * running the risk of suffering recursive mutex enters or
 765  766           * deadlocks.
 766  767           */
 767  768          hrm_hashtab = kmem_zalloc(HRM_HASHSIZE * sizeof (struct hrmstat *),
 768  769              KM_SLEEP);
 769  770  }
 770  771  
 771  772  /*
 772  773   * Prepare CPU specific pagetables for VLP processes on 64 bit kernels.
 773  774   *
 774  775   * Each CPU has a set of 2 pagetables that are reused for any 32 bit
 775  776   * process it runs. They are the top level pagetable, hci_vlp_l3ptes, and
 776  777   * the next to top level table for the bottom 512 Gig, hci_vlp_l2ptes.
 777  778   */
 778  779  /*ARGSUSED*/
 779  780  static void
 780  781  hat_vlp_setup(struct cpu *cpu)
 781  782  {
 782  783  #if defined(__amd64) && !defined(__xpv)
 783  784          struct hat_cpu_info *hci = cpu->cpu_hat_info;
 784  785          pfn_t pfn;
 785  786  
 786  787          /*
 787  788           * allocate the level==2 page table for the bottom most
 788  789           * 512Gig of address space (this is where 32 bit apps live)
 789  790           */
 790  791          ASSERT(hci != NULL);
 791  792          hci->hci_vlp_l2ptes = kmem_zalloc(MMU_PAGESIZE, KM_SLEEP);
 792  793  
 793  794          /*
 794  795           * Allocate a top level pagetable and copy the kernel's
 795  796           * entries into it. Then link in hci_vlp_l2ptes in the 1st entry.
 796  797           */
 797  798          hci->hci_vlp_l3ptes = kmem_zalloc(MMU_PAGESIZE, KM_SLEEP);
 798  799          hci->hci_vlp_pfn =
 799  800              hat_getpfnum(kas.a_hat, (caddr_t)hci->hci_vlp_l3ptes);
 800  801          ASSERT(hci->hci_vlp_pfn != PFN_INVALID);
 801  802          bcopy(vlp_page, hci->hci_vlp_l3ptes, MMU_PAGESIZE);
 802  803  
 803  804          pfn = hat_getpfnum(kas.a_hat, (caddr_t)hci->hci_vlp_l2ptes);
 804  805          ASSERT(pfn != PFN_INVALID);
 805  806          hci->hci_vlp_l3ptes[0] = MAKEPTP(pfn, 2);
 806  807  #endif /* __amd64 && !__xpv */
 807  808  }
 808  809  
 809  810  /*ARGSUSED*/
 810  811  static void
 811  812  hat_vlp_teardown(cpu_t *cpu)
 812  813  {
 813  814  #if defined(__amd64) && !defined(__xpv)
 814  815          struct hat_cpu_info *hci;
 815  816  
 816  817          if ((hci = cpu->cpu_hat_info) == NULL)
 817  818                  return;
 818  819          if (hci->hci_vlp_l2ptes)
 819  820                  kmem_free(hci->hci_vlp_l2ptes, MMU_PAGESIZE);
 820  821          if (hci->hci_vlp_l3ptes)
 821  822                  kmem_free(hci->hci_vlp_l3ptes, MMU_PAGESIZE);
 822  823  #endif
 823  824  }
 824  825  
 825  826  #define NEXT_HKR(r, l, s, e) {                  \
 826  827          kernel_ranges[r].hkr_level = l;         \
 827  828          kernel_ranges[r].hkr_start_va = s;      \
 828  829          kernel_ranges[r].hkr_end_va = e;        \
 829  830          ++r;                                    \
 830  831  }
 831  832  
 832  833  /*
 833  834   * Finish filling in the kernel hat.
 834  835   * Pre fill in all top level kernel page table entries for the kernel's
 835  836   * part of the address range.  From this point on we can't use any new
 836  837   * kernel large pages if they need PTE's at max_level
 837  838   *
 838  839   * create the kmap mappings.
 839  840   */
 840  841  void
 841  842  hat_init_finish(void)
 842  843  {
 843  844          size_t          size;
 844  845          uint_t          r = 0;
 845  846          uintptr_t       va;
 846  847          hat_kernel_range_t *rp;
 847  848  
 848  849  
 849  850          /*
 850  851           * We are now effectively running on the kernel hat.
 851  852           * Clearing use_boot_reserve shuts off using the pre-allocated boot
 852  853           * reserve for all HAT allocations.  From here on, the reserves are
 853  854           * only used when avoiding recursion in kmem_alloc().
 854  855           */
 855  856          use_boot_reserve = 0;
 856  857          htable_adjust_reserve();
 857  858  
 858  859          /*
 859  860           * User HATs are initialized with copies of all kernel mappings in
 860  861           * higher level page tables. Ensure that those entries exist.
 861  862           */
 862  863  #if defined(__amd64)
 863  864  
 864  865          NEXT_HKR(r, 3, kernelbase, 0);
 865  866  #if defined(__xpv)
 866  867          NEXT_HKR(r, 3, HYPERVISOR_VIRT_START, HYPERVISOR_VIRT_END);
 867  868  #endif
 868  869  
 869  870  #elif defined(__i386)
 870  871  
 871  872  #if !defined(__xpv)
 872  873          if (mmu.pae_hat) {
 873  874                  va = kernelbase;
 874  875                  if ((va & LEVEL_MASK(2)) != va) {
 875  876                          va = P2ROUNDUP(va, LEVEL_SIZE(2));
 876  877                          NEXT_HKR(r, 1, kernelbase, va);
 877  878                  }
 878  879                  if (va != 0)
 879  880                          NEXT_HKR(r, 2, va, 0);
 880  881          } else
 881  882  #endif /* __xpv */
 882  883                  NEXT_HKR(r, 1, kernelbase, 0);
 883  884  
 884  885  #endif /* __i386 */
 885  886  
 886  887          num_kernel_ranges = r;
 887  888  
 888  889          /*
 889  890           * Create all the kernel pagetables that will have entries
 890  891           * shared to user HATs.
 891  892           */
 892  893          for (r = 0; r < num_kernel_ranges; ++r) {
 893  894                  rp = &kernel_ranges[r];
 894  895                  for (va = rp->hkr_start_va; va != rp->hkr_end_va;
 895  896                      va += LEVEL_SIZE(rp->hkr_level)) {
 896  897                          htable_t *ht;
 897  898  
 898  899                          if (IN_HYPERVISOR_VA(va))
 899  900                                  continue;
 900  901  
 901  902                          /* can/must skip if a page mapping already exists */
 902  903                          if (rp->hkr_level <= mmu.max_page_level &&
 903  904                              (ht = htable_getpage(kas.a_hat, va, NULL)) !=
 904  905                              NULL) {
 905  906                                  htable_release(ht);
 906  907                                  continue;
 907  908                          }
 908  909  
 909  910                          (void) htable_create(kas.a_hat, va, rp->hkr_level - 1,
 910  911                              NULL);
 911  912                  }
 912  913          }
 913  914  
 914  915          /*
 915  916           * 32 bit PAE metal kernels use only 4 of the 512 entries in the
 916  917           * page holding the top level pagetable. We use the remainder for
 917  918           * the "per CPU" page tables for VLP processes.
 918  919           * Map the top level kernel pagetable into the kernel to make
  919  920           * it easy to use bcopy to access these tables.
 920  921           */
 921  922          if (mmu.pae_hat) {
 922  923                  vlp_page = vmem_alloc(heap_arena, MMU_PAGESIZE, VM_SLEEP);
 923  924                  hat_devload(kas.a_hat, (caddr_t)vlp_page, MMU_PAGESIZE,
 924  925                      kas.a_hat->hat_htable->ht_pfn,
 925  926  #if !defined(__xpv)
 926  927                      PROT_WRITE |
 927  928  #endif
 928  929                      PROT_READ | HAT_NOSYNC | HAT_UNORDERED_OK,
 929  930                      HAT_LOAD | HAT_LOAD_NOCONSIST);
 930  931          }
 931  932          hat_vlp_setup(CPU);
 932  933  
 933  934          /*
 934  935           * Create kmap (cached mappings of kernel PTEs)
 935  936           * for 32 bit we map from segmap_start .. ekernelheap
 936  937           * for 64 bit we map from segmap_start .. segmap_start + segmapsize;
 937  938           */
 938  939  #if defined(__i386)
 939  940          size = (uintptr_t)ekernelheap - segmap_start;
 940  941  #elif defined(__amd64)
 941  942          size = segmapsize;
 942  943  #endif
 943  944          hat_kmap_init((uintptr_t)segmap_start, size);
 944  945  }
 945  946  
 946  947  /*
  947  948   * In 32 bit PAE mode, PTE's are 64 bits, but ordinary atomic memory references
 948  949   * are 32 bit, so for safety we must use atomic_cas_64() to install these.
 949  950   */
 950  951  #ifdef __i386
 951  952  static void
 952  953  reload_pae32(hat_t *hat, cpu_t *cpu)
 953  954  {
 954  955          x86pte_t *src;
 955  956          x86pte_t *dest;
 956  957          x86pte_t pte;
 957  958          int i;
 958  959  
 959  960          /*
 960  961           * Load the 4 entries of the level 2 page table into this
 961  962           * cpu's range of the vlp_page and point cr3 at them.
 962  963           */
 963  964          ASSERT(mmu.pae_hat);
 964  965          src = hat->hat_vlp_ptes;
 965  966          dest = vlp_page + (cpu->cpu_id + 1) * VLP_NUM_PTES;
 966  967          for (i = 0; i < VLP_NUM_PTES; ++i) {
 967  968                  for (;;) {
 968  969                          pte = dest[i];
 969  970                          if (pte == src[i])
 970  971                                  break;
 971  972                          if (atomic_cas_64(dest + i, pte, src[i]) != src[i])
 972  973                                  break;
 973  974                  }
 974  975          }
 975  976  }
 976  977  #endif
 977  978  
 978  979  /*
 979  980   * Switch to a new active hat, maintaining bit masks to track active CPUs.
 980  981   *
 981  982   * On the 32-bit PAE hypervisor, %cr3 is a 64-bit value, on metal it
 982  983   * remains a 32-bit value.
 983  984   */
 984  985  void
 985  986  hat_switch(hat_t *hat)
 986  987  {
 987  988          uint64_t        newcr3;
 988  989          cpu_t           *cpu = CPU;
 989  990          hat_t           *old = cpu->cpu_current_hat;
 990  991  
 991  992          /*
 992  993           * set up this information first, so we don't miss any cross calls
 993  994           */
 994  995          if (old != NULL) {
 995  996                  if (old == hat)
 996  997                          return;
 997  998                  if (old != kas.a_hat)
 998  999                          CPUSET_ATOMIC_DEL(old->hat_cpus, cpu->cpu_id);
 999 1000          }
1000 1001  
1001 1002          /*
1002 1003           * Add this CPU to the active set for this HAT.
1003 1004           */
1004 1005          if (hat != kas.a_hat) {
1005 1006                  CPUSET_ATOMIC_ADD(hat->hat_cpus, cpu->cpu_id);
1006 1007          }
1007 1008          cpu->cpu_current_hat = hat;
1008 1009  
1009 1010          /*
1010 1011           * now go ahead and load cr3
1011 1012           */
1012 1013          if (hat->hat_flags & HAT_VLP) {
1013 1014  #if defined(__amd64)
1014 1015                  x86pte_t *vlpptep = cpu->cpu_hat_info->hci_vlp_l2ptes;
1015 1016  
1016 1017                  VLP_COPY(hat->hat_vlp_ptes, vlpptep);
1017 1018                  newcr3 = MAKECR3(cpu->cpu_hat_info->hci_vlp_pfn);
1018 1019  #elif defined(__i386)
1019 1020                  reload_pae32(hat, cpu);
1020 1021                  newcr3 = MAKECR3(kas.a_hat->hat_htable->ht_pfn) +
1021 1022                      (cpu->cpu_id + 1) * VLP_SIZE;
1022 1023  #endif
1023 1024          } else {
1024 1025                  newcr3 = MAKECR3((uint64_t)hat->hat_htable->ht_pfn);
1025 1026          }
1026 1027  #ifdef __xpv
1027 1028          {
1028 1029                  struct mmuext_op t[2];
1029 1030                  uint_t retcnt;
1030 1031                  uint_t opcnt = 1;
1031 1032  
1032 1033                  t[0].cmd = MMUEXT_NEW_BASEPTR;
1033 1034                  t[0].arg1.mfn = mmu_btop(pa_to_ma(newcr3));
1034 1035  #if defined(__amd64)
1035 1036                  /*
1036 1037                   * There's an interesting problem here, as to what to
1037 1038                   * actually specify when switching to the kernel hat.
1038 1039                   * For now we'll reuse the kernel hat again.
1039 1040                   */
1040 1041                  t[1].cmd = MMUEXT_NEW_USER_BASEPTR;
1041 1042                  if (hat == kas.a_hat)
1042 1043                          t[1].arg1.mfn = mmu_btop(pa_to_ma(newcr3));
1043 1044                  else
1044 1045                          t[1].arg1.mfn = pfn_to_mfn(hat->hat_user_ptable);
1045 1046                  ++opcnt;
1046 1047  #endif  /* __amd64 */
1047 1048                  if (HYPERVISOR_mmuext_op(t, opcnt, &retcnt, DOMID_SELF) < 0)
1048 1049                          panic("HYPERVISOR_mmu_update() failed");
1049 1050                  ASSERT(retcnt == opcnt);
1050 1051  
1051 1052          }
1052 1053  #else
1053 1054          setcr3(newcr3);
1054 1055  #endif
1055 1056          ASSERT(cpu == CPU);
1056 1057  }
1057 1058  
1058 1059  /*
1059 1060   * Utility to return a valid x86pte_t from protections, pfn, and level number
1060 1061   */
1061 1062  static x86pte_t
1062 1063  hati_mkpte(pfn_t pfn, uint_t attr, level_t level, uint_t flags)
1063 1064  {
1064 1065          x86pte_t        pte;
1065 1066          uint_t          cache_attr = attr & HAT_ORDER_MASK;
1066 1067  
1067 1068          pte = MAKEPTE(pfn, level);
1068 1069  
1069 1070          if (attr & PROT_WRITE)
1070 1071                  PTE_SET(pte, PT_WRITABLE);
1071 1072  
1072 1073          if (attr & PROT_USER)
1073 1074                  PTE_SET(pte, PT_USER);
1074 1075  
1075 1076          if (!(attr & PROT_EXEC))
1076 1077                  PTE_SET(pte, mmu.pt_nx);
1077 1078  
1078 1079          /*
 1079 1080           * Set the software bits used to track ref/mod sync's and hments.
1080 1081           * If not using REF/MOD, set them to avoid h/w rewriting PTEs.
1081 1082           */
1082 1083          if (flags & HAT_LOAD_NOCONSIST)
1083 1084                  PTE_SET(pte, PT_NOCONSIST | PT_REF | PT_MOD);
1084 1085          else if (attr & HAT_NOSYNC)
1085 1086                  PTE_SET(pte, PT_NOSYNC | PT_REF | PT_MOD);
1086 1087  
1087 1088          /*
1088 1089           * Set the caching attributes in the PTE. The combination
 1089 1090           * of attributes is poorly defined, so we pay attention
1090 1091           * to them in the given order.
1091 1092           *
1092 1093           * The test for HAT_STRICTORDER is different because it's defined
 1093 1094           * as "0" - which was a stupid thing to do, but it is too late to change!
1094 1095           */
1095 1096          if (cache_attr == HAT_STRICTORDER) {
1096 1097                  PTE_SET(pte, PT_NOCACHE);
1097 1098          /*LINTED [Lint hates empty ifs, but it's the obvious way to do this] */
1098 1099          } else if (cache_attr & (HAT_UNORDERED_OK | HAT_STORECACHING_OK)) {
1099 1100                  /* nothing to set */;
1100 1101          } else if (cache_attr & (HAT_MERGING_OK | HAT_LOADCACHING_OK)) {
1101 1102                  PTE_SET(pte, PT_NOCACHE);
1102 1103                  if (is_x86_feature(x86_featureset, X86FSET_PAT))
1103 1104                          PTE_SET(pte, (level == 0) ? PT_PAT_4K : PT_PAT_LARGE);
1104 1105                  else
1105 1106                          PTE_SET(pte, PT_WRITETHRU);
1106 1107          } else {
1107 1108                  panic("hati_mkpte(): bad caching attributes: %x\n", cache_attr);
1108 1109          }
1109 1110  
1110 1111          return (pte);
1111 1112  }
1112 1113  
1113 1114  /*
1114 1115   * Duplicate address translations of the parent to the child.
1115 1116   * This function really isn't used anymore.
1116 1117   */
1117 1118  /*ARGSUSED*/
1118 1119  int
1119 1120  hat_dup(hat_t *old, hat_t *new, caddr_t addr, size_t len, uint_t flag)
1120 1121  {
1121 1122          ASSERT((uintptr_t)addr < kernelbase);
1122 1123          ASSERT(new != kas.a_hat);
1123 1124          ASSERT(old != kas.a_hat);
1124 1125          return (0);
1125 1126  }
1126 1127  
1127 1128  /*
1128 1129   * Allocate any hat resources required for a process being swapped in.
1129 1130   */
1130 1131  /*ARGSUSED*/
1131 1132  void
1132 1133  hat_swapin(hat_t *hat)
1133 1134  {
1134 1135          /* do nothing - we let everything fault back in */
1135 1136  }
1136 1137  
1137 1138  /*
1138 1139   * Unload all translations associated with an address space of a process
1139 1140   * that is being swapped out.
1140 1141   */
1141 1142  void
1142 1143  hat_swapout(hat_t *hat)
1143 1144  {
1144 1145          uintptr_t       vaddr = (uintptr_t)0;
1145 1146          uintptr_t       eaddr = _userlimit;
1146 1147          htable_t        *ht = NULL;
1147 1148          level_t         l;
1148 1149  
1149 1150          XPV_DISALLOW_MIGRATE();
1150 1151          /*
1151 1152           * We can't just call hat_unload(hat, 0, _userlimit...)  here, because
1152 1153           * seg_spt and shared pagetables can't be swapped out.
1153 1154           * Take a look at segspt_shmswapout() - it's a big no-op.
1154 1155           *
1155 1156           * Instead we'll walk through all the address space and unload
1156 1157           * any mappings which we are sure are not shared, not locked.
1157 1158           */
1158 1159          ASSERT(IS_PAGEALIGNED(vaddr));
1159 1160          ASSERT(IS_PAGEALIGNED(eaddr));
1160 1161          ASSERT(AS_LOCK_HELD(hat->hat_as));
1161 1162          if ((uintptr_t)hat->hat_as->a_userlimit < eaddr)
1162 1163                  eaddr = (uintptr_t)hat->hat_as->a_userlimit;
1163 1164  
1164 1165          while (vaddr < eaddr) {
1165 1166                  (void) htable_walk(hat, &ht, &vaddr, eaddr);
1166 1167                  if (ht == NULL)
1167 1168                          break;
1168 1169  
1169 1170                  ASSERT(!IN_VA_HOLE(vaddr));
1170 1171  
1171 1172                  /*
1172 1173                   * If the page table is shared skip its entire range.
1173 1174                   */
1174 1175                  l = ht->ht_level;
1175 1176                  if (ht->ht_flags & HTABLE_SHARED_PFN) {
1176 1177                          vaddr = ht->ht_vaddr + LEVEL_SIZE(l + 1);
1177 1178                          htable_release(ht);
1178 1179                          ht = NULL;
1179 1180                          continue;
1180 1181                  }
1181 1182  
1182 1183                  /*
1183 1184                   * If the page table has no locked entries, unload this one.
1184 1185                   */
1185 1186                  if (ht->ht_lock_cnt == 0)
1186 1187                          hat_unload(hat, (caddr_t)vaddr, LEVEL_SIZE(l),
1187 1188                              HAT_UNLOAD_UNMAP);
1188 1189  
1189 1190                  /*
1190 1191                   * If we have a level 0 page table with locked entries,
1191 1192                   * skip the entire page table, otherwise skip just one entry.
1192 1193                   */
1193 1194                  if (ht->ht_lock_cnt > 0 && l == 0)
1194 1195                          vaddr = ht->ht_vaddr + LEVEL_SIZE(1);
1195 1196                  else
1196 1197                          vaddr += LEVEL_SIZE(l);
1197 1198          }
1198 1199          if (ht)
1199 1200                  htable_release(ht);
1200 1201  
1201 1202          /*
1202 1203           * We're in swapout because the system is low on memory, so
1203 1204           * go back and flush all the htables off the cached list.
1204 1205           */
1205 1206          htable_purge_hat(hat);
1206 1207          XPV_ALLOW_MIGRATE();
1207 1208  }
1208 1209  
1209 1210  /*
1210 1211   * returns number of bytes that have valid mappings in hat.
1211 1212   */
1212 1213  size_t
1213 1214  hat_get_mapped_size(hat_t *hat)
1214 1215  {
1215 1216          size_t total = 0;
1216 1217          int l;
1217 1218  
1218 1219          for (l = 0; l <= mmu.max_page_level; l++)
1219 1220                  total += (hat->hat_pages_mapped[l] << LEVEL_SHIFT(l));
1220 1221          total += hat->hat_ism_pgcnt;
1221 1222  
1222 1223          return (total);
1223 1224  }
1224 1225  
1225 1226  /*
1226 1227   * enable/disable collection of stats for hat.
1227 1228   */
1228 1229  int
1229 1230  hat_stats_enable(hat_t *hat)
1230 1231  {
1231 1232          atomic_inc_32(&hat->hat_stats);
1232 1233          return (1);
1233 1234  }
1234 1235  
1235 1236  void
1236 1237  hat_stats_disable(hat_t *hat)
1237 1238  {
1238 1239          atomic_dec_32(&hat->hat_stats);
1239 1240  }
1240 1241  
1241 1242  /*
1242 1243   * Utility to sync the ref/mod bits from a page table entry to the page_t
1243 1244   * We must be holding the mapping list lock when this is called.
1244 1245   */
1245 1246  static void
1246 1247  hati_sync_pte_to_page(page_t *pp, x86pte_t pte, level_t level)
1247 1248  {
1248 1249          uint_t  rm = 0;
1249 1250          pgcnt_t pgcnt;
1250 1251  
1251 1252          if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC)
1252 1253                  return;
1253 1254  
1254 1255          if (PTE_GET(pte, PT_REF))
1255 1256                  rm |= P_REF;
1256 1257  
1257 1258          if (PTE_GET(pte, PT_MOD))
1258 1259                  rm |= P_MOD;
1259 1260  
1260 1261          if (rm == 0)
1261 1262                  return;
1262 1263  
1263 1264          /*
1264 1265           * sync to all constituent pages of a large page
1265 1266           */
1266 1267          ASSERT(x86_hm_held(pp));
1267 1268          pgcnt = page_get_pagecnt(level);
1268 1269          ASSERT(IS_P2ALIGNED(pp->p_pagenum, pgcnt));
1269 1270          for (; pgcnt > 0; --pgcnt) {
1270 1271                  /*
1271 1272                   * hat_page_demote() can't decrease
1272 1273                   * pszc below this mapping size
1273 1274                   * since this large mapping existed after we
1274 1275                   * took mlist lock.
1275 1276                   */
1276 1277                  ASSERT(pp->p_szc >= level);
1277 1278                  hat_page_setattr(pp, rm);
1278 1279                  ++pp;
1279 1280          }
1280 1281  }
1281 1282  
1282 1283  /*
 1283 1284   * This is the set of PTE bits for PFN, permissions and caching
1284 1285   * that are allowed to change on a HAT_LOAD_REMAP
1285 1286   */
1286 1287  #define PT_REMAP_BITS                                                   \
1287 1288          (PT_PADDR | PT_NX | PT_WRITABLE | PT_WRITETHRU |                \
1288 1289          PT_NOCACHE | PT_PAT_4K | PT_PAT_LARGE | PT_IGNORE | PT_REF | PT_MOD)
1289 1290  
1290 1291  #define REMAPASSERT(EX) if (!(EX)) panic("hati_pte_map: " #EX)
1291 1292  /*
1292 1293   * Do the low-level work to get a mapping entered into a HAT's pagetables
1293 1294   * and in the mapping list of the associated page_t.
1294 1295   */
1295 1296  static int
1296 1297  hati_pte_map(
1297 1298          htable_t        *ht,
1298 1299          uint_t          entry,
1299 1300          page_t          *pp,
1300 1301          x86pte_t        pte,
1301 1302          int             flags,
1302 1303          void            *pte_ptr)
1303 1304  {
1304 1305          hat_t           *hat = ht->ht_hat;
1305 1306          x86pte_t        old_pte;
1306 1307          level_t         l = ht->ht_level;
1307 1308          hment_t         *hm;
1308 1309          uint_t          is_consist;
1309 1310          uint_t          is_locked;
1310 1311          int             rv = 0;
1311 1312  
1312 1313          /*
1313 1314           * Is this a consistent (ie. need mapping list lock) mapping?
1314 1315           */
1315 1316          is_consist = (pp != NULL && (flags & HAT_LOAD_NOCONSIST) == 0);
1316 1317  
1317 1318          /*
1318 1319           * Track locked mapping count in the htable.  Do this first,
1319 1320           * as we track locking even if there already is a mapping present.
1320 1321           */
1321 1322          is_locked = (flags & HAT_LOAD_LOCK) != 0 && hat != kas.a_hat;
1322 1323          if (is_locked)
1323 1324                  HTABLE_LOCK_INC(ht);
1324 1325  
1325 1326          /*
1326 1327           * Acquire the page's mapping list lock and get an hment to use.
1327 1328           * Note that hment_prepare() might return NULL.
1328 1329           */
1329 1330          if (is_consist) {
1330 1331                  x86_hm_enter(pp);
1331 1332                  hm = hment_prepare(ht, entry, pp);
1332 1333          }
1333 1334  
1334 1335          /*
1335 1336           * Set the new pte, retrieving the old one at the same time.
1336 1337           */
1337 1338          old_pte = x86pte_set(ht, entry, pte, pte_ptr);
1338 1339  
1339 1340          /*
1340 1341           * Did we get a large page / page table collision?
1341 1342           */
1342 1343          if (old_pte == LPAGE_ERROR) {
1343 1344                  if (is_locked)
1344 1345                          HTABLE_LOCK_DEC(ht);
1345 1346                  rv = -1;
1346 1347                  goto done;
1347 1348          }
1348 1349  
1349 1350          /*
1350 1351           * If the mapping didn't change there is nothing more to do.
1351 1352           */
1352 1353          if (PTE_EQUIV(pte, old_pte))
1353 1354                  goto done;
1354 1355  
1355 1356          /*
1356 1357           * Install a new mapping in the page's mapping list
1357 1358           */
1358 1359          if (!PTE_ISVALID(old_pte)) {
1359 1360                  if (is_consist) {
1360 1361                          hment_assign(ht, entry, pp, hm);
1361 1362                          x86_hm_exit(pp);
1362 1363                  } else {
1363 1364                          ASSERT(flags & HAT_LOAD_NOCONSIST);
1364 1365                  }
1365 1366  #if defined(__amd64)
1366 1367                  if (ht->ht_flags & HTABLE_VLP) {
1367 1368                          cpu_t *cpu = CPU;
1368 1369                          x86pte_t *vlpptep = cpu->cpu_hat_info->hci_vlp_l2ptes;
1369 1370                          VLP_COPY(hat->hat_vlp_ptes, vlpptep);
1370 1371                  }
1371 1372  #endif
1372 1373                  HTABLE_INC(ht->ht_valid_cnt);
1373 1374                  PGCNT_INC(hat, l);
1374 1375                  return (rv);
1375 1376          }
1376 1377  
1377 1378          /*
1378 1379           * Remaps are more complicated:
1379 1380           *  - HAT_LOAD_REMAP must be specified if changing the pfn.
1380 1381           *    We also require that NOCONSIST be specified.
1381 1382           *  - Otherwise only permission or caching bits may change.
1382 1383           */
1383 1384          if (!PTE_ISPAGE(old_pte, l))
1384 1385                  panic("non-null/page mapping pte=" FMT_PTE, old_pte);
1385 1386  
1386 1387          if (PTE2PFN(old_pte, l) != PTE2PFN(pte, l)) {
1387 1388                  REMAPASSERT(flags & HAT_LOAD_REMAP);
1388 1389                  REMAPASSERT(flags & HAT_LOAD_NOCONSIST);
1389 1390                  REMAPASSERT(PTE_GET(old_pte, PT_SOFTWARE) >= PT_NOCONSIST);
1390 1391                  REMAPASSERT(pf_is_memory(PTE2PFN(old_pte, l)) ==
1391 1392                      pf_is_memory(PTE2PFN(pte, l)));
1392 1393                  REMAPASSERT(!is_consist);
1393 1394          }
1394 1395  
1395 1396          /*
1396 1397           * We only let remaps change certain bits in the PTE.
1397 1398           */
1398 1399          if (PTE_GET(old_pte, ~PT_REMAP_BITS) != PTE_GET(pte, ~PT_REMAP_BITS))
1399 1400                  panic("remap bits changed: old_pte="FMT_PTE", pte="FMT_PTE"\n",
1400 1401                      old_pte, pte);
1401 1402  
1402 1403          /*
1403 1404           * We don't create any mapping list entries on a remap, so release
1404 1405           * any allocated hment after we drop the mapping list lock.
1405 1406           */
1406 1407  done:
1407 1408          if (is_consist) {
1408 1409                  x86_hm_exit(pp);
1409 1410                  if (hm != NULL)
1410 1411                          hment_free(hm);
1411 1412          }
1412 1413          return (rv);
1413 1414  }
1414 1415  
1415 1416  /*
1416 1417   * Internal routine to load a single page table entry. This only fails if
1417 1418   * we attempt to overwrite a page table link with a large page.
1418 1419   */
1419 1420  static int
1420 1421  hati_load_common(
1421 1422          hat_t           *hat,
1422 1423          uintptr_t       va,
1423 1424          page_t          *pp,
1424 1425          uint_t          attr,
1425 1426          uint_t          flags,
1426 1427          level_t         level,
1427 1428          pfn_t           pfn)
1428 1429  {
1429 1430          htable_t        *ht;
1430 1431          uint_t          entry;
1431 1432          x86pte_t        pte;
1432 1433          int             rv = 0;
1433 1434  
1434 1435          /*
1435 1436           * The number 16 is arbitrary and here to catch a recursion problem
1436 1437           * early before we blow out the kernel stack.
1437 1438           */
1438 1439          ++curthread->t_hatdepth;
1439 1440          ASSERT(curthread->t_hatdepth < 16);
1440 1441  
1441 1442          ASSERT(hat == kas.a_hat || AS_LOCK_HELD(hat->hat_as));
1442 1443  
1443 1444          if (flags & HAT_LOAD_SHARE)
1444 1445                  hat->hat_flags |= HAT_SHARED;
1445 1446  
1446 1447          /*
1447 1448           * Find the page table that maps this page if it already exists.
1448 1449           */
1449 1450          ht = htable_lookup(hat, va, level);
1450 1451  
1451 1452          /*
1452 1453           * We must have HAT_LOAD_NOCONSIST if page_t is NULL.
1453 1454           */
1454 1455          if (pp == NULL)
1455 1456                  flags |= HAT_LOAD_NOCONSIST;
1456 1457  
1457 1458          if (ht == NULL) {
1458 1459                  ht = htable_create(hat, va, level, NULL);
1459 1460                  ASSERT(ht != NULL);
1460 1461          }
1461 1462          entry = htable_va2entry(va, ht);
1462 1463  
1463 1464          /*
1464 1465           * a bunch of paranoid error checking
1465 1466           */
1466 1467          ASSERT(ht->ht_busy > 0);
1467 1468          if (ht->ht_vaddr > va || va > HTABLE_LAST_PAGE(ht))
1468 1469                  panic("hati_load_common: bad htable %p, va %p",
1469 1470                      (void *)ht, (void *)va);
1470 1471          ASSERT(ht->ht_level == level);
1471 1472  
1472 1473          /*
1473 1474           * construct the new PTE
1474 1475           */
1475 1476          if (hat == kas.a_hat)
1476 1477                  attr &= ~PROT_USER;
1477 1478          pte = hati_mkpte(pfn, attr, level, flags);
1478 1479          if (hat == kas.a_hat && va >= kernelbase)
1479 1480                  PTE_SET(pte, mmu.pt_global);
1480 1481  
1481 1482          /*
1482 1483           * establish the mapping
1483 1484           */
1484 1485          rv = hati_pte_map(ht, entry, pp, pte, flags, NULL);
1485 1486  
1486 1487          /*
1487 1488           * release the htable and any reserves
1488 1489           */
1489 1490          htable_release(ht);
1490 1491          --curthread->t_hatdepth;
1491 1492          return (rv);
1492 1493  }
1493 1494  
1494 1495  /*
1495 1496   * special case of hat_memload to deal with some kernel addrs for performance
1496 1497   */
1497 1498  static void
1498 1499  hat_kmap_load(
1499 1500          caddr_t         addr,
1500 1501          page_t          *pp,
1501 1502          uint_t          attr,
1502 1503          uint_t          flags)
1503 1504  {
1504 1505          uintptr_t       va = (uintptr_t)addr;
1505 1506          x86pte_t        pte;
1506 1507          pfn_t           pfn = page_pptonum(pp);
1507 1508          pgcnt_t         pg_off = mmu_btop(va - mmu.kmap_addr);
1508 1509          htable_t        *ht;
1509 1510          uint_t          entry;
1510 1511          void            *pte_ptr;
1511 1512  
1512 1513          /*
1513 1514           * construct the requested PTE
1514 1515           */
1515 1516          attr &= ~PROT_USER;
1516 1517          attr |= HAT_STORECACHING_OK;
1517 1518          pte = hati_mkpte(pfn, attr, 0, flags);
1518 1519          PTE_SET(pte, mmu.pt_global);
1519 1520  
1520 1521          /*
1521 1522           * Figure out the pte_ptr and htable and use common code to finish up
1522 1523           */
1523 1524          if (mmu.pae_hat)
1524 1525                  pte_ptr = mmu.kmap_ptes + pg_off;
1525 1526          else
1526 1527                  pte_ptr = (x86pte32_t *)mmu.kmap_ptes + pg_off;
1527 1528          ht = mmu.kmap_htables[(va - mmu.kmap_htables[0]->ht_vaddr) >>
1528 1529              LEVEL_SHIFT(1)];
1529 1530          entry = htable_va2entry(va, ht);
1530 1531          ++curthread->t_hatdepth;
1531 1532          ASSERT(curthread->t_hatdepth < 16);
1532 1533          (void) hati_pte_map(ht, entry, pp, pte, flags, pte_ptr);
1533 1534          --curthread->t_hatdepth;
1534 1535  }
1535 1536  
1536 1537  /*
1537 1538   * hat_memload() - load a translation to the given page struct
1538 1539   *
1539 1540   * Flags for hat_memload/hat_devload/hat_*attr.
1540 1541   *
1541 1542   *      HAT_LOAD        Default flags to load a translation to the page.
1542 1543   *
1543 1544   *      HAT_LOAD_LOCK   Lock down mapping resources; hat_map(), hat_memload(),
1544 1545   *                      and hat_devload().
1545 1546   *
1546 1547   *      HAT_LOAD_NOCONSIST Do not add mapping to page_t mapping list.
1547 1548   *                      sets PT_NOCONSIST
1548 1549   *
1549 1550   *      HAT_LOAD_SHARE  A flag to hat_memload() to indicate h/w page tables
1550 1551   *                      that map some user pages (not kas) are shared by more
1551 1552   *                      than one process (eg. ISM).
1552 1553   *
1553 1554   *      HAT_LOAD_REMAP  Reload a valid pte with a different page frame.
1554 1555   *
1555 1556   *      HAT_NO_KALLOC   Do not kmem_alloc while creating the mapping; at this
1556 1557   *                      point, it's setting up a mapping to allocate internal
1557 1558   *                      hat layer data structures.  This flag forces hat layer
1558 1559   *                      to tap its reserves in order to prevent infinite
1559 1560   *                      recursion.
1560 1561   *
1561 1562   * The following is a protection attribute (like PROT_READ, etc.)
1562 1563   *
1563 1564   *      HAT_NOSYNC      set PT_NOSYNC - this mapping's ref/mod bits
1564 1565   *                      are never cleared.
1565 1566   *
1566 1567   * Installing new valid PTEs and creating the mapping list
1567 1568   * entry are controlled under the same lock. It's derived from the
1568 1569   * page_t being mapped.
1569 1570   */
1570 1571  static uint_t supported_memload_flags =
1571 1572          HAT_LOAD | HAT_LOAD_LOCK | HAT_LOAD_ADV | HAT_LOAD_NOCONSIST |
1572 1573          HAT_LOAD_SHARE | HAT_NO_KALLOC | HAT_LOAD_REMAP | HAT_LOAD_TEXT;
1573 1574  
1574 1575  void
1575 1576  hat_memload(
1576 1577          hat_t           *hat,
1577 1578          caddr_t         addr,
1578 1579          page_t          *pp,
1579 1580          uint_t          attr,
1580 1581          uint_t          flags)
1581 1582  {
1582 1583          uintptr_t       va = (uintptr_t)addr;
1583 1584          level_t         level = 0;
1584 1585          pfn_t           pfn = page_pptonum(pp);
1585 1586  
1586 1587          XPV_DISALLOW_MIGRATE();
1587 1588          ASSERT(IS_PAGEALIGNED(va));
1588 1589          ASSERT(hat == kas.a_hat || va < _userlimit);
1589 1590          ASSERT(hat == kas.a_hat || AS_LOCK_HELD(hat->hat_as));
1590 1591          ASSERT((flags & supported_memload_flags) == flags);
1591 1592  
1592 1593          ASSERT(!IN_VA_HOLE(va));
1593 1594          ASSERT(!PP_ISFREE(pp));
1594 1595  
1595 1596          /*
1596 1597           * kernel address special case for performance.
1597 1598           */
1598 1599          if (mmu.kmap_addr <= va && va < mmu.kmap_eaddr) {
1599 1600                  ASSERT(hat == kas.a_hat);
1600 1601                  hat_kmap_load(addr, pp, attr, flags);
1601 1602                  XPV_ALLOW_MIGRATE();
1602 1603                  return;
1603 1604          }
1604 1605  
1605 1606          /*
1606 1607           * This is used for memory with normal caching enabled, so
1607 1608           * always set HAT_STORECACHING_OK.
1608 1609           */
1609 1610          attr |= HAT_STORECACHING_OK;
1610 1611          if (hati_load_common(hat, va, pp, attr, flags, level, pfn) != 0)
1611 1612                  panic("unexpected hati_load_common() failure");
1612 1613          XPV_ALLOW_MIGRATE();
1613 1614  }
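
/*
 * Editor's sketch (not part of this change): a hypothetical caller of
 * hat_memload() using the flags documented above.  The function name,
 * "addr" and "pp" are assumptions for illustration; the page is presumed
 * to already be exclusively locked and the VA to come from a kernel
 * arena.  Wrapped in #if 0 so it is never compiled.
 */
#if 0
static void
example_kernel_memload(caddr_t addr, page_t *pp)
{
        /*
         * Establish a locked, writable kernel mapping for one page.
         * HAT_LOAD_LOCK keeps the mapping resources from being stolen.
         */
        hat_memload(kas.a_hat, addr, pp, PROT_READ | PROT_WRITE,
            HAT_LOAD_LOCK);

        /* ... use the mapping ... */

        /* Tear it down again, releasing the lock count. */
        hat_unload(kas.a_hat, addr, MMU_PAGESIZE, HAT_UNLOAD_UNLOCK);
}
#endif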
1614 1615  
1615 1616  /* ARGSUSED */
1616 1617  void
1617 1618  hat_memload_region(struct hat *hat, caddr_t addr, struct page *pp,
1618 1619      uint_t attr, uint_t flags, hat_region_cookie_t rcookie)
1619 1620  {
1620 1621          hat_memload(hat, addr, pp, attr, flags);
1621 1622  }
1622 1623  
1623 1624  /*
1624 1625   * Load the given array of page structs using large pages when possible
1625 1626   */
1626 1627  void
1627 1628  hat_memload_array(
1628 1629          hat_t           *hat,
1629 1630          caddr_t         addr,
1630 1631          size_t          len,
1631 1632          page_t          **pages,
1632 1633          uint_t          attr,
1633 1634          uint_t          flags)
1634 1635  {
1635 1636          uintptr_t       va = (uintptr_t)addr;
1636 1637          uintptr_t       eaddr = va + len;
1637 1638          level_t         level;
1638 1639          size_t          pgsize;
1639 1640          pgcnt_t         pgindx = 0;
1640 1641          pfn_t           pfn;
1641 1642          pgcnt_t         i;
1642 1643  
1643 1644          XPV_DISALLOW_MIGRATE();
1644 1645          ASSERT(IS_PAGEALIGNED(va));
1645 1646          ASSERT(hat == kas.a_hat || va + len <= _userlimit);
1646 1647          ASSERT(hat == kas.a_hat || AS_LOCK_HELD(hat->hat_as));
1647 1648          ASSERT((flags & supported_memload_flags) == flags);
1648 1649  
1649 1650          /*
1650 1651           * memload is used for memory with full caching enabled, so
1651 1652           * set HAT_STORECACHING_OK.
1652 1653           */
1653 1654          attr |= HAT_STORECACHING_OK;
1654 1655  
1655 1656          /*
1656 1657           * handle all pages using largest possible pagesize
1657 1658           */
1658 1659          while (va < eaddr) {
1659 1660                  /*
1660 1661                   * decide what level mapping to use (ie. pagesize)
1661 1662                   */
1662 1663                  pfn = page_pptonum(pages[pgindx]);
1663 1664                  for (level = mmu.max_page_level; ; --level) {
1664 1665                          pgsize = LEVEL_SIZE(level);
1665 1666                          if (level == 0)
1666 1667                                  break;
1667 1668  
1668 1669                          if (!IS_P2ALIGNED(va, pgsize) ||
1669 1670                              (eaddr - va) < pgsize ||
1670 1671                              !IS_P2ALIGNED(pfn_to_pa(pfn), pgsize))
1671 1672                                  continue;
1672 1673  
1673 1674                          /*
1674 1675                           * To use a large mapping of this size, all the
1675 1676                           * pages we are passed must be sequential subpages
1676 1677                           * of the large page.
1677 1678                           * hat_page_demote() can't change p_szc because
1678 1679                           * all pages are locked.
1679 1680                           */
1680 1681                          if (pages[pgindx]->p_szc >= level) {
1681 1682                                  for (i = 0; i < mmu_btop(pgsize); ++i) {
1682 1683                                          if (pfn + i !=
1683 1684                                              page_pptonum(pages[pgindx + i]))
1684 1685                                                  break;
1685 1686                                          ASSERT(pages[pgindx + i]->p_szc >=
1686 1687                                              level);
1687 1688                                          ASSERT(pages[pgindx] + i ==
1688 1689                                              pages[pgindx + i]);
1689 1690                                  }
1690 1691                                  if (i == mmu_btop(pgsize)) {
1691 1692  #ifdef DEBUG
1692 1693                                          if (level == 2)
1693 1694                                                  map1gcnt++;
1694 1695  #endif
1695 1696                                          break;
1696 1697                                  }
1697 1698                          }
1698 1699                  }
1699 1700  
1700 1701                  /*
1701 1702                   * Load this page mapping. If the load fails, try a smaller
1702 1703                   * pagesize.
1703 1704                   */
1704 1705                  ASSERT(!IN_VA_HOLE(va));
1705 1706                  while (hati_load_common(hat, va, pages[pgindx], attr,
1706 1707                      flags, level, pfn) != 0) {
1707 1708                          if (level == 0)
1708 1709                                  panic("unexpected hati_load_common() failure");
1709 1710                          --level;
1710 1711                          pgsize = LEVEL_SIZE(level);
1711 1712                  }
1712 1713  
1713 1714                  /*
1714 1715                   * move to next page
1715 1716                   */
1716 1717                  va += pgsize;
1717 1718                  pgindx += mmu_btop(pgsize);
1718 1719          }
1719 1720          XPV_ALLOW_MIGRATE();
1720 1721  }
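
/*
 * Editor's sketch (not part of this change): the page size selection in
 * hat_memload_array() above reduces to the standalone helper below.  The
 * 4K/2M/1G sizes are hard-coded purely for illustration; the real code
 * derives them from LEVEL_SIZE() and mmu.max_page_level, and additionally
 * verifies that the page_t array really contains the sequential
 * constituent pages.  Wrapped in #if 0 so it is never compiled.
 */
#if 0
static int
example_pick_level(uint64_t va, uint64_t pa, uint64_t remaining)
{
        static const uint64_t sizes[] = {
                0x1000,         /* level 0: 4K */
                0x200000,       /* level 1: 2M */
                0x40000000      /* level 2: 1G */
        };
        int level;

        /*
         * Walk from the largest size down; a level is usable only if the
         * VA, the PA and the remaining length all cover a naturally
         * aligned chunk of that size.
         */
        for (level = 2; level > 0; --level) {
                uint64_t pgsize = sizes[level];

                if ((va & (pgsize - 1)) == 0 &&
                    (pa & (pgsize - 1)) == 0 &&
                    remaining >= pgsize)
                        break;
        }
        return (level);
}
#endif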
1721 1722  
1722 1723  /* ARGSUSED */
1723 1724  void
1724 1725  hat_memload_array_region(struct hat *hat, caddr_t addr, size_t len,
1725 1726      struct page **pps, uint_t attr, uint_t flags,
1726 1727      hat_region_cookie_t rcookie)
1727 1728  {
1728 1729          hat_memload_array(hat, addr, len, pps, attr, flags);
1729 1730  }
1730 1731  
1731 1732  /*
1732 1733   * void hat_devload(hat, addr, len, pf, attr, flags)
1733 1734   *      load/lock the given page frame number
1734 1735   *
1735 1736   * Advisory ordering attributes. Apply only to device mappings.
1736 1737   *
1737 1738   * HAT_STRICTORDER: the CPU must issue the references in order, as the
1738 1739   *      programmer specified.  This is the default.
1739 1740   * HAT_UNORDERED_OK: the CPU may reorder the references (this is all kinds
1740 1741   *      of reordering; store or load with store or load).
1741 1742   * HAT_MERGING_OK: merging and batching: the CPU may merge individual stores
1742 1743   *      to consecutive locations (for example, turn two consecutive byte
1743 1744   *      stores into one halfword store), and it may batch individual loads
1744 1745   *      (for example, turn two consecutive byte loads into one halfword load).
1745 1746   *      This also implies re-ordering.
1746 1747   * HAT_LOADCACHING_OK: the CPU may cache the data it fetches and reuse it
1747 1748   *      until another store occurs.  The default is to fetch new data
1748 1749   *      on every load.  This also implies merging.
1749 1750   * HAT_STORECACHING_OK: the CPU may keep the data in the cache and push it to
1750 1751   *      the device (perhaps with other data) at a later time.  The default is
1751 1752   *      to push the data right away.  This also implies load caching.
1752 1753   *
1753 1754   * Equivalent of hat_memload(), but can be used for device memory where
1754 1755   * there are no page_t's and we support additional flags (write merging, etc).
1755 1756   * Note that we can have large page mappings with this interface.
1756 1757   */
1757 1758  int supported_devload_flags = HAT_LOAD | HAT_LOAD_LOCK |
1758 1759          HAT_LOAD_NOCONSIST | HAT_STRICTORDER | HAT_UNORDERED_OK |
1759 1760          HAT_MERGING_OK | HAT_LOADCACHING_OK | HAT_STORECACHING_OK;
1760 1761  
1761 1762  void
1762 1763  hat_devload(
1763 1764          hat_t           *hat,
1764 1765          caddr_t         addr,
1765 1766          size_t          len,
1766 1767          pfn_t           pfn,
1767 1768          uint_t          attr,
1768 1769          int             flags)
1769 1770  {
1770 1771          uintptr_t       va = ALIGN2PAGE(addr);
1771 1772          uintptr_t       eva = va + len;
1772 1773          level_t         level;
1773 1774          size_t          pgsize;
1774 1775          page_t          *pp;
1775 1776          int             f;      /* per PTE copy of flags  - maybe modified */
1776 1777          uint_t          a;      /* per PTE copy of attr */
1777 1778  
1778 1779          XPV_DISALLOW_MIGRATE();
1779 1780          ASSERT(IS_PAGEALIGNED(va));
1780 1781          ASSERT(hat == kas.a_hat || eva <= _userlimit);
1781 1782          ASSERT(hat == kas.a_hat || AS_LOCK_HELD(hat->hat_as));
1782 1783          ASSERT((flags & supported_devload_flags) == flags);
1783 1784  
1784 1785          /*
1785 1786           * handle all pages
1786 1787           */
1787 1788          while (va < eva) {
1788 1789  
1789 1790                  /*
1790 1791                   * decide what level mapping to use (ie. pagesize)
1791 1792                   */
1792 1793                  for (level = mmu.max_page_level; ; --level) {
1793 1794                          pgsize = LEVEL_SIZE(level);
1794 1795                          if (level == 0)
1795 1796                                  break;
1796 1797                          if (IS_P2ALIGNED(va, pgsize) &&
1797 1798                              (eva - va) >= pgsize &&
1798 1799                              IS_P2ALIGNED(pfn, mmu_btop(pgsize))) {
1799 1800  #ifdef DEBUG
1800 1801                                  if (level == 2)
1801 1802                                          map1gcnt++;
1802 1803  #endif
1803 1804                                  break;
1804 1805                          }
1805 1806                  }
1806 1807  
1807 1808                  /*
1808 1809                   * If this is just memory then allow caching (this happens
1809 1810                   * for the nucleus pages) - though HAT_PLAT_NOCACHE can be used
1810 1811                   * to override that. If we don't have a page_t then make sure
1811 1812                   * NOCONSIST is set.
1812 1813                   */
1813 1814                  a = attr;
1814 1815                  f = flags;
1815 1816                  if (!pf_is_memory(pfn))
1816 1817                          f |= HAT_LOAD_NOCONSIST;
1817 1818                  else if (!(a & HAT_PLAT_NOCACHE))
1818 1819                          a |= HAT_STORECACHING_OK;
1819 1820  
1820 1821                  if (f & HAT_LOAD_NOCONSIST)
1821 1822                          pp = NULL;
1822 1823                  else
1823 1824                          pp = page_numtopp_nolock(pfn);
1824 1825  
1825 1826                  /*
1826 1827                   * Check to make sure we are really trying to map a valid
1827 1828                   * memory page. A caller wishing to intentionally map
1828 1829                   * free memory pages will have passed the HAT_LOAD_NOCONSIST
1829 1830                   * flag, in which case pp will be NULL.
1830 1831                   */
1831 1832                  if (pp != NULL) {
1832 1833                          if (PP_ISFREE(pp)) {
1833 1834                                  panic("hat_devload: loading "
1834 1835                                      "a mapping to free page %p", (void *)pp);
1835 1836                          }
1836 1837  
1837 1838                          if (!PAGE_LOCKED(pp) && !PP_ISNORELOC(pp)) {
1838 1839                                  panic("hat_devload: loading a mapping "
1839 1840                                      "to an unlocked page %p",
1840 1841                                      (void *)pp);
1841 1842                          }
1842 1843                  }
1843 1844  
1844 1845                  /*
1845 1846                   * load this page mapping
1846 1847                   */
1847 1848                  ASSERT(!IN_VA_HOLE(va));
1848 1849                  while (hati_load_common(hat, va, pp, a, f, level, pfn) != 0) {
1849 1850                          if (level == 0)
1850 1851                                  panic("unexpected hati_load_common() failure");
1851 1852                          --level;
1852 1853                          pgsize = LEVEL_SIZE(level);
1853 1854                  }
1854 1855  
1855 1856                  /*
1856 1857                   * move to next page
1857 1858                   */
1858 1859                  va += pgsize;
1859 1860                  pfn += mmu_btop(pgsize);
1860 1861          }
1861 1862          XPV_ALLOW_MIGRATE();
1862 1863  }
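
/*
 * Editor's sketch (not part of this change): a hypothetical use of
 * hat_devload() for a run of uncacheable device page frames, combining
 * the advisory ordering flags documented above with HAT_LOAD_NOCONSIST
 * (no page_t bookkeeping for device memory).  The function name and the
 * source of "va" are assumptions for illustration only.  Wrapped in
 * #if 0 so it is never compiled.
 */
#if 0
static void
example_devload(caddr_t va, pfn_t pfn, size_t len)
{
        hat_devload(kas.a_hat, va, len, pfn,
            PROT_READ | PROT_WRITE | HAT_PLAT_NOCACHE,
            HAT_LOAD_LOCK | HAT_LOAD_NOCONSIST | HAT_STRICTORDER);
}
#endif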
1863 1864  
1864 1865  /*
1865 1866   * void hat_unlock(hat, addr, len)
1866 1867   *      unlock the mappings to a given range of addresses
1867 1868   *
1868 1869   * Locks are tracked by ht_lock_cnt in the htable.
1869 1870   */
1870 1871  void
1871 1872  hat_unlock(hat_t *hat, caddr_t addr, size_t len)
1872 1873  {
1873 1874          uintptr_t       vaddr = (uintptr_t)addr;
1874 1875          uintptr_t       eaddr = vaddr + len;
1875 1876          htable_t        *ht = NULL;
1876 1877  
1877 1878          /*
1878 1879           * kernel entries are always locked; we don't track lock counts
1879 1880           */
1880 1881          ASSERT(hat == kas.a_hat || eaddr <= _userlimit);
1881 1882          ASSERT(IS_PAGEALIGNED(vaddr));
1882 1883          ASSERT(IS_PAGEALIGNED(eaddr));
1883 1884          if (hat == kas.a_hat)
1884 1885                  return;
1885 1886          if (eaddr > _userlimit)
1886 1887                  panic("hat_unlock() address out of range - above _userlimit");
1887 1888  
1888 1889          XPV_DISALLOW_MIGRATE();
1889 1890          ASSERT(AS_LOCK_HELD(hat->hat_as));
1890 1891          while (vaddr < eaddr) {
1891 1892                  (void) htable_walk(hat, &ht, &vaddr, eaddr);
1892 1893                  if (ht == NULL)
1893 1894                          break;
1894 1895  
1895 1896                  ASSERT(!IN_VA_HOLE(vaddr));
1896 1897  
1897 1898                  if (ht->ht_lock_cnt < 1)
1898 1899                          panic("hat_unlock(): lock_cnt < 1, "
1899 1900                              "htable=%p, vaddr=%p\n", (void *)ht, (void *)vaddr);
1900 1901                  HTABLE_LOCK_DEC(ht);
1901 1902  
1902 1903                  vaddr += LEVEL_SIZE(ht->ht_level);
1903 1904          }
1904 1905          if (ht)
1905 1906                  htable_release(ht);
1906 1907          XPV_ALLOW_MIGRATE();
1907 1908  }
1908 1909  
1909 1910  /* ARGSUSED */
1910 1911  void
1911 1912  hat_unlock_region(struct hat *hat, caddr_t addr, size_t len,
1912 1913      hat_region_cookie_t rcookie)
1913 1914  {
1914 1915          panic("No shared region support on x86");
1915 1916  }
1916 1917  
1917 1918  #if !defined(__xpv)
1918 1919  /*
1919 1920   * Cross call service routine to demap a virtual page on
1920 1921   * the current CPU, or flush all mappings in the TLB.
1921 1922   */
1922 1923  /*ARGSUSED*/
1923 1924  static int
1924 1925  hati_demap_func(xc_arg_t a1, xc_arg_t a2, xc_arg_t a3)
1925 1926  {
1926 1927          hat_t   *hat = (hat_t *)a1;
1927 1928          caddr_t addr = (caddr_t)a2;
1928 1929          size_t len = (size_t)a3;
1929 1930  
1930 1931          /*
1931 1932           * If the target hat isn't the kernel and this CPU isn't operating
1932 1933           * in the target hat, we can ignore the cross call.
1933 1934           */
1934 1935          if (hat != kas.a_hat && hat != CPU->cpu_current_hat)
1935 1936                  return (0);
1936 1937  
1937 1938          /*
1938 1939           * For a normal address, we flush a range of contiguous mappings
1939 1940           */
1940 1941          if ((uintptr_t)addr != DEMAP_ALL_ADDR) {
1941 1942                  for (size_t i = 0; i < len; i += MMU_PAGESIZE)
1942 1943                          mmu_tlbflush_entry(addr + i);
1943 1944                  return (0);
1944 1945          }
1945 1946  
1946 1947          /*
1947 1948           * Otherwise we reload cr3 to effect a complete TLB flush.
1948 1949           *
1949 1950           * A reload of cr3 on a VLP process also means we must recopy the
1950 1951           * pte values from the struct hat.
1951 1952           */
1952 1953          if (hat->hat_flags & HAT_VLP) {
1953 1954  #if defined(__amd64)
1954 1955                  x86pte_t *vlpptep = CPU->cpu_hat_info->hci_vlp_l2ptes;
1955 1956  
1956 1957                  VLP_COPY(hat->hat_vlp_ptes, vlpptep);
1957 1958  #elif defined(__i386)
1958 1959                  reload_pae32(hat, CPU);
1959 1960  #endif
1960 1961          }
1961 1962          reload_cr3();
1962 1963          return (0);
1963 1964  }
1964 1965  
1965 1966  /*
1966 1967   * Flush all TLB entries, including global (ie. kernel) ones.
1967 1968   */
1968 1969  static void
1969 1970  flush_all_tlb_entries(void)
1970 1971  {
1971 1972          ulong_t cr4 = getcr4();
1972 1973  
1973 1974          if (cr4 & CR4_PGE) {
1974 1975                  setcr4(cr4 & ~(ulong_t)CR4_PGE);
1975 1976                  setcr4(cr4);
1976 1977  
1977 1978                  /*
1978 1979                   * 32-bit PAE also always needs to reload_cr3()
1979 1980                   */
1980 1981                  if (mmu.max_level == 2)
1981 1982                          reload_cr3();
1982 1983          } else {
1983 1984                  reload_cr3();
1984 1985          }
1985 1986  }
1986 1987  
1987 1988  #define TLB_CPU_HALTED  (01ul)
1988 1989  #define TLB_INVAL_ALL   (02ul)
1989 1990  #define CAS_TLB_INFO(cpu, old, new)     \
1990 1991          atomic_cas_ulong((ulong_t *)&(cpu)->cpu_m.mcpu_tlb_info, (old), (new))
1991 1992  
1992 1993  /*
1993 1994   * Record that a CPU is going idle
1994 1995   */
1995 1996  void
1996 1997  tlb_going_idle(void)
1997 1998  {
1998 1999          atomic_or_ulong((ulong_t *)&CPU->cpu_m.mcpu_tlb_info, TLB_CPU_HALTED);
1999 2000  }
2000 2001  
2001 2002  /*
2002 2003   * Service a delayed TLB flush if coming out of being idle.
2003 2004   * It will be called from cpu idle notification with interrupt disabled.
2004 2005   */
2005 2006  void
2006 2007  tlb_service(void)
2007 2008  {
2008 2009          ulong_t tlb_info;
2009 2010          ulong_t found;
2010 2011  
2011 2012          /*
2012 2013           * We only have to do something if coming out of being idle.
2013 2014           */
2014 2015          tlb_info = CPU->cpu_m.mcpu_tlb_info;
2015 2016          if (tlb_info & TLB_CPU_HALTED) {
2016 2017                  ASSERT(CPU->cpu_current_hat == kas.a_hat);
2017 2018  
2018 2019                  /*
2019 2020                   * Atomic clear and fetch of old state.
2020 2021                   */
2021 2022                  while ((found = CAS_TLB_INFO(CPU, tlb_info, 0)) != tlb_info) {
2022 2023                          ASSERT(found & TLB_CPU_HALTED);
2023 2024                          tlb_info = found;
2024 2025                          SMT_PAUSE();
2025 2026                  }
2026 2027                  if (tlb_info & TLB_INVAL_ALL)
2027 2028                          flush_all_tlb_entries();
2028 2029          }
2029 2030  }
2030 2031  #endif /* !__xpv */
2031 2032  
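/*
 * Editor's sketch (not part of this change): the TLB_CPU_HALTED /
 * TLB_INVAL_ALL handshake above, re-expressed as a self-contained
 * userland fragment with C11 atomics.  It is a simplification -- the
 * kernel code retries its compare-and-swap loops and keys off the
 * per-CPU mcpu_tlb_info word -- and the ex_* names are invented for
 * illustration.  Wrapped in #if 0 so it is never compiled here.
 */
#if 0
#include <stdatomic.h>
#include <stdbool.h>

#define EX_TLB_CPU_HALTED       0x1ul
#define EX_TLB_INVAL_ALL        0x2ul

static _Atomic unsigned long ex_tlb_info;       /* one per CPU in reality */

/* Idle entry: advertise that this CPU can accept a deferred flush. */
static void
ex_tlb_going_idle(void)
{
        atomic_fetch_or(&ex_tlb_info, EX_TLB_CPU_HALTED);
}

/*
 * Shootdown side: if the target CPU is halted, mark it for a full flush
 * on wakeup and return true so the caller can skip the cross call.
 */
static bool
ex_try_defer_flush(void)
{
        unsigned long expected = EX_TLB_CPU_HALTED;

        return (atomic_compare_exchange_strong(&ex_tlb_info, &expected,
            EX_TLB_CPU_HALTED | EX_TLB_INVAL_ALL));
}

/* Wakeup side: clear the state and honor any deferred flush request. */
static void
ex_tlb_service(void)
{
        unsigned long old = atomic_exchange(&ex_tlb_info, 0);

        if (old & EX_TLB_INVAL_ALL) {
                /* flush_all_tlb_entries() in the real code */
        }
}
#endif
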
2032 2033  /*
2033 2034   * Internal routine to do cross calls to invalidate a range of pages on
2034 2035   * all CPUs using a given hat.
2035 2036   */
2036 2037  void
2037 2038  hat_tlb_inval_range(hat_t *hat, uintptr_t va, size_t len)
2038 2039  {
2039 2040          extern int      flushes_require_xcalls; /* from mp_startup.c */
2040 2041          cpuset_t        justme;
2041 2042          cpuset_t        cpus_to_shootdown;
2042 2043  #ifndef __xpv
2043 2044          cpuset_t        check_cpus;
2044 2045          cpu_t           *cpup;
2045 2046          int             c;
2046 2047  #endif
2047 2048  
2048 2049          /*
2049 2050           * If the hat is being destroyed, there are no more users, so
2050 2051           * demap need not do anything.
2051 2052           */
2052 2053          if (hat->hat_flags & HAT_FREEING)
2053 2054                  return;
2054 2055  
2055 2056          /*
2056 2057           * If demapping from a shared pagetable, we had best demap the
2057 2058           * entire set of user TLBs, since we don't know at what addresses
2058 2059           * these were shared.
2059 2060           */
2060 2061          if (hat->hat_flags & HAT_SHARED) {
2061 2062                  hat = kas.a_hat;
2062 2063                  va = DEMAP_ALL_ADDR;
2063 2064          }
2064 2065  
2065 2066          /*
2066 2067           * if not running with multiple CPUs, don't use cross calls
2067 2068           */
2068 2069          if (panicstr || !flushes_require_xcalls) {
2069 2070  #ifdef __xpv
2070 2071                  if (va == DEMAP_ALL_ADDR) {
2071 2072                          xen_flush_tlb();
2072 2073                  } else {
2073 2074                          for (size_t i = 0; i < len; i += MMU_PAGESIZE)
2074 2075                                  xen_flush_va((caddr_t)(va + i));
2075 2076                  }
2076 2077  #else
2077 2078                  (void) hati_demap_func((xc_arg_t)hat,
2078 2079                      (xc_arg_t)va, (xc_arg_t)len);
2079 2080  #endif
2080 2081                  return;
2081 2082          }
2082 2083  
2083 2084  
2084 2085          /*
2085 2086           * Determine CPUs to shootdown. Kernel changes always do all CPUs.
2086 2087           * Otherwise it's just CPUs currently executing in this hat.
2087 2088           */
2088 2089          kpreempt_disable();
2089 2090          CPUSET_ONLY(justme, CPU->cpu_id);
2090 2091          if (hat == kas.a_hat)
2091 2092                  cpus_to_shootdown = khat_cpuset;
2092 2093          else
2093 2094                  cpus_to_shootdown = hat->hat_cpus;
2094 2095  
2095 2096  #ifndef __xpv
2096 2097          /*
2097 2098           * If any CPUs in the set are idle, just request a delayed flush
2098 2099           * and avoid waking them up.
2099 2100           */
2100 2101          check_cpus = cpus_to_shootdown;
2101 2102          for (c = 0; c < NCPU && !CPUSET_ISNULL(check_cpus); ++c) {
2102 2103                  ulong_t tlb_info;
2103 2104  
2104 2105                  if (!CPU_IN_SET(check_cpus, c))
2105 2106                          continue;
2106 2107                  CPUSET_DEL(check_cpus, c);
2107 2108                  cpup = cpu[c];
2108 2109                  if (cpup == NULL)
2109 2110                          continue;
2110 2111  
2111 2112                  tlb_info = cpup->cpu_m.mcpu_tlb_info;
2112 2113                  while (tlb_info == TLB_CPU_HALTED) {
2113 2114                          (void) CAS_TLB_INFO(cpup, TLB_CPU_HALTED,
2114 2115                              TLB_CPU_HALTED | TLB_INVAL_ALL);
2115 2116                          SMT_PAUSE();
2116 2117                          tlb_info = cpup->cpu_m.mcpu_tlb_info;
2117 2118                  }
2118 2119                  if (tlb_info == (TLB_CPU_HALTED | TLB_INVAL_ALL)) {
2119 2120                          HATSTAT_INC(hs_tlb_inval_delayed);
2120 2121                          CPUSET_DEL(cpus_to_shootdown, c);
2121 2122                  }
2122 2123          }
2123 2124  #endif
2124 2125  
2125 2126          if (CPUSET_ISNULL(cpus_to_shootdown) ||
2126 2127              CPUSET_ISEQUAL(cpus_to_shootdown, justme)) {
2127 2128  
2128 2129  #ifdef __xpv
2129 2130                  if (va == DEMAP_ALL_ADDR) {
2130 2131                          xen_flush_tlb();
2131 2132                  } else {
2132 2133                          for (size_t i = 0; i < len; i += MMU_PAGESIZE)
2133 2134                                  xen_flush_va((caddr_t)(va + i));
2134 2135                  }
2135 2136  #else
2136 2137                  (void) hati_demap_func((xc_arg_t)hat,
2137 2138                      (xc_arg_t)va, (xc_arg_t)len);
2138 2139  #endif
2139 2140  
2140 2141          } else {
2141 2142  
2142 2143                  CPUSET_ADD(cpus_to_shootdown, CPU->cpu_id);
2143 2144  #ifdef __xpv
2144 2145                  if (va == DEMAP_ALL_ADDR) {
2145 2146                          xen_gflush_tlb(cpus_to_shootdown);
2146 2147                  } else {
2147 2148                          for (size_t i = 0; i < len; i += MMU_PAGESIZE) {
2148 2149                                  xen_gflush_va((caddr_t)(va + i),
2149 2150                                      cpus_to_shootdown);
2150 2151                          }
2151 2152                  }
2152 2153  #else
2153 2154                  xc_call((xc_arg_t)hat, (xc_arg_t)va, (xc_arg_t)len,
2154 2155                      CPUSET2BV(cpus_to_shootdown), hati_demap_func);
2155 2156  #endif
2156 2157  
2157 2158          }
2158 2159          kpreempt_enable();
2159 2160  }
2160 2161  
2161 2162  void
2162 2163  hat_tlb_inval(hat_t *hat, uintptr_t va)
2163 2164  {
2164 2165          hat_tlb_inval_range(hat, va, MMU_PAGESIZE);
2165 2166  }
2166 2167  
2167 2168  /*
2168 2169   * Interior routine for HAT_UNLOADs from hat_unload_callback(),
2169 2170   * hat_kmap_unload() OR from hat_steal() code.  This routine doesn't
2170 2171   * handle releasing of the htables.
2171 2172   */
2172 2173  void
2173 2174  hat_pte_unmap(
2174 2175          htable_t        *ht,
2175 2176          uint_t          entry,
2176 2177          uint_t          flags,
2177 2178          x86pte_t        old_pte,
2178 2179          void            *pte_ptr,
2179 2180          boolean_t       tlb)
2180 2181  {
2181 2182          hat_t           *hat = ht->ht_hat;
2182 2183          hment_t         *hm = NULL;
2183 2184          page_t          *pp = NULL;
2184 2185          level_t         l = ht->ht_level;
2185 2186          pfn_t           pfn;
2186 2187  
2187 2188          /*
2188 2189           * We always track the locking counts, even if nothing is unmapped
2189 2190           */
2190 2191          if ((flags & HAT_UNLOAD_UNLOCK) != 0 && hat != kas.a_hat) {
2191 2192                  ASSERT(ht->ht_lock_cnt > 0);
2192 2193                  HTABLE_LOCK_DEC(ht);
2193 2194          }
2194 2195  
2195 2196          /*
2196 2197           * Figure out which page's mapping list lock to acquire using the PFN
2197 2198           * passed in the "old" PTE. We then attempt to invalidate the PTE.
2198 2199           * If another thread, probably a hat_pageunload, has asynchronously
2199 2200           * unmapped/remapped this address we'll loop here.
2200 2201           */
2201 2202          ASSERT(ht->ht_busy > 0);
2202 2203          while (PTE_ISVALID(old_pte)) {
2203 2204                  pfn = PTE2PFN(old_pte, l);
2204 2205                  if (PTE_GET(old_pte, PT_SOFTWARE) >= PT_NOCONSIST) {
2205 2206                          pp = NULL;
2206 2207                  } else {
2207 2208  #ifdef __xpv
2208 2209                          if (pfn == PFN_INVALID)
2209 2210                                  panic("Invalid PFN, but not PT_NOCONSIST");
2210 2211  #endif
2211 2212                          pp = page_numtopp_nolock(pfn);
2212 2213                          if (pp == NULL) {
2213 2214                                  panic("no page_t, not NOCONSIST: old_pte="
2214 2215                                      FMT_PTE " ht=%lx entry=0x%x pte_ptr=%lx",
2215 2216                                      old_pte, (uintptr_t)ht, entry,
2216 2217                                      (uintptr_t)pte_ptr);
2217 2218                          }
2218 2219                          x86_hm_enter(pp);
2219 2220                  }
2220 2221  
2221 2222                  old_pte = x86pte_inval(ht, entry, old_pte, pte_ptr, tlb);
2222 2223  
2223 2224                  /*
2224 2225                   * If the page hasn't changed, we've unmapped it and can proceed
2225 2226                   */
2226 2227                  if (PTE_ISVALID(old_pte) && PTE2PFN(old_pte, l) == pfn)
2227 2228                          break;
2228 2229  
2229 2230                  /*
2230 2231                   * Otherwise, we'll have to retry with the current old_pte.
2231 2232                   * Drop the hment lock, since the pfn may have changed.
2232 2233                   */
2233 2234                  if (pp != NULL) {
2234 2235                          x86_hm_exit(pp);
2235 2236                          pp = NULL;
2236 2237                  } else {
2237 2238                          ASSERT(PTE_GET(old_pte, PT_SOFTWARE) >= PT_NOCONSIST);
2238 2239                  }
2239 2240          }
2240 2241  
2241 2242          /*
2242 2243           * If the old mapping wasn't valid, there's nothing more to do
2243 2244           */
2244 2245          if (!PTE_ISVALID(old_pte)) {
2245 2246                  if (pp != NULL)
2246 2247                          x86_hm_exit(pp);
2247 2248                  return;
2248 2249          }
2249 2250  
2250 2251          /*
2251 2252           * Take care of syncing any MOD/REF bits and removing the hment.
2252 2253           */
2253 2254          if (pp != NULL) {
2254 2255                  if (!(flags & HAT_UNLOAD_NOSYNC))
2255 2256                          hati_sync_pte_to_page(pp, old_pte, l);
2256 2257                  hm = hment_remove(pp, ht, entry);
2257 2258                  x86_hm_exit(pp);
2258 2259                  if (hm != NULL)
2259 2260                          hment_free(hm);
2260 2261          }
2261 2262  
2262 2263          /*
2263 2264           * Handle bookkeeping in the htable and hat
2264 2265           */
2265 2266          ASSERT(ht->ht_valid_cnt > 0);
2266 2267          HTABLE_DEC(ht->ht_valid_cnt);
2267 2268          PGCNT_DEC(hat, l);
2268 2269  }
2269 2270  
2270 2271  /*
2271 2272   * very cheap unload implementation to special case some kernel addresses
2272 2273   */
2273 2274  static void
2274 2275  hat_kmap_unload(caddr_t addr, size_t len, uint_t flags)
2275 2276  {
2276 2277          uintptr_t       va = (uintptr_t)addr;
2277 2278          uintptr_t       eva = va + len;
2278 2279          pgcnt_t         pg_index;
2279 2280          htable_t        *ht;
2280 2281          uint_t          entry;
2281 2282          x86pte_t        *pte_ptr;
2282 2283          x86pte_t        old_pte;
2283 2284  
2284 2285          for (; va < eva; va += MMU_PAGESIZE) {
2285 2286                  /*
2286 2287                   * Get the PTE
2287 2288                   */
2288 2289                  pg_index = mmu_btop(va - mmu.kmap_addr);
2289 2290                  pte_ptr = PT_INDEX_PTR(mmu.kmap_ptes, pg_index);
2290 2291                  old_pte = GET_PTE(pte_ptr);
2291 2292  
2292 2293                  /*
2293 2294                   * get the htable / entry
2294 2295                   */
2295 2296                  ht = mmu.kmap_htables[(va - mmu.kmap_htables[0]->ht_vaddr)
2296 2297                      >> LEVEL_SHIFT(1)];
2297 2298                  entry = htable_va2entry(va, ht);
2298 2299  
2299 2300                  /*
2300 2301                   * use mostly common code to unmap it.
2301 2302                   */
2302 2303                  hat_pte_unmap(ht, entry, flags, old_pte, pte_ptr, B_TRUE);
2303 2304          }
2304 2305  }
2305 2306  
2306 2307  
2307 2308  /*
2308 2309   * unload a range of virtual address space (no callback)
2309 2310   */
2310 2311  void
2311 2312  hat_unload(hat_t *hat, caddr_t addr, size_t len, uint_t flags)
2312 2313  {
2313 2314          uintptr_t va = (uintptr_t)addr;
2314 2315  
2315 2316          XPV_DISALLOW_MIGRATE();
2316 2317          ASSERT(hat == kas.a_hat || va + len <= _userlimit);
2317 2318  
2318 2319          /*
2319 2320           * special case for performance.
2320 2321           */
2321 2322          if (mmu.kmap_addr <= va && va < mmu.kmap_eaddr) {
2322 2323                  ASSERT(hat == kas.a_hat);
2323 2324                  hat_kmap_unload(addr, len, flags);
2324 2325          } else {
2325 2326                  hat_unload_callback(hat, addr, len, flags, NULL);
2326 2327          }
2327 2328          XPV_ALLOW_MIGRATE();
2328 2329  }
2329 2330  
2330 2331  /*
2331 2332   * Do the callbacks for ranges being unloaded.
2332 2333   */
2333 2334  typedef struct range_info {
2334 2335          uintptr_t       rng_va;
2335 2336          ulong_t         rng_cnt;
2336 2337          level_t         rng_level;
2337 2338  } range_info_t;
2338 2339  
2339 2340  /*
2340 2341   * Invalidate the TLB, and perform the callback to the upper level VM system,
2341 2342   * for the specified ranges of contiguous pages.
2342 2343   */
2343 2344  static void
2344 2345  handle_ranges(hat_t *hat, hat_callback_t *cb, uint_t cnt, range_info_t *range)
2345 2346  {
2346 2347          while (cnt > 0) {
2347 2348                  size_t len;
2348 2349  
2349 2350                  --cnt;
2350 2351                  len = range[cnt].rng_cnt << LEVEL_SHIFT(range[cnt].rng_level);
2351 2352                  hat_tlb_inval_range(hat, (uintptr_t)range[cnt].rng_va, len);
2352 2353  
2353 2354                  if (cb != NULL) {
2354 2355                          cb->hcb_start_addr = (caddr_t)range[cnt].rng_va;
2355 2356                          cb->hcb_end_addr = cb->hcb_start_addr;
2356 2357                          cb->hcb_end_addr += len;
2357 2358                          cb->hcb_function(cb);
2358 2359                  }
2359 2360          }
2360 2361  }
2361 2362  
2362 2363  /*
2363 2364   * Unload a given range of addresses (has optional callback)
2364 2365   *
2365 2366   * Flags:
2366 2367   * define       HAT_UNLOAD              0x00
2367 2368   * define       HAT_UNLOAD_NOSYNC       0x02
2368 2369   * define       HAT_UNLOAD_UNLOCK       0x04
2369 2370   * define       HAT_UNLOAD_OTHER        0x08 - not used
2370 2371   * define       HAT_UNLOAD_UNMAP        0x10 - same as HAT_UNLOAD
2371 2372   */
2372 2373  #define MAX_UNLOAD_CNT (8)
2373 2374  void
2374 2375  hat_unload_callback(
2375 2376          hat_t           *hat,
2376 2377          caddr_t         addr,
2377 2378          size_t          len,
2378 2379          uint_t          flags,
2379 2380          hat_callback_t  *cb)
2380 2381  {
2381 2382          uintptr_t       vaddr = (uintptr_t)addr;
2382 2383          uintptr_t       eaddr = vaddr + len;
2383 2384          htable_t        *ht = NULL;
2384 2385          uint_t          entry;
2385 2386          uintptr_t       contig_va = (uintptr_t)-1L;
2386 2387          range_info_t    r[MAX_UNLOAD_CNT];
2387 2388          uint_t          r_cnt = 0;
2388 2389          x86pte_t        old_pte;
2389 2390  
2390 2391          XPV_DISALLOW_MIGRATE();
2391 2392          ASSERT(hat == kas.a_hat || eaddr <= _userlimit);
2392 2393          ASSERT(IS_PAGEALIGNED(vaddr));
2393 2394          ASSERT(IS_PAGEALIGNED(eaddr));
2394 2395  
2395 2396          /*
2396 2397           * Special case a single page being unloaded for speed. This happens
2397 2398           * quite frequently, COW faults after a fork() for example.
2398 2399           */
2399 2400          if (cb == NULL && len == MMU_PAGESIZE) {
2400 2401                  ht = htable_getpte(hat, vaddr, &entry, &old_pte, 0);
2401 2402                  if (ht != NULL) {
2402 2403                          if (PTE_ISVALID(old_pte)) {
2403 2404                                  hat_pte_unmap(ht, entry, flags, old_pte,
2404 2405                                      NULL, B_TRUE);
2405 2406                          }
2406 2407                          htable_release(ht);
2407 2408                  }
2408 2409                  XPV_ALLOW_MIGRATE();
2409 2410                  return;
2410 2411          }
2411 2412  
2412 2413          while (vaddr < eaddr) {
2413 2414                  old_pte = htable_walk(hat, &ht, &vaddr, eaddr);
2414 2415                  if (ht == NULL)
2415 2416                          break;
2416 2417  
2417 2418                  ASSERT(!IN_VA_HOLE(vaddr));
2418 2419  
2419 2420                  if (vaddr < (uintptr_t)addr)
2420 2421                          panic("hat_unload_callback(): unmap inside large page");
2421 2422  
2422 2423                  /*
2423 2424                   * We'll do the callbacks for contiguous ranges
2424 2425                   */
2425 2426                  if (vaddr != contig_va ||
2426 2427                      (r_cnt > 0 && r[r_cnt - 1].rng_level != ht->ht_level)) {
2427 2428                          if (r_cnt == MAX_UNLOAD_CNT) {
2428 2429                                  handle_ranges(hat, cb, r_cnt, r);
2429 2430                                  r_cnt = 0;
2430 2431                          }
2431 2432                          r[r_cnt].rng_va = vaddr;
2432 2433                          r[r_cnt].rng_cnt = 0;
2433 2434                          r[r_cnt].rng_level = ht->ht_level;
2434 2435                          ++r_cnt;
2435 2436                  }
2436 2437  
2437 2438                  /*
2438 2439                   * Unload one mapping (for a single page) from the page tables.
2439 2440                   * Note that we do not remove the mapping from the TLB yet,
2440 2441                   * as indicated by the tlb=FALSE argument to hat_pte_unmap().
2441 2442                   * handle_ranges() will clear the TLB entries with one call to
2442 2443                   * hat_tlb_inval_range() per contiguous range.  This is
2443 2444                   * safe because the page cannot be reused until the
2444 2445                   * callback is made (or we return).
2445 2446                   */
2446 2447                  entry = htable_va2entry(vaddr, ht);
2447 2448                  hat_pte_unmap(ht, entry, flags, old_pte, NULL, B_FALSE);
2448 2449                  ASSERT(ht->ht_level <= mmu.max_page_level);
2449 2450                  vaddr += LEVEL_SIZE(ht->ht_level);
2450 2451                  contig_va = vaddr;
2451 2452                  ++r[r_cnt - 1].rng_cnt;
2452 2453          }
2453 2454          if (ht)
2454 2455                  htable_release(ht);
2455 2456  
2456 2457          /*
2457 2458           * handle last range for callbacks
2458 2459           */
2459 2460          if (r_cnt > 0)
2460 2461                  handle_ranges(hat, cb, r_cnt, r);
2461 2462          XPV_ALLOW_MIGRATE();
2462 2463  }
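
/*
 * Editor's sketch (not part of this change): the batching performed by
 * hat_unload_callback()/handle_ranges() above amounts to the pattern
 * below -- accumulate contiguous unmaps into at most MAX_UNLOAD_CNT
 * ranges and issue one TLB invalidation per range rather than one per
 * page.  Simplified to a single page size; names prefixed ex_ are
 * invented for illustration.  Wrapped in #if 0 so it is never compiled.
 */
#if 0
#define EX_MAX_RANGES   8

typedef struct ex_range {
        uintptr_t       va;     /* start of a contiguous run */
        ulong_t         cnt;    /* pages in the run */
} ex_range_t;

static void
ex_flush_ranges(ex_range_t *r, uint_t cnt, size_t pgsize,
    void (*flush)(uintptr_t, size_t))
{
        while (cnt > 0) {
                --cnt;
                flush(r[cnt].va, r[cnt].cnt * pgsize);
        }
}

static void
ex_unload_batched(uintptr_t va, pgcnt_t pages, size_t pgsize,
    void (*flush)(uintptr_t, size_t))
{
        ex_range_t      r[EX_MAX_RANGES];
        uint_t          r_cnt = 0;
        uintptr_t       contig = (uintptr_t)-1L;

        for (; pages > 0; --pages, va += pgsize) {
                /* start a new range when the contiguous run breaks */
                if (va != contig) {
                        if (r_cnt == EX_MAX_RANGES) {
                                ex_flush_ranges(r, r_cnt, pgsize, flush);
                                r_cnt = 0;
                        }
                        r[r_cnt].va = va;
                        r[r_cnt].cnt = 0;
                        ++r_cnt;
                }
                /* (unmap the PTE for va here, without touching the TLB) */
                ++r[r_cnt - 1].cnt;
                contig = va + pgsize;
        }
        ex_flush_ranges(r, r_cnt, pgsize, flush);
}
#endif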
2463 2464  
2464 2465  /*
2465 2466   * Invalidate a virtual address translation on a slave CPU during
2466 2467   * panic() dumps.
2467 2468   */
2468 2469  void
2469 2470  hat_flush_range(hat_t *hat, caddr_t va, size_t size)
2470 2471  {
2471 2472          ssize_t sz;
2472 2473          caddr_t endva = va + size;
2473 2474  
2474 2475          while (va < endva) {
2475 2476                  sz = hat_getpagesize(hat, va);
2476 2477                  if (sz < 0) {
2477 2478  #ifdef __xpv
2478 2479                          xen_flush_tlb();
2479 2480  #else
2480 2481                          flush_all_tlb_entries();
2481 2482  #endif
2482 2483                          break;
2483 2484                  }
2484 2485  #ifdef __xpv
2485 2486                  xen_flush_va(va);
2486 2487  #else
2487 2488                  mmu_tlbflush_entry(va);
2488 2489  #endif
2489 2490                  va += sz;
2490 2491          }
2491 2492  }
2492 2493  
2493 2494  /*
2494 2495   * synchronize mapping with software data structures
2495 2496   *
2496 2497   * This interface is currently only used by the working set monitor
2497 2498   * driver.
2498 2499   */
2499 2500  /*ARGSUSED*/
2500 2501  void
2501 2502  hat_sync(hat_t *hat, caddr_t addr, size_t len, uint_t flags)
2502 2503  {
2503 2504          uintptr_t       vaddr = (uintptr_t)addr;
2504 2505          uintptr_t       eaddr = vaddr + len;
2505 2506          htable_t        *ht = NULL;
2506 2507          uint_t          entry;
2507 2508          x86pte_t        pte;
2508 2509          x86pte_t        save_pte;
2509 2510          x86pte_t        new;
2510 2511          page_t          *pp;
2511 2512  
2512 2513          ASSERT(!IN_VA_HOLE(vaddr));
2513 2514          ASSERT(IS_PAGEALIGNED(vaddr));
2514 2515          ASSERT(IS_PAGEALIGNED(eaddr));
2515 2516          ASSERT(hat == kas.a_hat || eaddr <= _userlimit);
2516 2517  
2517 2518          XPV_DISALLOW_MIGRATE();
2518 2519          for (; vaddr < eaddr; vaddr += LEVEL_SIZE(ht->ht_level)) {
2519 2520  try_again:
2520 2521                  pte = htable_walk(hat, &ht, &vaddr, eaddr);
2521 2522                  if (ht == NULL)
2522 2523                          break;
2523 2524                  entry = htable_va2entry(vaddr, ht);
2524 2525  
2525 2526                  if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC ||
2526 2527                      PTE_GET(pte, PT_REF | PT_MOD) == 0)
2527 2528                          continue;
2528 2529  
2529 2530                  /*
2530 2531                   * We need to acquire the mapping list lock to protect
2531 2532                   * against hat_pageunload(), hat_unload(), etc.
2532 2533                   */
2533 2534                  pp = page_numtopp_nolock(PTE2PFN(pte, ht->ht_level));
2534 2535                  if (pp == NULL)
2535 2536                          break;
2536 2537                  x86_hm_enter(pp);
2537 2538                  save_pte = pte;
2538 2539                  pte = x86pte_get(ht, entry);
2539 2540                  if (pte != save_pte) {
2540 2541                          x86_hm_exit(pp);
2541 2542                          goto try_again;
2542 2543                  }
2543 2544                  if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC ||
2544 2545                      PTE_GET(pte, PT_REF | PT_MOD) == 0) {
2545 2546                          x86_hm_exit(pp);
2546 2547                          continue;
2547 2548                  }
2548 2549  
2549 2550                  /*
2550 2551                   * Need to clear ref or mod bits. We may compete with
2551 2552                   * hardware updating the R/M bits and have to try again.
2552 2553                   */
2553 2554                  if (flags == HAT_SYNC_ZERORM) {
2554 2555                          new = pte;
2555 2556                          PTE_CLR(new, PT_REF | PT_MOD);
2556 2557                          pte = hati_update_pte(ht, entry, pte, new);
2557 2558                          if (pte != 0) {
2558 2559                                  x86_hm_exit(pp);
2559 2560                                  goto try_again;
2560 2561                          }
2561 2562                  } else {
2562 2563                          /*
2563 2564                           * sync the PTE to the page_t
2564 2565                           */
2565 2566                          hati_sync_pte_to_page(pp, save_pte, ht->ht_level);
2566 2567                  }
2567 2568                  x86_hm_exit(pp);
2568 2569          }
2569 2570          if (ht)
2570 2571                  htable_release(ht);
2571 2572          XPV_ALLOW_MIGRATE();
2572 2573  }
2573 2574  
2574 2575  /*
2575 2576   * void hat_map(hat, addr, len, flags)
2576 2577   */
2577 2578  /*ARGSUSED*/
2578 2579  void
2579 2580  hat_map(hat_t *hat, caddr_t addr, size_t len, uint_t flags)
2580 2581  {
2581 2582          /* does nothing */
2582 2583  }
2583 2584  
2584 2585  /*
2585 2586   * uint_t hat_getattr(hat, addr, *attr)
2586 2587   *      returns attr for <hat,addr> in *attr.  returns 0 if there was a
2587 2588   *      mapping and *attr is valid, nonzero if there was no mapping and
2588 2589   *      *attr is not valid.
2589 2590   */
2590 2591  uint_t
2591 2592  hat_getattr(hat_t *hat, caddr_t addr, uint_t *attr)
2592 2593  {
2593 2594          uintptr_t       vaddr = ALIGN2PAGE(addr);
2594 2595          htable_t        *ht = NULL;
2595 2596          x86pte_t        pte;
2596 2597  
2597 2598          ASSERT(hat == kas.a_hat || vaddr <= _userlimit);
2598 2599  
2599 2600          if (IN_VA_HOLE(vaddr))
2600 2601                  return ((uint_t)-1);
2601 2602  
2602 2603          ht = htable_getpte(hat, vaddr, NULL, &pte, mmu.max_page_level);
2603 2604          if (ht == NULL)
2604 2605                  return ((uint_t)-1);
2605 2606  
2606 2607          if (!PTE_ISVALID(pte) || !PTE_ISPAGE(pte, ht->ht_level)) {
2607 2608                  htable_release(ht);
2608 2609                  return ((uint_t)-1);
2609 2610          }
2610 2611  
2611 2612          *attr = PROT_READ;
2612 2613          if (PTE_GET(pte, PT_WRITABLE))
2613 2614                  *attr |= PROT_WRITE;
2614 2615          if (PTE_GET(pte, PT_USER))
2615 2616                  *attr |= PROT_USER;
2616 2617          if (!PTE_GET(pte, mmu.pt_nx))
2617 2618                  *attr |= PROT_EXEC;
2618 2619          if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC)
2619 2620                  *attr |= HAT_NOSYNC;
2620 2621          htable_release(ht);
2621 2622          return (0);
2622 2623  }
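
/*
 * Editor's sketch (not part of this change): a hypothetical caller of
 * hat_getattr(), checking whether an address is mapped writable.  The
 * function name is invented for illustration.  Wrapped in #if 0 so it
 * is never compiled.
 */
#if 0
static boolean_t
example_is_writable(struct as *as, caddr_t addr)
{
        uint_t attr;

        /* a nonzero return means there is no mapping, so attr is invalid */
        if (hat_getattr(as->a_hat, addr, &attr) != 0)
                return (B_FALSE);

        return ((attr & PROT_WRITE) ? B_TRUE : B_FALSE);
}
#endif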
2623 2624  
2624 2625  /*
2625 2626   * hat_updateattr() applies the given attribute change to an existing mapping
2626 2627   */
2627 2628  #define HAT_LOAD_ATTR           1
2628 2629  #define HAT_SET_ATTR            2
2629 2630  #define HAT_CLR_ATTR            3
2630 2631  
2631 2632  static void
2632 2633  hat_updateattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr, int what)
2633 2634  {
2634 2635          uintptr_t       vaddr = (uintptr_t)addr;
2635 2636          uintptr_t       eaddr = (uintptr_t)addr + len;
2636 2637          htable_t        *ht = NULL;
2637 2638          uint_t          entry;
2638 2639          x86pte_t        oldpte, newpte;
2639 2640          page_t          *pp;
2640 2641  
2641 2642          XPV_DISALLOW_MIGRATE();
2642 2643          ASSERT(IS_PAGEALIGNED(vaddr));
2643 2644          ASSERT(IS_PAGEALIGNED(eaddr));
2644 2645          ASSERT(hat == kas.a_hat || AS_LOCK_HELD(hat->hat_as));
2645 2646          for (; vaddr < eaddr; vaddr += LEVEL_SIZE(ht->ht_level)) {
2646 2647  try_again:
2647 2648                  oldpte = htable_walk(hat, &ht, &vaddr, eaddr);
2648 2649                  if (ht == NULL)
2649 2650                          break;
2650 2651                  if (PTE_GET(oldpte, PT_SOFTWARE) >= PT_NOCONSIST)
2651 2652                          continue;
2652 2653  
2653 2654                  pp = page_numtopp_nolock(PTE2PFN(oldpte, ht->ht_level));
2654 2655                  if (pp == NULL)
2655 2656                          continue;
2656 2657                  x86_hm_enter(pp);
2657 2658  
2658 2659                  newpte = oldpte;
2659 2660                  /*
2660 2661                   * We found a page table entry in the desired range,
2661 2662                   * figure out the new attributes.
2662 2663                   */
2663 2664                  if (what == HAT_SET_ATTR || what == HAT_LOAD_ATTR) {
2664 2665                          if ((attr & PROT_WRITE) &&
2665 2666                              !PTE_GET(oldpte, PT_WRITABLE))
2666 2667                                  newpte |= PT_WRITABLE;
2667 2668  
2668 2669                          if ((attr & HAT_NOSYNC) &&
2669 2670                              PTE_GET(oldpte, PT_SOFTWARE) < PT_NOSYNC)
2670 2671                                  newpte |= PT_NOSYNC;
2671 2672  
2672 2673                          if ((attr & PROT_EXEC) && PTE_GET(oldpte, mmu.pt_nx))
2673 2674                                  newpte &= ~mmu.pt_nx;
2674 2675                  }
2675 2676  
2676 2677                  if (what == HAT_LOAD_ATTR) {
2677 2678                          if (!(attr & PROT_WRITE) &&
2678 2679                              PTE_GET(oldpte, PT_WRITABLE))
2679 2680                                  newpte &= ~PT_WRITABLE;
2680 2681  
2681 2682                          if (!(attr & HAT_NOSYNC) &&
2682 2683                              PTE_GET(oldpte, PT_SOFTWARE) >= PT_NOSYNC)
2683 2684                                  newpte &= ~PT_SOFTWARE;
2684 2685  
2685 2686                          if (!(attr & PROT_EXEC) && !PTE_GET(oldpte, mmu.pt_nx))
2686 2687                                  newpte |= mmu.pt_nx;
2687 2688                  }
2688 2689  
2689 2690                  if (what == HAT_CLR_ATTR) {
2690 2691                          if ((attr & PROT_WRITE) && PTE_GET(oldpte, PT_WRITABLE))
2691 2692                                  newpte &= ~PT_WRITABLE;
2692 2693  
2693 2694                          if ((attr & HAT_NOSYNC) &&
2694 2695                              PTE_GET(oldpte, PT_SOFTWARE) >= PT_NOSYNC)
2695 2696                                  newpte &= ~PT_SOFTWARE;
2696 2697  
2697 2698                          if ((attr & PROT_EXEC) && !PTE_GET(oldpte, mmu.pt_nx))
2698 2699                                  newpte |= mmu.pt_nx;
2699 2700                  }
2700 2701  
2701 2702                  /*
2702 2703                   * Ensure NOSYNC/NOCONSIST mappings have REF and MOD set.
2703 2704                   * x86pte_set() depends on this.
2704 2705                   */
2705 2706                  if (PTE_GET(newpte, PT_SOFTWARE) >= PT_NOSYNC)
2706 2707                          newpte |= PT_REF | PT_MOD;
2707 2708  
2708 2709                  /*
2709 2710                   * what about PROT_READ or others? this code only handles:
2710 2711                   * EXEC, WRITE, NOSYNC
2711 2712                   */
2712 2713  
2713 2714                  /*
2714 2715                   * If new PTE really changed, update the table.
2715 2716                   */
2716 2717                  if (newpte != oldpte) {
2717 2718                          entry = htable_va2entry(vaddr, ht);
2718 2719                          oldpte = hati_update_pte(ht, entry, oldpte, newpte);
2719 2720                          if (oldpte != 0) {
2720 2721                                  x86_hm_exit(pp);
2721 2722                                  goto try_again;
2722 2723                          }
2723 2724                  }
2724 2725                  x86_hm_exit(pp);
2725 2726          }
2726 2727          if (ht)
2727 2728                  htable_release(ht);
2728 2729          XPV_ALLOW_MIGRATE();
2729 2730  }
2730 2731  
2731 2732  /*
2732 2733   * Various wrappers for hat_updateattr()
2733 2734   */
2734 2735  void
2735 2736  hat_setattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr)
2736 2737  {
2737 2738          ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
2738 2739          hat_updateattr(hat, addr, len, attr, HAT_SET_ATTR);
2739 2740  }
2740 2741  
2741 2742  void
2742 2743  hat_clrattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr)
2743 2744  {
2744 2745          ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
2745 2746          hat_updateattr(hat, addr, len, attr, HAT_CLR_ATTR);
2746 2747  }
2747 2748  
2748 2749  void
2749 2750  hat_chgattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr)
2750 2751  {
2751 2752          ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
2752 2753          hat_updateattr(hat, addr, len, attr, HAT_LOAD_ATTR);
2753 2754  }
2754 2755  
2755 2756  void
2756 2757  hat_chgprot(hat_t *hat, caddr_t addr, size_t len, uint_t vprot)
2757 2758  {
2758 2759          ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
2759 2760          hat_updateattr(hat, addr, len, vprot & HAT_PROT_MASK, HAT_LOAD_ATTR);
2760 2761  }
2761 2762  
2762 2763  /*
2763 2764   * size_t hat_getpagesize(hat, addr)
2764 2765   *      returns pagesize in bytes for <hat, addr>. returns -1 if there is
2765 2766   *      no mapping. This is an advisory call.
2766 2767   */
2767 2768  ssize_t
2768 2769  hat_getpagesize(hat_t *hat, caddr_t addr)
2769 2770  {
2770 2771          uintptr_t       vaddr = ALIGN2PAGE(addr);
2771 2772          htable_t        *ht;
2772 2773          size_t          pagesize;
2773 2774  
2774 2775          ASSERT(hat == kas.a_hat || vaddr <= _userlimit);
2775 2776          if (IN_VA_HOLE(vaddr))
2776 2777                  return (-1);
2777 2778          ht = htable_getpage(hat, vaddr, NULL);
2778 2779          if (ht == NULL)
2779 2780                  return (-1);
2780 2781          pagesize = LEVEL_SIZE(ht->ht_level);
2781 2782          htable_release(ht);
2782 2783          return (pagesize);
2783 2784  }
2784 2785  
2785 2786  
2786 2787  
2787 2788  /*
2788 2789   * pfn_t hat_getpfnum(hat, addr)
2789 2790   *      returns pfn for <hat, addr> or PFN_INVALID if mapping is invalid.
2790 2791   */
2791 2792  pfn_t
2792 2793  hat_getpfnum(hat_t *hat, caddr_t addr)
2793 2794  {
2794 2795          uintptr_t       vaddr = ALIGN2PAGE(addr);
2795 2796          htable_t        *ht;
2796 2797          uint_t          entry;
2797 2798          pfn_t           pfn = PFN_INVALID;
2798 2799  
2799 2800          ASSERT(hat == kas.a_hat || vaddr <= _userlimit);
2800 2801          if (khat_running == 0)
2801 2802                  return (PFN_INVALID);
2802 2803  
2803 2804          if (IN_VA_HOLE(vaddr))
2804 2805                  return (PFN_INVALID);
2805 2806  
2806 2807          XPV_DISALLOW_MIGRATE();
2807 2808          /*
2808 2809           * A very common use of hat_getpfnum() is from the DDI for kernel pages.
2809 2810           * Use the kmap_ptes (which also covers the 32 bit heap) to speed
2810 2811           * this up.
2811 2812           */
2812 2813          if (mmu.kmap_addr <= vaddr && vaddr < mmu.kmap_eaddr) {
2813 2814                  x86pte_t pte;
2814 2815                  pgcnt_t pg_index;
2815 2816  
2816 2817                  pg_index = mmu_btop(vaddr - mmu.kmap_addr);
2817 2818                  pte = GET_PTE(PT_INDEX_PTR(mmu.kmap_ptes, pg_index));
2818 2819                  if (PTE_ISVALID(pte))
2819 2820                          /*LINTED [use of constant 0 causes a lint warning] */
2820 2821                          pfn = PTE2PFN(pte, 0);
2821 2822                  XPV_ALLOW_MIGRATE();
2822 2823                  return (pfn);
2823 2824          }
2824 2825  
2825 2826          ht = htable_getpage(hat, vaddr, &entry);
2826 2827          if (ht == NULL) {
2827 2828                  XPV_ALLOW_MIGRATE();
2828 2829                  return (PFN_INVALID);
2829 2830          }
2830 2831          ASSERT(vaddr >= ht->ht_vaddr);
2831 2832          ASSERT(vaddr <= HTABLE_LAST_PAGE(ht));
2832 2833          pfn = PTE2PFN(x86pte_get(ht, entry), ht->ht_level);
2833 2834          if (ht->ht_level > 0)
2834 2835                  pfn += mmu_btop(vaddr & LEVEL_OFFSET(ht->ht_level));
2835 2836          htable_release(ht);
2836 2837          XPV_ALLOW_MIGRATE();
2837 2838          return (pfn);
2838 2839  }
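
A minimal caller sketch (hypothetical; kva_to_pa() is not part of this file) of the
hat_getpfnum() interface above, translating a kernel virtual address into a physical
address via the kernel hat:

static uint64_t
kva_to_pa(caddr_t addr)
{
        pfn_t   pfn = hat_getpfnum(kas.a_hat, addr);

        if (pfn == PFN_INVALID)
                return ((uint64_t)-1);
        /* combine the page frame with the byte offset within the page */
        return (pfn_to_pa(pfn) | ((uintptr_t)addr & MMU_PAGEOFFSET));
}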
2839 2840  
2840 2841  /*
2841 2842   * int hat_probe(hat, addr)
2842 2843   *      return 0 if no valid mapping is present.  Faster version
2843 2844   *      of hat_getattr in certain architectures.
2844 2845   */
2845 2846  int
2846 2847  hat_probe(hat_t *hat, caddr_t addr)
2847 2848  {
2848 2849          uintptr_t       vaddr = ALIGN2PAGE(addr);
2849 2850          uint_t          entry;
2850 2851          htable_t        *ht;
2851 2852          pgcnt_t         pg_off;
2852 2853  
2853 2854          ASSERT(hat == kas.a_hat || vaddr <= _userlimit);
2854 2855          ASSERT(hat == kas.a_hat || AS_LOCK_HELD(hat->hat_as));
2855 2856          if (IN_VA_HOLE(vaddr))
2856 2857                  return (0);
2857 2858  
2858 2859          /*
2859 2860           * Most common use of hat_probe is from segmap. We special case it
2860 2861           * for performance.
2861 2862           */
2862 2863          if (mmu.kmap_addr <= vaddr && vaddr < mmu.kmap_eaddr) {
2863 2864                  pg_off = mmu_btop(vaddr - mmu.kmap_addr);
2864 2865                  if (mmu.pae_hat)
2865 2866                          return (PTE_ISVALID(mmu.kmap_ptes[pg_off]));
2866 2867                  else
2867 2868                          return (PTE_ISVALID(
2868 2869                              ((x86pte32_t *)mmu.kmap_ptes)[pg_off]));
2869 2870          }
2870 2871  
2871 2872          ht = htable_getpage(hat, vaddr, &entry);
2872 2873          htable_release(ht);
2873 2874          return (ht != NULL);
2874 2875  }
2875 2876  
2876 2877  /*
2877 2878   * Find out if the segment for hat_share()/hat_unshare() is DISM or locked ISM.
2878 2879   */
2879 2880  static int
2880 2881  is_it_dism(hat_t *hat, caddr_t va)
2881 2882  {
2882 2883          struct seg *seg;
2883 2884          struct shm_data *shmd;
2884 2885          struct spt_data *sptd;
2885 2886  
2886 2887          seg = as_findseg(hat->hat_as, va, 0);
2887 2888          ASSERT(seg != NULL);
2888 2889          ASSERT(seg->s_base <= va);
2889 2890          shmd = (struct shm_data *)seg->s_data;
2890 2891          ASSERT(shmd != NULL);
2891 2892          sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2892 2893          ASSERT(sptd != NULL);
2893 2894          if (sptd->spt_flags & SHM_PAGEABLE)
2894 2895                  return (1);
2895 2896          return (0);
2896 2897  }
2897 2898  
2898 2899  /*
2899 2900   * Simple implementation of ISM. hat_share() is similar to hat_memload_array(),
2900 2901   * except that we use the ism_hat's existing mappings to determine the pages
2901 2902   * and protections to use for this hat. If we find a full properly aligned
2902 2903   * and sized pagetable, we will attempt to share the pagetable itself.
2903 2904   */
2904 2905  /*ARGSUSED*/
2905 2906  int
2906 2907  hat_share(
2907 2908          hat_t           *hat,
2908 2909          caddr_t         addr,
2909 2910          hat_t           *ism_hat,
2910 2911          caddr_t         src_addr,
2911 2912          size_t          len,    /* almost useless value, see below.. */
2912 2913          uint_t          ismszc)
2913 2914  {
2914 2915          uintptr_t       vaddr_start = (uintptr_t)addr;
2915 2916          uintptr_t       vaddr;
2916 2917          uintptr_t       eaddr = vaddr_start + len;
2917 2918          uintptr_t       ism_addr_start = (uintptr_t)src_addr;
2918 2919          uintptr_t       ism_addr = ism_addr_start;
2919 2920          uintptr_t       e_ism_addr = ism_addr + len;
2920 2921          htable_t        *ism_ht = NULL;
2921 2922          htable_t        *ht;
2922 2923          x86pte_t        pte;
2923 2924          page_t          *pp;
2924 2925          pfn_t           pfn;
2925 2926          level_t         l;
2926 2927          pgcnt_t         pgcnt;
2927 2928          uint_t          prot;
2928 2929          int             is_dism;
2929 2930          int             flags;
2930 2931  
2931 2932          /*
2932 2933           * We might be asked to share an empty DISM hat by as_dup()
2933 2934           */
2934 2935          ASSERT(hat != kas.a_hat);
2935 2936          ASSERT(eaddr <= _userlimit);
2936 2937          if (!(ism_hat->hat_flags & HAT_SHARED)) {
2937 2938                  ASSERT(hat_get_mapped_size(ism_hat) == 0);
2938 2939                  return (0);
2939 2940          }
2940 2941          XPV_DISALLOW_MIGRATE();
2941 2942  
2942 2943          /*
2943 2944           * The SPT segment driver often passes us a size larger than there are
2944 2945           * valid mappings. That's because it rounds the segment size up to a
2945 2946           * large pagesize, even if the actual memory mapped by ism_hat is less.
2946 2947           */
2947 2948          ASSERT(IS_PAGEALIGNED(vaddr_start));
2948 2949          ASSERT(IS_PAGEALIGNED(ism_addr_start));
2949 2950          ASSERT(ism_hat->hat_flags & HAT_SHARED);
2950 2951          is_dism = is_it_dism(hat, addr);
2951 2952          while (ism_addr < e_ism_addr) {
2952 2953                  /*
2953 2954                   * use htable_walk to get the next valid ISM mapping
2954 2955                   */
2955 2956                  pte = htable_walk(ism_hat, &ism_ht, &ism_addr, e_ism_addr);
2956 2957                  if (ism_ht == NULL)
2957 2958                          break;
2958 2959  
2959 2960                  /*
2960 2961                   * First check to see if we already share the page table.
2961 2962                   */
2962 2963                  l = ism_ht->ht_level;
2963 2964                  vaddr = vaddr_start + (ism_addr - ism_addr_start);
2964 2965                  ht = htable_lookup(hat, vaddr, l);
2965 2966                  if (ht != NULL) {
2966 2967                          if (ht->ht_flags & HTABLE_SHARED_PFN)
2967 2968                                  goto shared;
2968 2969                          htable_release(ht);
2969 2970                          goto not_shared;
2970 2971                  }
2971 2972  
2972 2973                  /*
2973 2974                   * Can't ever share top table.
2974 2975                   */
2975 2976                  if (l == mmu.max_level)
2976 2977                          goto not_shared;
2977 2978  
2978 2979                  /*
2979 2980                   * Avoid level mismatches later due to DISM faults.
2980 2981                   */
2981 2982                  if (is_dism && l > 0)
2982 2983                          goto not_shared;
2983 2984  
2984 2985                  /*
2985 2986                   * addresses and lengths must align
2986 2987                   * table must be fully populated
2987 2988                   * no lower level page tables
2988 2989                   */
2989 2990                  if (ism_addr != ism_ht->ht_vaddr ||
2990 2991                      (vaddr & LEVEL_OFFSET(l + 1)) != 0)
2991 2992                          goto not_shared;
2992 2993  
2993 2994                  /*
2994 2995                   * The range of address space must cover a full table.
2995 2996                   */
2996 2997                  if (e_ism_addr - ism_addr < LEVEL_SIZE(l + 1))
2997 2998                          goto not_shared;
2998 2999  
2999 3000                  /*
3000 3001                   * All entries in the ISM page table must be leaf PTEs.
3001 3002                   */
3002 3003                  if (l > 0) {
3003 3004                          int e;
3004 3005  
3005 3006                          /*
3006 3007                           * We know the 0th is from htable_walk() above.
3007 3008                           */
3008 3009                          for (e = 1; e < HTABLE_NUM_PTES(ism_ht); ++e) {
3009 3010                                  x86pte_t pte;
3010 3011                                  pte = x86pte_get(ism_ht, e);
3011 3012                                  if (!PTE_ISPAGE(pte, l))
3012 3013                                          goto not_shared;
3013 3014                          }
3014 3015                  }
3015 3016  
3016 3017                  /*
3017 3018                   * share the page table
3018 3019                   */
3019 3020                  ht = htable_create(hat, vaddr, l, ism_ht);
3020 3021  shared:
3021 3022                  ASSERT(ht->ht_flags & HTABLE_SHARED_PFN);
3022 3023                  ASSERT(ht->ht_shares == ism_ht);
3023 3024                  hat->hat_ism_pgcnt +=
3024 3025                      (ism_ht->ht_valid_cnt - ht->ht_valid_cnt) <<
3025 3026                      (LEVEL_SHIFT(ht->ht_level) - MMU_PAGESHIFT);
3026 3027                  ht->ht_valid_cnt = ism_ht->ht_valid_cnt;
3027 3028                  htable_release(ht);
3028 3029                  ism_addr = ism_ht->ht_vaddr + LEVEL_SIZE(l + 1);
3029 3030                  htable_release(ism_ht);
3030 3031                  ism_ht = NULL;
3031 3032                  continue;
3032 3033  
3033 3034  not_shared:
3034 3035                  /*
3035 3036                   * Unable to share the page table. Instead we will
3036 3037                   * create new mappings from the values in the ISM mappings.
3037 3038                   * Figure out what level size mappings to use;
3038 3039                   */
3039 3040                  for (l = ism_ht->ht_level; l > 0; --l) {
3040 3041                          if (LEVEL_SIZE(l) <= eaddr - vaddr &&
3041 3042                              (vaddr & LEVEL_OFFSET(l)) == 0)
3042 3043                                  break;
3043 3044                  }
3044 3045  
3045 3046                  /*
3046 3047                   * The ISM mapping might be larger than the share area,
3047 3048                   * be careful to truncate it if needed.
3048 3049                   */
3049 3050                  if (eaddr - vaddr >= LEVEL_SIZE(ism_ht->ht_level)) {
3050 3051                          pgcnt = mmu_btop(LEVEL_SIZE(ism_ht->ht_level));
3051 3052                  } else {
3052 3053                          pgcnt = mmu_btop(eaddr - vaddr);
3053 3054                          l = 0;
3054 3055                  }
3055 3056  
3056 3057                  pfn = PTE2PFN(pte, ism_ht->ht_level);
3057 3058                  ASSERT(pfn != PFN_INVALID);
3058 3059                  while (pgcnt > 0) {
3059 3060                          /*
3060 3061                           * Make a new pte for the PFN for this level.
3061 3062                           * Copy protections for the pte from the ISM pte.
3062 3063                           */
3063 3064                          pp = page_numtopp_nolock(pfn);
3064 3065                          ASSERT(pp != NULL);
3065 3066  
3066 3067                          prot = PROT_USER | PROT_READ | HAT_UNORDERED_OK;
3067 3068                          if (PTE_GET(pte, PT_WRITABLE))
3068 3069                                  prot |= PROT_WRITE;
3069 3070                          if (!PTE_GET(pte, PT_NX))
3070 3071                                  prot |= PROT_EXEC;
3071 3072  
3072 3073                          flags = HAT_LOAD;
3073 3074                          if (!is_dism)
3074 3075                                  flags |= HAT_LOAD_LOCK | HAT_LOAD_NOCONSIST;
3075 3076                          while (hati_load_common(hat, vaddr, pp, prot, flags,
3076 3077                              l, pfn) != 0) {
3077 3078                                  if (l == 0)
3078 3079                                          panic("hati_load_common() failure");
3079 3080                                  --l;
3080 3081                          }
3081 3082  
3082 3083                          vaddr += LEVEL_SIZE(l);
3083 3084                          ism_addr += LEVEL_SIZE(l);
3084 3085                          pfn += mmu_btop(LEVEL_SIZE(l));
3085 3086                          pgcnt -= mmu_btop(LEVEL_SIZE(l));
3086 3087                  }
3087 3088          }
3088 3089          if (ism_ht != NULL)
3089 3090                  htable_release(ism_ht);
3090 3091          XPV_ALLOW_MIGRATE();
3091 3092          return (0);
3092 3093  }
3093 3094  
3094 3095  
3095 3096  /*
3096 3097   * hat_unshare() is similar to hat_unload_callback(), but
3097 3098   * we have to look for empty shared pagetables. Note that
3098 3099   * hat_unshare() is always invoked against an entire segment.
3099 3100   */
3100 3101  /*ARGSUSED*/
3101 3102  void
3102 3103  hat_unshare(hat_t *hat, caddr_t addr, size_t len, uint_t ismszc)
3103 3104  {
3104 3105          uint64_t        vaddr = (uintptr_t)addr;
3105 3106          uintptr_t       eaddr = vaddr + len;
3106 3107          htable_t        *ht = NULL;
3107 3108          uint_t          need_demaps = 0;
3108 3109          int             flags = HAT_UNLOAD_UNMAP;
3109 3110          level_t         l;
3110 3111  
3111 3112          ASSERT(hat != kas.a_hat);
3112 3113          ASSERT(eaddr <= _userlimit);
3113 3114          ASSERT(IS_PAGEALIGNED(vaddr));
3114 3115          ASSERT(IS_PAGEALIGNED(eaddr));
3115 3116          XPV_DISALLOW_MIGRATE();
3116 3117  
3117 3118          /*
3118 3119           * First go through and remove any shared pagetables.
3119 3120           *
3120 3121           * Note that it's ok to delay the TLB shootdown till the entire range is
3121 3122           * finished, because if hat_pageunload() were to unload a shared
3122 3123           * pagetable page, its hat_tlb_inval() will do a global TLB invalidate.
3123 3124           */
3124 3125          l = mmu.max_page_level;
3125 3126          if (l == mmu.max_level)
3126 3127                  --l;
3127 3128          for (; l >= 0; --l) {
3128 3129                  for (vaddr = (uintptr_t)addr; vaddr < eaddr;
3129 3130                      vaddr = (vaddr & LEVEL_MASK(l + 1)) + LEVEL_SIZE(l + 1)) {
3130 3131                          ASSERT(!IN_VA_HOLE(vaddr));
3131 3132                          /*
3132 3133                           * find a pagetable that maps the current address
3133 3134                           */
3134 3135                          ht = htable_lookup(hat, vaddr, l);
3135 3136                          if (ht == NULL)
3136 3137                                  continue;
3137 3138                          if (ht->ht_flags & HTABLE_SHARED_PFN) {
3138 3139                                  /*
3139 3140                                   * clear page count, set valid_cnt to 0,
3140 3141                                   * let htable_release() finish the job
3141 3142                                   */
3142 3143                                  hat->hat_ism_pgcnt -= ht->ht_valid_cnt <<
3143 3144                                      (LEVEL_SHIFT(ht->ht_level) - MMU_PAGESHIFT);
3144 3145                                  ht->ht_valid_cnt = 0;
3145 3146                                  need_demaps = 1;
3146 3147                          }
3147 3148                          htable_release(ht);
3148 3149                  }
3149 3150          }
3150 3151  
3151 3152          /*
3152 3153           * flush the TLBs - since we're probably dealing with MANY mappings
3153 3154           * we do just one CR3 reload.
3154 3155           */
3155 3156          if (!(hat->hat_flags & HAT_FREEING) && need_demaps)
3156 3157                  hat_tlb_inval(hat, DEMAP_ALL_ADDR);
3157 3158  
3158 3159          /*
3159 3160           * Now go back and clean up any unaligned mappings that
3160 3161           * couldn't share pagetables.
3161 3162           */
3162 3163          if (!is_it_dism(hat, addr))
3163 3164                  flags |= HAT_UNLOAD_UNLOCK;
3164 3165          hat_unload(hat, addr, len, flags);
3165 3166          XPV_ALLOW_MIGRATE();
3166 3167  }
3167 3168  
3168 3169  
3169 3170  /*
3170 3171   * hat_reserve() does nothing
3171 3172   */
3172 3173  /*ARGSUSED*/
3173 3174  void
3174 3175  hat_reserve(struct as *as, caddr_t addr, size_t len)
3175 3176  {
3176 3177  }
3177 3178  
3178 3179  
3179 3180  /*
3180 3181   * Called when all mappings to a page should have write permission removed.
3181 3182   * Mostly stolen from hat_pagesync()
3182 3183   */
3183 3184  static void
3184 3185  hati_page_clrwrt(struct page *pp)
3185 3186  {
3186 3187          hment_t         *hm = NULL;
3187 3188          htable_t        *ht;
3188 3189          uint_t          entry;
3189 3190          x86pte_t        old;
3190 3191          x86pte_t        new;
3191 3192          uint_t          pszc = 0;
3192 3193  
3193 3194          XPV_DISALLOW_MIGRATE();
3194 3195  next_size:
3195 3196          /*
3196 3197           * walk thru the mapping list clearing write permission
3197 3198           */
3198 3199          x86_hm_enter(pp);
3199 3200          while ((hm = hment_walk(pp, &ht, &entry, hm)) != NULL) {
3200 3201                  if (ht->ht_level < pszc)
3201 3202                          continue;
3202 3203                  old = x86pte_get(ht, entry);
3203 3204  
3204 3205                  for (;;) {
3205 3206                          /*
3206 3207                           * Is this mapping of interest?
3207 3208                           */
3208 3209                          if (PTE2PFN(old, ht->ht_level) != pp->p_pagenum ||
3209 3210                              PTE_GET(old, PT_WRITABLE) == 0)
3210 3211                                  break;
3211 3212  
3212 3213                          /*
3213 3214                           * Clear ref/mod writable bits. This requires cross
3214 3215                           * calls to ensure any executing TLBs see cleared bits.
3215 3216                           */
3216 3217                          new = old;
3217 3218                          PTE_CLR(new, PT_REF | PT_MOD | PT_WRITABLE);
3218 3219                          old = hati_update_pte(ht, entry, old, new);
3219 3220                          if (old != 0)
3220 3221                                  continue;
3221 3222  
3222 3223                          break;
3223 3224                  }
3224 3225          }
3225 3226          x86_hm_exit(pp);
3226 3227          while (pszc < pp->p_szc) {
3227 3228                  page_t *tpp;
3228 3229                  pszc++;
3229 3230                  tpp = PP_GROUPLEADER(pp, pszc);
3230 3231                  if (pp != tpp) {
3231 3232                          pp = tpp;
3232 3233                          goto next_size;
3233 3234                  }
3234 3235          }
3235 3236          XPV_ALLOW_MIGRATE();
3236 3237  }
3237 3238  
3238 3239  /*
3239 3240   * void hat_page_setattr(pp, flag)
3240 3241   * void hat_page_clrattr(pp, flag)
3241 3242   *      used to set/clr ref/mod bits.
3242 3243   */
3243 3244  void
3244 3245  hat_page_setattr(struct page *pp, uint_t flag)
3245 3246  {
3246 3247          vnode_t         *vp = pp->p_vnode;
3247 3248          kmutex_t        *vphm = NULL;
3248 3249          page_t          **listp;
3249 3250          int             noshuffle;
3250 3251  
3251 3252          noshuffle = flag & P_NSH;
3252 3253          flag &= ~P_NSH;
3253 3254  
3254 3255          if (PP_GETRM(pp, flag) == flag)
3255 3256                  return;
3256 3257  
3257 3258          if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp) &&
3258 3259              !noshuffle) {
3259 3260                  vphm = page_vnode_mutex(vp);
3260 3261                  mutex_enter(vphm);
3261 3262          }
3262 3263  
3263 3264          PP_SETRM(pp, flag);
3264 3265  
3265 3266          if (vphm != NULL) {
3266 3267  
3267 3268                  /*
3268 3269                   * Some File Systems examine v_pages for NULL w/o
3269 3270                   * grabbing the vphm mutex. Must not let it become NULL when
3270 3271                   * pp is the only page on the list.
3271 3272                   */
3272 3273                  if (pp->p_vpnext != pp) {
3273 3274                          page_vpsub(&vp->v_pages, pp);
3274 3275                          if (vp->v_pages != NULL)
3275 3276                                  listp = &vp->v_pages->p_vpprev->p_vpnext;
3276 3277                          else
3277 3278                                  listp = &vp->v_pages;
3278 3279                          page_vpadd(listp, pp);
3279 3280                  }
3280 3281                  mutex_exit(vphm);
3281 3282          }
3282 3283  }
3283 3284  
3284 3285  void
3285 3286  hat_page_clrattr(struct page *pp, uint_t flag)
3286 3287  {
3287 3288          vnode_t         *vp = pp->p_vnode;
3288 3289          ASSERT(!(flag & ~(P_MOD | P_REF | P_RO)));
3289 3290  
3290 3291          /*
3291 3292           * Caller is expected to hold page's io lock for VMODSORT to work
3292 3293           * correctly with pvn_vplist_dirty() and pvn_getdirty() when mod
3293 3294           * bit is cleared.
3294 3295           * We don't have an assert to avoid tripping some existing third party
3295 3296           * code. The dirty page is moved back to top of the v_page list
3296 3297           * after IO is done in pvn_write_done().
3297 3298           */
3298 3299          PP_CLRRM(pp, flag);
3299 3300  
3300 3301          if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp)) {
3301 3302  
3302 3303                  /*
3303 3304                   * VMODSORT works by removing write permissions and getting
3304 3305                   * a fault when a page is made dirty. At this point
3305 3306                   * we need to remove write permission from all mappings
3306 3307                   * to this page.
3307 3308                   */
3308 3309                  hati_page_clrwrt(pp);
3309 3310          }
3310 3311  }
3311 3312  
3312 3313  /*
3313 3314   *      If flag is specified, returns 0 if attribute is disabled
3314 3315   *      and nonzero if enabled.  If flag specifies multiple attributes
3315 3316   *      then returns 0 if ALL attributes are disabled.  This is an advisory
3316 3317   *      call.
3317 3318   */
3318 3319  uint_t
3319 3320  hat_page_getattr(struct page *pp, uint_t flag)
3320 3321  {
3321 3322          return (PP_GETRM(pp, flag));
3322 3323  }
3323 3324  
3324 3325  
3325 3326  /*
3326      - * common code used by hat_pageunload() and hment_steal()
     3327 + * common code used by hat_page_inval() and hment_steal()
3327 3328   */
3328 3329  hment_t *
3329 3330  hati_page_unmap(page_t *pp, htable_t *ht, uint_t entry)
3330 3331  {
3331 3332          x86pte_t old_pte;
3332 3333          pfn_t pfn = pp->p_pagenum;
3333 3334          hment_t *hm;
3334 3335  
3335 3336          /*
3336 3337           * We need to acquire a hold on the htable in order to
3337 3338           * do the invalidate. We know the htable must exist, since
3338 3339           * unmaps don't release the htable until after removing any
3339 3340           * hment. Having x86_hm_enter() keeps that from proceeding.
3340 3341           */
3341 3342          htable_acquire(ht);
3342 3343  
3343 3344          /*
3344 3345           * Invalidate the PTE and remove the hment.
3345 3346           */
3346 3347          old_pte = x86pte_inval(ht, entry, 0, NULL, B_TRUE);
3347 3348          if (PTE2PFN(old_pte, ht->ht_level) != pfn) {
3348 3349                  panic("x86pte_inval() failure found PTE = " FMT_PTE
3349 3350                      " pfn being unmapped is %lx ht=0x%lx entry=0x%x",
3350 3351                      old_pte, pfn, (uintptr_t)ht, entry);
3351 3352          }
3352 3353  
3353 3354          /*
3354 3355           * Clean up all the htable information for this mapping
3355 3356           */
3356 3357          ASSERT(ht->ht_valid_cnt > 0);
3357 3358          HTABLE_DEC(ht->ht_valid_cnt);
3358 3359          PGCNT_DEC(ht->ht_hat, ht->ht_level);
3359 3360  
3360 3361          /*
3361 3362           * sync ref/mod bits to the page_t
3362 3363           */
3363 3364          if (PTE_GET(old_pte, PT_SOFTWARE) < PT_NOSYNC)
3364 3365                  hati_sync_pte_to_page(pp, old_pte, ht->ht_level);
3365 3366  
3366 3367          /*
3367 3368           * Remove the mapping list entry for this page.
3368 3369           */
3369 3370          hm = hment_remove(pp, ht, entry);
3370 3371  
3371 3372          /*
3372 3373           * drop the mapping list lock so that we might free the
3373 3374           * hment and htable.
3374 3375           */
3375 3376          x86_hm_exit(pp);
3376 3377          htable_release(ht);
3377 3378          return (hm);
3378 3379  }
3379 3380  
3380 3381  extern int      vpm_enable;
3381 3382  /*
3382      - * Unload all translations to a page. If the page is a subpage of a large
     3383 + * Unload translations to a page. If the page is a subpage of a large
3383 3384   * page, the large page mappings are also removed.
3384      - *
3385      - * The forceflags are unused.
     3385 + * If curhat is not NULL, then we only unload the translation
     3386 + * for the given process; otherwise all translations are unloaded.
3386 3387   */
3387      -
3388      -/*ARGSUSED*/
3389      -static int
3390      -hati_pageunload(struct page *pp, uint_t pg_szcd, uint_t forceflag)
     3388 +void
     3389 +hat_page_inval(struct page *pp, uint_t pg_szcd, struct hat *curhat)
3391 3390  {
3392 3391          page_t          *cur_pp = pp;
3393 3392          hment_t         *hm;
3394 3393          hment_t         *prev;
3395 3394          htable_t        *ht;
3396 3395          uint_t          entry;
3397 3396          level_t         level;
     3397 +        ulong_t         cnt;
3398 3398  
3399 3399          XPV_DISALLOW_MIGRATE();
3400 3400  
3401      -        /*
3402      -         * prevent recursion due to kmem_free()
3403      -         */
3404      -        ++curthread->t_hatdepth;
3405      -        ASSERT(curthread->t_hatdepth < 16);
3406      -
3407 3401  #if defined(__amd64)
3408 3402          /*
3409 3403           * clear the vpm ref.
3410 3404           */
3411 3405          if (vpm_enable) {
3412 3406                  pp->p_vpmref = 0;
3413 3407          }
3414 3408  #endif
3415 3409          /*
3416 3410           * The loop with next_size handles pages with multiple pagesize mappings
3417 3411           */
3418 3412  next_size:
     3413 +        if (curhat != NULL)
     3414 +                cnt = hat_page_getshare(cur_pp);
3419 3415          for (;;) {
3420 3416  
3421 3417                  /*
3422 3418                   * Get a mapping list entry
3423 3419                   */
3424 3420                  x86_hm_enter(cur_pp);
3425 3421                  for (prev = NULL; ; prev = hm) {
3426 3422                          hm = hment_walk(cur_pp, &ht, &entry, prev);
3427 3423                          if (hm == NULL) {
3428 3424                                  x86_hm_exit(cur_pp);
3429 3425  
     3426 +curproc_done:
3430 3427                                  /*
3431 3428                                   * If not part of a larger page, we're done.
3432 3429                                   */
3433 3430                                  if (cur_pp->p_szc <= pg_szcd) {
3434      -                                        ASSERT(curthread->t_hatdepth > 0);
3435      -                                        --curthread->t_hatdepth;
3436 3431                                          XPV_ALLOW_MIGRATE();
3437      -                                        return (0);
     3432 +                                        return;
3438 3433                                  }
3439 3434  
3440 3435                                  /*
3441 3436                                   * Else check the next larger page size.
3442 3437                                   * hat_page_demote() may decrease p_szc
3443 3438                                   * but that's ok we'll just take an extra
3444 3439                                   * trip to discover there are no larger mappings
3445 3440                                   * and return.
3446 3441                                   */
3447 3442                                  ++pg_szcd;
3448 3443                                  cur_pp = PP_GROUPLEADER(cur_pp, pg_szcd);
3449 3444                                  goto next_size;
3450 3445                          }
3451 3446  
3452 3447                          /*
3453 3448                           * If this mapping size matches, remove it.
3454 3449                           */
3455 3450                          level = ht->ht_level;
3456      -                        if (level == pg_szcd)
3457      -                                break;
     3451 +                        if (level == pg_szcd) {
     3452 +                                if (curhat == NULL || ht->ht_hat == curhat)
     3453 +                                        break;
     3454 +                                /*
     3455 +                                 * Unloading only the given process but it's
     3456 +                                 * not the hat for the current process. Leave
     3457 +                                 * entry in place. Also do a safety check to
     3458 +                                 * ensure we don't get into an infinite loop.
     3459 +                                 */
     3460 +                                if (cnt-- == 0) {
     3461 +                                        x86_hm_exit(cur_pp);
     3462 +                                        goto curproc_done;
     3463 +                                }
     3464 +                        }
3458 3465                  }
3459 3466  
3460 3467                  /*
3461 3468                   * Remove the mapping list entry for this page.
3462 3469                   * Note this does the x86_hm_exit() for us.
3463 3470                   */
3464 3471                  hm = hati_page_unmap(cur_pp, ht, entry);
3465 3472                  if (hm != NULL)
3466 3473                          hment_free(hm);
     3474 +
     3475 +                /* Perform check above for being part of a larger page. */
     3476 +                if (curhat != NULL)
     3477 +                        goto curproc_done;
3467 3478          }
3468 3479  }
3469 3480  
     3481 +/*
     3482 + * Unload translations to a page. If unloadflag is HAT_CURPROC_PGUNLOAD, then
     3483 + * we only unload the translation for the current process; otherwise all
     3484 + * translations are unloaded.
     3485 + */
     3486 +static int
     3487 +hati_pageunload(struct page *pp, uint_t pg_szcd, uint_t unloadflag)
     3488 +{
     3489 +        struct hat      *curhat = NULL;
     3490 +
     3491 +        /*
     3492 +         * prevent recursion due to kmem_free()
     3493 +         */
     3494 +        ++curthread->t_hatdepth;
     3495 +        ASSERT(curthread->t_hatdepth < 16);
     3496 +
     3497 +        if (unloadflag == HAT_CURPROC_PGUNLOAD)
     3498 +                curhat = curthread->t_procp->p_as->a_hat;
     3499 +
     3500 +        hat_page_inval(pp, pg_szcd, curhat);
     3501 +
     3502 +        ASSERT(curthread->t_hatdepth > 0);
     3503 +        --curthread->t_hatdepth;
     3504 +        return (0);
     3505 +}
     3506 +
3470 3507  int
3471      -hat_pageunload(struct page *pp, uint_t forceflag)
     3508 +hat_pageunload(struct page *pp, uint_t unloadflag)
3472 3509  {
3473 3510          ASSERT(PAGE_EXCL(pp));
3474      -        return (hati_pageunload(pp, 0, forceflag));
     3511 +        return (hati_pageunload(pp, 0, unloadflag));
3475 3512  }
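
A minimal caller sketch (hypothetical; memcap_invalidate_page() is only an illustration and
not part of this change) of the new unloadflag: HAT_CURPROC_PGUNLOAD drops just the calling
process's translations through hat_page_inval(), while HAT_FORCE_PGUNLOAD keeps the
unload-everything behavior:

static void
memcap_invalidate_page(page_t *pp, boolean_t freeing)
{
        ASSERT(PAGE_EXCL(pp));

        if (freeing) {
                /* the page is going away entirely: unload every translation */
                (void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD);
        } else {
                /* over the zone memcap: drop only the current process's mapping */
                (void) hat_pageunload(pp, HAT_CURPROC_PGUNLOAD);
        }
}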
3476 3513  
3477 3514  /*
3478 3515   * Unload all large mappings to pp and reduce by 1 the p_szc of every large
3479 3516   * page level that included pp.
3480 3517   *
3481 3518   * pp must be locked EXCL. Even though no other constituent pages are locked
3482 3519   * it's legal to unload large mappings to pp because all constituent pages of
3483 3520   * large locked mappings have to be locked SHARED.  Therefore if we have EXCL
3484 3521   * lock on one of the constituent pages, none of the large mappings to pp are
3485 3522   * locked.
3486 3523   *
3487 3524   * Change (always decrease) p_szc field starting from the last constituent
3488 3525   * page and ending with root constituent page so that root's pszc always shows
3489 3526   * the area where hat_page_demote() may be active.
3490 3527   *
3491 3528   * This mechanism is only used for file system pages where it's not always
3492 3529   * possible to get EXCL locks on all constituent pages to demote the size code
3493 3530   * (as is done for anonymous or kernel large pages).
3494 3531   */
3495 3532  void
3496 3533  hat_page_demote(page_t *pp)
3497 3534  {
3498 3535          uint_t          pszc;
3499 3536          uint_t          rszc;
3500 3537          uint_t          szc;
3501 3538          page_t          *rootpp;
3502 3539          page_t          *firstpp;
3503 3540          page_t          *lastpp;
3504 3541          pgcnt_t         pgcnt;
3505 3542  
3506 3543          ASSERT(PAGE_EXCL(pp));
3507 3544          ASSERT(!PP_ISFREE(pp));
3508 3545          ASSERT(page_szc_lock_assert(pp));
3509 3546  
3510 3547          if (pp->p_szc == 0)
3511 3548                  return;
3512 3549  
3513 3550          rootpp = PP_GROUPLEADER(pp, 1);
3514 3551          (void) hati_pageunload(rootpp, 1, HAT_FORCE_PGUNLOAD);
3515 3552  
3516 3553          /*
3517 3554           * all large mappings to pp are gone
3518 3555           * and no new can be setup since pp is locked exclusively.
3519 3556           *
3520 3557           * Lock the root to make sure there's only one hat_page_demote()
3521 3558           * outstanding within the area of this root's pszc.
3522 3559           *
3523 3560           * Second potential hat_page_demote() is already eliminated by upper
3524 3561           * VM layer via page_szc_lock() but we don't rely on it and use our
3525 3562           * own locking (so that upper layer locking can be changed without
3526 3563           * assumptions that hat depends on upper layer VM to prevent multiple
3527 3564           * hat_page_demote() to be issued simultaneously to the same large
3528 3565           * page).
3529 3566           */
3530 3567  again:
3531 3568          pszc = pp->p_szc;
3532 3569          if (pszc == 0)
3533 3570                  return;
3534 3571          rootpp = PP_GROUPLEADER(pp, pszc);
3535 3572          x86_hm_enter(rootpp);
3536 3573          /*
3537 3574           * If root's p_szc is different from pszc we raced with another
3538 3575           * hat_page_demote().  Drop the lock and try to find the root again.
3539 3576           * If root's p_szc is greater than pszc previous hat_page_demote() is
3540 3577           * not done yet.  Take and release mlist lock of root's root to wait
3541 3578           * for previous hat_page_demote() to complete.
3542 3579           */
3543 3580          if ((rszc = rootpp->p_szc) != pszc) {
3544 3581                  x86_hm_exit(rootpp);
3545 3582                  if (rszc > pszc) {
3546 3583                          /* p_szc of a locked non free page can't increase */
3547 3584                          ASSERT(pp != rootpp);
3548 3585  
3549 3586                          rootpp = PP_GROUPLEADER(rootpp, rszc);
3550 3587                          x86_hm_enter(rootpp);
3551 3588                          x86_hm_exit(rootpp);
3552 3589                  }
3553 3590                  goto again;
3554 3591          }
3555 3592          ASSERT(pp->p_szc == pszc);
3556 3593  
3557 3594          /*
3558 3595           * Decrement by 1 p_szc of every constituent page of a region that
3559 3596           * covered pp. For example if original szc is 3 it gets changed to 2
3560 3597           * everywhere except in region 2 that covered pp. Region 2 that
3561 3598           * covered pp gets demoted to 1 everywhere except in region 1 that
3562 3599           * covered pp. The region 1 that covered pp is demoted to region
3563 3600           * 0. It's done this way because from region 3 we removed level 3
3564 3601           * mappings, from region 2 that covered pp we removed level 2 mappings
3565 3602           * and from region 1 that covered pp we removed level 1 mappings.  All
3566 3603           * changes are done from from high pfn's to low pfn's so that roots
3567 3604           * are changed last allowing one to know the largest region where
3568 3605           * hat_page_demote() is stil active by only looking at the root page.
3569 3606           *
3570 3607           * This algorithm is implemented in 2 while loops. First loop changes
3571 3608           * p_szc of pages to the right of pp's level 1 region and second
3572 3609           * loop changes p_szc of pages of level 1 region that covers pp
3573 3610           * and all pages to the left of level 1 region that covers pp.
3574 3611           * In the first loop p_szc keeps dropping with every iteration
3575 3612           * and in the second loop it keeps increasing with every iteration.
3576 3613           *
3577 3614           * First loop description: Demote pages to the right of pp outside of
3578 3615           * level 1 region that covers pp.  In every iteration of the while
3579 3616           * loop below find the last page of szc region and the first page of
3580 3617           * (szc - 1) region that is immediately to the right of (szc - 1)
3581 3618           * region that covers pp.  From last such page to first such page
3582 3619           * change every page's szc to szc - 1. Decrement szc and continue
3583 3620           * looping until szc is 1. If pp belongs to the last (szc - 1) region
3584 3621           * of szc region skip to the next iteration.
3585 3622           */
3586 3623          szc = pszc;
3587 3624          while (szc > 1) {
3588 3625                  lastpp = PP_GROUPLEADER(pp, szc);
3589 3626                  pgcnt = page_get_pagecnt(szc);
3590 3627                  lastpp += pgcnt - 1;
3591 3628                  firstpp = PP_GROUPLEADER(pp, (szc - 1));
3592 3629                  pgcnt = page_get_pagecnt(szc - 1);
3593 3630                  if (lastpp - firstpp < pgcnt) {
3594 3631                          szc--;
3595 3632                          continue;
3596 3633                  }
3597 3634                  firstpp += pgcnt;
3598 3635                  while (lastpp != firstpp) {
3599 3636                          ASSERT(lastpp->p_szc == pszc);
3600 3637                          lastpp->p_szc = szc - 1;
3601 3638                          lastpp--;
3602 3639                  }
3603 3640                  firstpp->p_szc = szc - 1;
3604 3641                  szc--;
3605 3642          }
3606 3643  
3607 3644          /*
3608 3645           * Second loop description:
3609 3646           * First iteration changes p_szc to 0 of every
3610 3647           * page of level 1 region that covers pp.
3611 3648           * Subsequent iterations find last page of szc region
3612 3649           * immediately to the left of szc region that covered pp
3613 3650           * and first page of (szc + 1) region that covers pp.
3614 3651           * From last to first page change p_szc of every page to szc.
3615 3652           * Increment szc and continue looping until szc is pszc.
3616 3653           * If pp belongs to the fist szc region of (szc + 1) region
3617 3654           * skip to the next iteration.
3618 3655           *
3619 3656           */
3620 3657          szc = 0;
3621 3658          while (szc < pszc) {
3622 3659                  firstpp = PP_GROUPLEADER(pp, (szc + 1));
3623 3660                  if (szc == 0) {
3624 3661                          pgcnt = page_get_pagecnt(1);
3625 3662                          lastpp = firstpp + (pgcnt - 1);
3626 3663                  } else {
3627 3664                          lastpp = PP_GROUPLEADER(pp, szc);
3628 3665                          if (firstpp == lastpp) {
3629 3666                                  szc++;
3630 3667                                  continue;
3631 3668                          }
3632 3669                          lastpp--;
3633 3670                          pgcnt = page_get_pagecnt(szc);
3634 3671                  }
3635 3672                  while (lastpp != firstpp) {
3636 3673                          ASSERT(lastpp->p_szc == pszc);
3637 3674                          lastpp->p_szc = szc;
3638 3675                          lastpp--;
3639 3676                  }
3640 3677                  firstpp->p_szc = szc;
3641 3678                  if (firstpp == rootpp)
3642 3679                          break;
3643 3680                  szc++;
3644 3681          }
3645 3682          x86_hm_exit(rootpp);
3646 3683  }
3647 3684  
3648 3685  /*
3649 3686   * get hw stats from hardware into page struct and reset hw stats
3650 3687   * returns attributes of page
3651 3688   * Flags for hat_pagesync, hat_getstat, hat_sync
3652 3689   *
3653 3690   * define       HAT_SYNC_ZERORM         0x01
3654 3691   *
3655 3692   * Additional flags for hat_pagesync
3656 3693   *
3657 3694   * define       HAT_SYNC_STOPON_REF     0x02
3658 3695   * define       HAT_SYNC_STOPON_MOD     0x04
3659 3696   * define       HAT_SYNC_STOPON_RM      0x06
3660 3697   * define       HAT_SYNC_STOPON_SHARED  0x08
3661 3698   */
3662 3699  uint_t
3663 3700  hat_pagesync(struct page *pp, uint_t flags)
3664 3701  {
3665 3702          hment_t         *hm = NULL;
3666 3703          htable_t        *ht;
3667 3704          uint_t          entry;
3668 3705          x86pte_t        old, save_old;
3669 3706          x86pte_t        new;
3670 3707          uchar_t         nrmbits = P_REF|P_MOD|P_RO;
3671 3708          extern ulong_t  po_share;
3672 3709          page_t          *save_pp = pp;
3673 3710          uint_t          pszc = 0;
3674 3711  
3675 3712          ASSERT(PAGE_LOCKED(pp) || panicstr);
3676 3713  
3677 3714          if (PP_ISRO(pp) && (flags & HAT_SYNC_STOPON_MOD))
3678 3715                  return (pp->p_nrm & nrmbits);
3679 3716  
3680 3717          if ((flags & HAT_SYNC_ZERORM) == 0) {
3681 3718  
3682 3719                  if ((flags & HAT_SYNC_STOPON_REF) != 0 && PP_ISREF(pp))
3683 3720                          return (pp->p_nrm & nrmbits);
3684 3721  
3685 3722                  if ((flags & HAT_SYNC_STOPON_MOD) != 0 && PP_ISMOD(pp))
3686 3723                          return (pp->p_nrm & nrmbits);
3687 3724  
3688 3725                  if ((flags & HAT_SYNC_STOPON_SHARED) != 0 &&
3689 3726                      hat_page_getshare(pp) > po_share) {
3690 3727                          if (PP_ISRO(pp))
3691 3728                                  PP_SETREF(pp);
3692 3729                          return (pp->p_nrm & nrmbits);
3693 3730                  }
3694 3731          }
3695 3732  
3696 3733          XPV_DISALLOW_MIGRATE();
3697 3734  next_size:
3698 3735          /*
3699 3736           * walk thru the mapping list syncing (and clearing) ref/mod bits.
3700 3737           */
3701 3738          x86_hm_enter(pp);
3702 3739          while ((hm = hment_walk(pp, &ht, &entry, hm)) != NULL) {
3703 3740                  if (ht->ht_level < pszc)
3704 3741                          continue;
3705 3742                  old = x86pte_get(ht, entry);
3706 3743  try_again:
3707 3744  
3708 3745                  ASSERT(PTE2PFN(old, ht->ht_level) == pp->p_pagenum);
3709 3746  
3710 3747                  if (PTE_GET(old, PT_REF | PT_MOD) == 0)
3711 3748                          continue;
3712 3749  
3713 3750                  save_old = old;
3714 3751                  if ((flags & HAT_SYNC_ZERORM) != 0) {
3715 3752  
3716 3753                          /*
3717 3754                           * Need to clear ref or mod bits. Need to demap
3718 3755                           * to make sure any executing TLBs see cleared bits.
3719 3756                           */
3720 3757                          new = old;
3721 3758                          PTE_CLR(new, PT_REF | PT_MOD);
3722 3759                          old = hati_update_pte(ht, entry, old, new);
3723 3760                          if (old != 0)
3724 3761                                  goto try_again;
3725 3762  
3726 3763                          old = save_old;
3727 3764                  }
3728 3765  
3729 3766                  /*
3730 3767                   * Sync the PTE
3731 3768                   */
3732 3769                  if (!(flags & HAT_SYNC_ZERORM) &&
3733 3770                      PTE_GET(old, PT_SOFTWARE) <= PT_NOSYNC)
3734 3771                          hati_sync_pte_to_page(pp, old, ht->ht_level);
3735 3772  
3736 3773                  /*
3737 3774                   * can stop short if we found a ref'd or mod'd page
3738 3775                   */
3739 3776                  if ((flags & HAT_SYNC_STOPON_MOD) && PP_ISMOD(save_pp) ||
3740 3777                      (flags & HAT_SYNC_STOPON_REF) && PP_ISREF(save_pp)) {
3741 3778                          x86_hm_exit(pp);
3742 3779                          goto done;
3743 3780                  }
3744 3781          }
3745 3782          x86_hm_exit(pp);
3746 3783          while (pszc < pp->p_szc) {
3747 3784                  page_t *tpp;
3748 3785                  pszc++;
3749 3786                  tpp = PP_GROUPLEADER(pp, pszc);
3750 3787                  if (pp != tpp) {
3751 3788                          pp = tpp;
3752 3789                          goto next_size;
3753 3790                  }
3754 3791          }
3755 3792  done:
3756 3793          XPV_ALLOW_MIGRATE();
3757 3794          return (save_pp->p_nrm & nrmbits);
3758 3795  }
3759 3796  
3760 3797  /*
3761 3798   * returns approx number of mappings to this pp.  A return of 0 implies
3762 3799   * there are no mappings to the page.
3763 3800   */
3764 3801  ulong_t
3765 3802  hat_page_getshare(page_t *pp)
3766 3803  {
3767 3804          uint_t cnt;
3768 3805          cnt = hment_mapcnt(pp);
3769 3806  #if defined(__amd64)
3770 3807          if (vpm_enable && pp->p_vpmref) {
3771 3808                  cnt += 1;
3772 3809          }
3773 3810  #endif
3774 3811          return (cnt);
3775 3812  }
3776 3813  
3777 3814  /*
3778 3815   * Return 1 if the number of mappings exceeds sh_thresh. Return 0
3779 3816   * otherwise.
3780 3817   */
3781 3818  int
3782 3819  hat_page_checkshare(page_t *pp, ulong_t sh_thresh)
3783 3820  {
3784 3821          return (hat_page_getshare(pp) > sh_thresh);
3785 3822  }
3786 3823  
3787 3824  /*
3788 3825   * hat_softlock isn't supported anymore
3789 3826   */
3790 3827  /*ARGSUSED*/
3791 3828  faultcode_t
3792 3829  hat_softlock(
3793 3830          hat_t *hat,
3794 3831          caddr_t addr,
3795 3832          size_t *len,
3796 3833          struct page **page_array,
3797 3834          uint_t flags)
3798 3835  {
3799 3836          return (FC_NOSUPPORT);
3800 3837  }
3801 3838  
3802 3839  
3803 3840  
3804 3841  /*
3805 3842   * Routine to expose supported HAT features to platform independent code.
3806 3843   */
3807 3844  /*ARGSUSED*/
3808 3845  int
3809 3846  hat_supported(enum hat_features feature, void *arg)
3810 3847  {
3811 3848          switch (feature) {
3812 3849  
3813 3850          case HAT_SHARED_PT:     /* this is really ISM */
3814 3851                  return (1);
3815 3852  
3816 3853          case HAT_DYNAMIC_ISM_UNMAP:
3817 3854                  return (0);
3818 3855  
3819 3856          case HAT_VMODSORT:
3820 3857                  return (1);
3821 3858  
3822 3859          case HAT_SHARED_REGIONS:
3823 3860                  return (0);
3824 3861  
3825 3862          default:
3826 3863                  panic("hat_supported() - unknown feature");
3827 3864          }
3828 3865          return (0);
3829 3866  }
3830 3867  
3831 3868  /*
3832 3869   * Called when a thread is exiting and has been switched to the kernel AS
3833 3870   */
3834 3871  void
3835 3872  hat_thread_exit(kthread_t *thd)
3836 3873  {
3837 3874          ASSERT(thd->t_procp->p_as == &kas);
3838 3875          XPV_DISALLOW_MIGRATE();
3839 3876          hat_switch(thd->t_procp->p_as->a_hat);
3840 3877          XPV_ALLOW_MIGRATE();
3841 3878  }
3842 3879  
3843 3880  /*
3844 3881   * Setup the given brand new hat structure as the new HAT on this cpu's mmu.
3845 3882   */
3846 3883  /*ARGSUSED*/
3847 3884  void
3848 3885  hat_setup(hat_t *hat, int flags)
3849 3886  {
3850 3887          XPV_DISALLOW_MIGRATE();
3851 3888          kpreempt_disable();
3852 3889  
3853 3890          hat_switch(hat);
3854 3891  
3855 3892          kpreempt_enable();
3856 3893          XPV_ALLOW_MIGRATE();
3857 3894  }
3858 3895  
3859 3896  /*
3860 3897   * Prepare for a CPU private mapping for the given address.
3861 3898   *
3862 3899   * The address can only be used from a single CPU and can be remapped
3863 3900   * using hat_mempte_remap().  Return the address of the PTE.
3864 3901   *
3865 3902   * We do the htable_create() if necessary and increment the valid count so
3866 3903   * the htable can't disappear.  We also hat_devload() the page table into
3867 3904   * kernel so that the PTE is quickly accessed.
3868 3905   */
3869 3906  hat_mempte_t
3870 3907  hat_mempte_setup(caddr_t addr)
3871 3908  {
3872 3909          uintptr_t       va = (uintptr_t)addr;
3873 3910          htable_t        *ht;
3874 3911          uint_t          entry;
3875 3912          x86pte_t        oldpte;
3876 3913          hat_mempte_t    p;
3877 3914  
3878 3915          ASSERT(IS_PAGEALIGNED(va));
3879 3916          ASSERT(!IN_VA_HOLE(va));
3880 3917          ++curthread->t_hatdepth;
3881 3918          XPV_DISALLOW_MIGRATE();
3882 3919          ht = htable_getpte(kas.a_hat, va, &entry, &oldpte, 0);
3883 3920          if (ht == NULL) {
3884 3921                  ht = htable_create(kas.a_hat, va, 0, NULL);
3885 3922                  entry = htable_va2entry(va, ht);
3886 3923                  ASSERT(ht->ht_level == 0);
3887 3924                  oldpte = x86pte_get(ht, entry);
3888 3925          }
3889 3926          if (PTE_ISVALID(oldpte))
3890 3927                  panic("hat_mempte_setup(): address already mapped"
3891 3928                      " ht=%p, entry=%d, pte=" FMT_PTE, (void *)ht, entry, oldpte);
3892 3929  
3893 3930          /*
3894 3931           * increment ht_valid_cnt so that the pagetable can't disappear
3895 3932           */
3896 3933          HTABLE_INC(ht->ht_valid_cnt);
3897 3934  
3898 3935          /*
3899 3936           * return the PTE physical address to the caller.
3900 3937           */
3901 3938          htable_release(ht);
3902 3939          XPV_ALLOW_MIGRATE();
3903 3940          p = PT_INDEX_PHYSADDR(pfn_to_pa(ht->ht_pfn), entry);
3904 3941          --curthread->t_hatdepth;
3905 3942          return (p);
3906 3943  }
3907 3944  
3908 3945  /*
3909 3946   * Release a CPU private mapping for the given address.
3910 3947   * We decrement the htable valid count so it might be destroyed.
3911 3948   */
3912 3949  /*ARGSUSED1*/
3913 3950  void
3914 3951  hat_mempte_release(caddr_t addr, hat_mempte_t pte_pa)
3915 3952  {
3916 3953          htable_t        *ht;
3917 3954  
3918 3955          XPV_DISALLOW_MIGRATE();
3919 3956          /*
3920 3957           * invalidate any left over mapping and decrement the htable valid count
3921 3958           */
3922 3959  #ifdef __xpv
3923 3960          if (HYPERVISOR_update_va_mapping((uintptr_t)addr, 0,
3924 3961              UVMF_INVLPG | UVMF_LOCAL))
3925 3962                  panic("HYPERVISOR_update_va_mapping() failed");
3926 3963  #else
3927 3964          {
3928 3965                  x86pte_t *pteptr;
3929 3966  
3930 3967                  pteptr = x86pte_mapin(mmu_btop(pte_pa),
3931 3968                      (pte_pa & MMU_PAGEOFFSET) >> mmu.pte_size_shift, NULL);
3932 3969                  if (mmu.pae_hat)
3933 3970                          *pteptr = 0;
3934 3971                  else
3935 3972                          *(x86pte32_t *)pteptr = 0;
3936 3973                  mmu_tlbflush_entry(addr);
3937 3974                  x86pte_mapout();
3938 3975          }
3939 3976  #endif
3940 3977  
3941 3978          ht = htable_getpte(kas.a_hat, ALIGN2PAGE(addr), NULL, NULL, 0);
3942 3979          if (ht == NULL)
3943 3980                  panic("hat_mempte_release(): invalid address");
3944 3981          ASSERT(ht->ht_level == 0);
3945 3982          HTABLE_DEC(ht->ht_valid_cnt);
3946 3983          htable_release(ht);
3947 3984          XPV_ALLOW_MIGRATE();
3948 3985  }
3949 3986  
3950 3987  /*
3951 3988   * Apply a temporary CPU private mapping to a page. We flush the TLB only
3952 3989   * on this CPU, so this ought to have been called with preemption disabled.
3953 3990   */
3954 3991  void
3955 3992  hat_mempte_remap(
3956 3993          pfn_t           pfn,
3957 3994          caddr_t         addr,
3958 3995          hat_mempte_t    pte_pa,
3959 3996          uint_t          attr,
3960 3997          uint_t          flags)
3961 3998  {
3962 3999          uintptr_t       va = (uintptr_t)addr;
3963 4000          x86pte_t        pte;
3964 4001  
3965 4002          /*
3966 4003           * Remap the given PTE to the new page's PFN. Invalidate only
3967 4004           * on this CPU.
3968 4005           */
3969 4006  #ifdef DEBUG
3970 4007          htable_t        *ht;
3971 4008          uint_t          entry;
3972 4009  
3973 4010          ASSERT(IS_PAGEALIGNED(va));
3974 4011          ASSERT(!IN_VA_HOLE(va));
3975 4012          ht = htable_getpte(kas.a_hat, va, &entry, NULL, 0);
3976 4013          ASSERT(ht != NULL);
3977 4014          ASSERT(ht->ht_level == 0);
3978 4015          ASSERT(ht->ht_valid_cnt > 0);
3979 4016          ASSERT(ht->ht_pfn == mmu_btop(pte_pa));
3980 4017          htable_release(ht);
3981 4018  #endif
3982 4019          XPV_DISALLOW_MIGRATE();
3983 4020          pte = hati_mkpte(pfn, attr, 0, flags);
3984 4021  #ifdef __xpv
3985 4022          if (HYPERVISOR_update_va_mapping(va, pte, UVMF_INVLPG | UVMF_LOCAL))
3986 4023                  panic("HYPERVISOR_update_va_mapping() failed");
3987 4024  #else
3988 4025          {
3989 4026                  x86pte_t *pteptr;
3990 4027  
3991 4028                  pteptr = x86pte_mapin(mmu_btop(pte_pa),
3992 4029                      (pte_pa & MMU_PAGEOFFSET) >> mmu.pte_size_shift, NULL);
3993 4030                  if (mmu.pae_hat)
3994 4031                          *(x86pte_t *)pteptr = pte;
3995 4032                  else
3996 4033                          *(x86pte32_t *)pteptr = (x86pte32_t)pte;
3997 4034                  mmu_tlbflush_entry(addr);
3998 4035                  x86pte_mapout();
3999 4036          }
4000 4037  #endif
4001 4038          XPV_ALLOW_MIGRATE();
4002 4039  }
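
Note: the three hat_mempte_*() routines above form a small protocol. The sketch below is illustrative only; priv_va, the attr/flag values, and the example_* helper names are assumptions. It shows the intended setup/remap/release lifecycle, with preemption disabled around the CPU-local remap as the comment above requires.

	static caddr_t		priv_va;	/* assumed page-aligned per-CPU VA */
	static hat_mempte_t	priv_pte;

	static void
	example_mempte_access(pfn_t pfn)
	{
		if (priv_pte == 0)
			priv_pte = hat_mempte_setup(priv_va);	/* one-time */

		kpreempt_disable();		/* remap flushes only this CPU */
		hat_mempte_remap(pfn, priv_va, priv_pte,
		    PROT_READ | PROT_WRITE, HAT_LOAD_NOCONSIST);
		/* ... access the frame through priv_va ... */
		kpreempt_enable();
	}

	static void
	example_mempte_teardown(void)
	{
		hat_mempte_release(priv_va, priv_pte);
	}
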
4003 4040  
4004 4041  
4005 4042  
4006 4043  /*
4007 4044   * Hat locking functions
4008 4045   * XXX - these two functions are currently being used by hatstats;
4009 4046   *      they can be removed by using a per-as mutex for hatstats.
4010 4047   */
4011 4048  void
4012 4049  hat_enter(hat_t *hat)
4013 4050  {
4014 4051          mutex_enter(&hat->hat_mutex);
4015 4052  }
4016 4053  
4017 4054  void
4018 4055  hat_exit(hat_t *hat)
4019 4056  {
4020 4057          mutex_exit(&hat->hat_mutex);
4021 4058  }
4022 4059  
4023 4060  /*
4024 4061   * HAT part of cpu initialization.
4025 4062   */
4026 4063  void
4027 4064  hat_cpu_online(struct cpu *cpup)
4028 4065  {
4029 4066          if (cpup != CPU) {
4030 4067                  x86pte_cpu_init(cpup);
4031 4068                  hat_vlp_setup(cpup);
4032 4069          }
4033 4070          CPUSET_ATOMIC_ADD(khat_cpuset, cpup->cpu_id);
4034 4071  }
4035 4072  
4036 4073  /*
4037 4074   * HAT part of cpu deletion.
4038 4075   * (currently, we only call this after the cpu is safely passivated.)
4039 4076   */
4040 4077  void
4041 4078  hat_cpu_offline(struct cpu *cpup)
4042 4079  {
4043 4080          ASSERT(cpup != CPU);
4044 4081  
4045 4082          CPUSET_ATOMIC_DEL(khat_cpuset, cpup->cpu_id);
4046 4083          hat_vlp_teardown(cpup);
4047 4084          x86pte_cpu_fini(cpup);
4048 4085  }
4049 4086  
4050 4087  /*
4051 4088   * Function called after all CPUs are brought online.
4052 4089   * Used to remove low address boot mappings.
4053 4090   */
4054 4091  void
4055 4092  clear_boot_mappings(uintptr_t low, uintptr_t high)
4056 4093  {
4057 4094          uintptr_t vaddr = low;
4058 4095          htable_t *ht = NULL;
4059 4096          level_t level;
4060 4097          uint_t entry;
4061 4098          x86pte_t pte;
4062 4099  
4063 4100          /*
4064 4101           * On the 1st CPU we can unload the prom mappings; basically we blow away
4065 4102           * all virtual mappings under _userlimit.
4066 4103           */
4067 4104          while (vaddr < high) {
4068 4105                  pte = htable_walk(kas.a_hat, &ht, &vaddr, high);
4069 4106                  if (ht == NULL)
4070 4107                          break;
4071 4108  
4072 4109                  level = ht->ht_level;
4073 4110                  entry = htable_va2entry(vaddr, ht);
4074 4111                  ASSERT(level <= mmu.max_page_level);
4075 4112                  ASSERT(PTE_ISPAGE(pte, level));
4076 4113  
4077 4114                  /*
4078 4115                   * Unload the mapping from the page tables.
4079 4116                   */
4080 4117                  (void) x86pte_inval(ht, entry, 0, NULL, B_TRUE);
4081 4118                  ASSERT(ht->ht_valid_cnt > 0);
4082 4119                  HTABLE_DEC(ht->ht_valid_cnt);
4083 4120                  PGCNT_DEC(ht->ht_hat, ht->ht_level);
4084 4121  
4085 4122                  vaddr += LEVEL_SIZE(ht->ht_level);
4086 4123          }
4087 4124          if (ht)
4088 4125                  htable_release(ht);
4089 4126  }
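
Note: an illustrative invocation, consistent with the comments above (the actual call site is not shown in this diff): once all CPUs are online, everything below the kernel/user boundary that boot established is torn down.

	clear_boot_mappings(0, _userlimit);
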
4090 4127  
4091 4128  /*
4092 4129   * Atomically update a new translation for a single page.  If the
4093 4130   * currently installed PTE doesn't match the value we expect to find,
4094 4131   * it's not updated and we return the PTE we found.
4095 4132   *
4096 4133   * If activating nosync or NOWRITE and the page was modified we need to sync
4097 4134   * with the page_t. Also sync with page_t if clearing ref/mod bits.
4098 4135   */
4099 4136  static x86pte_t
4100 4137  hati_update_pte(htable_t *ht, uint_t entry, x86pte_t expected, x86pte_t new)
4101 4138  {
4102 4139          page_t          *pp;
4103 4140          uint_t          rm = 0;
4104 4141          x86pte_t        replaced;
4105 4142  
4106 4143          if (PTE_GET(expected, PT_SOFTWARE) < PT_NOSYNC &&
4107 4144              PTE_GET(expected, PT_MOD | PT_REF) &&
4108 4145              (PTE_GET(new, PT_NOSYNC) || !PTE_GET(new, PT_WRITABLE) ||
4109 4146              !PTE_GET(new, PT_MOD | PT_REF))) {
4110 4147  
4111 4148                  ASSERT(!pfn_is_foreign(PTE2PFN(expected, ht->ht_level)));
4112 4149                  pp = page_numtopp_nolock(PTE2PFN(expected, ht->ht_level));
4113 4150                  ASSERT(pp != NULL);
4114 4151                  if (PTE_GET(expected, PT_MOD))
4115 4152                          rm |= P_MOD;
4116 4153                  if (PTE_GET(expected, PT_REF))
4117 4154                          rm |= P_REF;
4118 4155                  PTE_CLR(new, PT_MOD | PT_REF);
4119 4156          }
4120 4157  
4121 4158          replaced = x86pte_update(ht, entry, expected, new);
4122 4159          if (replaced != expected)
4123 4160                  return (replaced);
4124 4161  
4125 4162          if (rm) {
4126 4163                  /*
4127 4164                   * sync to all constituent pages of a large page
4128 4165                   */
4129 4166                  pgcnt_t pgcnt = page_get_pagecnt(ht->ht_level);
4130 4167                  ASSERT(IS_P2ALIGNED(pp->p_pagenum, pgcnt));
4131 4168                  while (pgcnt-- > 0) {
4132 4169                          /*
4133 4170                           * hat_page_demote() can't decrease
4134 4171                           * pszc below this mapping size
4135 4172                           * since large mapping existed after we
4136 4173                           * took mlist lock.
4137 4174                           */
4138 4175                          ASSERT(pp->p_szc >= ht->ht_level);
4139 4176                          hat_page_setattr(pp, rm);
4140 4177                          ++pp;
4141 4178                  }
4142 4179          }
4143 4180  
4144 4181          return (0);
4145 4182  }
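
Note: hati_update_pte() only succeeds when the installed PTE still matches 'expected'; a zero return means the swap (and any needed page_t sync) happened. The retry loop below is an illustrative in-file caller pattern, not code from this change.

	static void
	example_pte_cas_loop(htable_t *ht, uint_t entry, x86pte_t new)
	{
		x86pte_t	expected;

		for (;;) {
			expected = x86pte_get(ht, entry);
			if (hati_update_pte(ht, entry, expected, new) == 0)
				break;	/* installed; ref/mod synced if needed */
		}
	}
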
4146 4183  
4147 4184  /* ARGSUSED */
4148 4185  void
4149 4186  hat_join_srd(struct hat *hat, vnode_t *evp)
4150 4187  {
4151 4188  }
4152 4189  
4153 4190  /* ARGSUSED */
4154 4191  hat_region_cookie_t
4155 4192  hat_join_region(struct hat *hat,
4156 4193      caddr_t r_saddr,
4157 4194      size_t r_size,
4158 4195      void *r_obj,
4159 4196      u_offset_t r_objoff,
4160 4197      uchar_t r_perm,
4161 4198      uchar_t r_pgszc,
4162 4199      hat_rgn_cb_func_t r_cb_function,
4163 4200      uint_t flags)
4164 4201  {
4165 4202          panic("No shared region support on x86");
4166 4203          return (HAT_INVALID_REGION_COOKIE);
4167 4204  }
4168 4205  
4169 4206  /* ARGSUSED */
4170 4207  void
4171 4208  hat_leave_region(struct hat *hat, hat_region_cookie_t rcookie, uint_t flags)
4172 4209  {
4173 4210          panic("No shared region support on x86");
4174 4211  }
4175 4212  
4176 4213  /* ARGSUSED */
4177 4214  void
4178 4215  hat_dup_region(struct hat *hat, hat_region_cookie_t rcookie)
4179 4216  {
4180 4217          panic("No shared region support on x86");
4181 4218  }
4182 4219  
4183 4220  
4184 4221  /*
4185 4222   * Kernel Physical Mapping (kpm) facility
4186 4223   *
4187 4224   * Most of the routines needed to support segkpm are almost no-ops on the
4188 4225   * x86 platform.  We map in the entire segment when it is created and leave
4189 4226   * it mapped in, so there is no additional work required to set up and tear
4190 4227   * down individual mappings.  All of these routines were created to support
4191 4228   * SPARC platforms that have to avoid aliasing in their virtually indexed
4192 4229   * caches.
4193 4230   *
4194 4231   * Most of the routines have sanity checks in them (e.g. verifying that the
4195 4232   * passed-in page is locked).  We don't actually care about most of these
4196 4233   * checks on x86, but we leave them in place to identify problems in the
4197 4234   * upper levels.
4198 4235   */
4199 4236  
4200 4237  /*
4201 4238   * Map in a locked page and return the vaddr.
4202 4239   */
4203 4240  /*ARGSUSED*/
4204 4241  caddr_t
4205 4242  hat_kpm_mapin(struct page *pp, struct kpme *kpme)
4206 4243  {
4207 4244          caddr_t         vaddr;
4208 4245  
4209 4246  #ifdef DEBUG
4210 4247          if (kpm_enable == 0) {
4211 4248                  cmn_err(CE_WARN, "hat_kpm_mapin: kpm_enable not set\n");
4212 4249                  return ((caddr_t)NULL);
4213 4250          }
4214 4251  
4215 4252          if (pp == NULL || PAGE_LOCKED(pp) == 0) {
4216 4253                  cmn_err(CE_WARN, "hat_kpm_mapin: pp zero or not locked\n");
4217 4254                  return ((caddr_t)NULL);
4218 4255          }
4219 4256  #endif
4220 4257  
4221 4258          vaddr = hat_kpm_page2va(pp, 1);
4222 4259  
4223 4260          return (vaddr);
4224 4261  }
4225 4262  
4226 4263  /*
4227 4264   * Mapout a locked page.
4228 4265   */
4229 4266  /*ARGSUSED*/
4230 4267  void
4231 4268  hat_kpm_mapout(struct page *pp, struct kpme *kpme, caddr_t vaddr)
4232 4269  {
4233 4270  #ifdef DEBUG
4234 4271          if (kpm_enable == 0) {
4235 4272                  cmn_err(CE_WARN, "hat_kpm_mapout: kpm_enable not set\n");
4236 4273                  return;
4237 4274          }
4238 4275  
4239 4276          if (IS_KPM_ADDR(vaddr) == 0) {
4240 4277                  cmn_err(CE_WARN, "hat_kpm_mapout: no kpm address\n");
4241 4278                  return;
4242 4279          }
4243 4280  
4244 4281          if (pp == NULL || PAGE_LOCKED(pp) == 0) {
4245 4282                  cmn_err(CE_WARN, "hat_kpm_mapout: page zero or not locked\n");
4246 4283                  return;
4247 4284          }
4248 4285  #endif
4249 4286  }
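
Note: a typical segkpm-style usage sketch (illustrative; example_zero_locked_page() is hypothetical): map a locked page, touch it through its kpm address, then drop the mapping.

	static void
	example_zero_locked_page(page_t *pp)
	{
		caddr_t	va;

		ASSERT(PAGE_LOCKED(pp));
		va = hat_kpm_mapin(pp, NULL);	/* kpme is unused on x86 */
		if (va != NULL) {
			bzero(va, PAGESIZE);
			hat_kpm_mapout(pp, NULL, va);
		}
	}
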
4250 4287  
4251 4288  /*
4252 4289   * hat_kpm_mapin_pfn is used to obtain a kpm mapping for physical
4253 4290   * memory addresses that are not described by a page_t.  It can
4254 4291   * also be used for normal pages that are not locked, but beware
4255 4292   * this is dangerous - no locking is performed, so the identity of
4256 4293   * the page could change.  hat_kpm_mapin_pfn is not supported when
4257 4294   * vac_colors > 1, because the chosen va depends on the page identity,
4258 4295   * which could change.
4259 4296   * The caller must only pass pfn's for valid physical addresses; violation
4260 4297   * of this rule will cause a panic.
4261 4298   */
4262 4299  caddr_t
4263 4300  hat_kpm_mapin_pfn(pfn_t pfn)
4264 4301  {
4265 4302          caddr_t paddr, vaddr;
4266 4303  
4267 4304          if (kpm_enable == 0)
4268 4305                  return ((caddr_t)NULL);
4269 4306  
4270 4307          paddr = (caddr_t)ptob(pfn);
4271 4308          vaddr = (uintptr_t)kpm_vbase + paddr;
4272 4309  
4273 4310          return ((caddr_t)vaddr);
4274 4311  }
4275 4312  
4276 4313  /*ARGSUSED*/
4277 4314  void
4278 4315  hat_kpm_mapout_pfn(pfn_t pfn)
4279 4316  {
4280 4317          /* empty */
4281 4318  }
4282 4319  
4283 4320  /*
4284 4321   * Return the kpm virtual address for a specific pfn
4285 4322   */
4286 4323  caddr_t
4287 4324  hat_kpm_pfn2va(pfn_t pfn)
4288 4325  {
4289 4326          uintptr_t vaddr = (uintptr_t)kpm_vbase + mmu_ptob(pfn);
4290 4327  
4291 4328          ASSERT(!pfn_is_foreign(pfn));
4292 4329          return ((caddr_t)vaddr);
4293 4330  }
4294 4331  
4295 4332  /*
4296 4333   * Return the kpm virtual address for the page at pp.
4297 4334   */
4298 4335  /*ARGSUSED*/
4299 4336  caddr_t
4300 4337  hat_kpm_page2va(struct page *pp, int checkswap)
4301 4338  {
4302 4339          return (hat_kpm_pfn2va(pp->p_pagenum));
4303 4340  }
4304 4341  
4305 4342  /*
4306 4343   * Return the page frame number for the kpm virtual address vaddr.
4307 4344   */
4308 4345  pfn_t
4309 4346  hat_kpm_va2pfn(caddr_t vaddr)
4310 4347  {
4311 4348          pfn_t           pfn;
4312 4349  
4313 4350          ASSERT(IS_KPM_ADDR(vaddr));
4314 4351  
4315 4352          pfn = (pfn_t)btop(vaddr - kpm_vbase);
4316 4353  
4317 4354          return (pfn);
4318 4355  }
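
Note: on x86 the kpm translation is pure arithmetic, so hat_kpm_pfn2va() and hat_kpm_va2pfn() are inverses; the round-trip check below is illustrative only.

	static void
	example_kpm_roundtrip(pfn_t pfn)
	{
		/* va = kpm_vbase + mmu_ptob(pfn) and pfn = btop(va - kpm_vbase) */
		ASSERT(hat_kpm_va2pfn(hat_kpm_pfn2va(pfn)) == pfn);
	}
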
4319 4356  
4320 4357  
4321 4358  /*
4322 4359   * Return the page for the kpm virtual address vaddr.
4323 4360   */
4324 4361  page_t *
4325 4362  hat_kpm_vaddr2page(caddr_t vaddr)
4326 4363  {
4327 4364          pfn_t           pfn;
4328 4365  
4329 4366          ASSERT(IS_KPM_ADDR(vaddr));
4330 4367  
4331 4368          pfn = hat_kpm_va2pfn(vaddr);
4332 4369  
4333 4370          return (page_numtopp_nolock(pfn));
4334 4371  }
4335 4372  
4336 4373  /*
4337 4374   * hat_kpm_fault is called from segkpm_fault when we take a page fault on a
4338 4375   * KPM page.  This should never happen on x86.
4339 4376   */
4340 4377  int
4341 4378  hat_kpm_fault(hat_t *hat, caddr_t vaddr)
4342 4379  {
4343 4380          panic("pagefault in seg_kpm.  hat: 0x%p  vaddr: 0x%p",
4344 4381              (void *)hat, (void *)vaddr);
4345 4382  
4346 4383          return (0);
4347 4384  }
4348 4385  
4349 4386  /*ARGSUSED*/
4350 4387  void
4351 4388  hat_kpm_mseghash_clear(int nentries)
4352 4389  {}
4353 4390  
4354 4391  /*ARGSUSED*/
4355 4392  void
4356 4393  hat_kpm_mseghash_update(pgcnt_t inx, struct memseg *msp)
4357 4394  {}
4358 4395  
4359 4396  #ifndef __xpv
4360 4397  void
4361 4398  hat_kpm_addmem_mseg_update(struct memseg *msp, pgcnt_t nkpmpgs,
4362 4399          offset_t kpm_pages_off)
4363 4400  {
4364 4401          _NOTE(ARGUNUSED(nkpmpgs, kpm_pages_off));
4365 4402          pfn_t base, end;
4366 4403  
4367 4404          /*
4368 4405           * kphysm_add_memory_dynamic() does not set nkpmpgs
4369 4406           * when page_t memory is externally allocated.  That
4370 4407           * code must properly calculate nkpmpgs in all cases
4371 4408           * if nkpmpgs needs to be used at some point.
4372 4409           */
4373 4410  
4374 4411          /*
4375 4412           * The meta (page_t) pages for dynamically added memory are allocated
4376 4413           * either from the incoming memory itself or from existing memory.
4377 4414           * In the former case the base of the incoming pages will be different
4378 4415           * than the base of the dynamic segment so call memseg_get_start() to
4379 4416           * get the actual base of the incoming memory for each case.
4380 4417           */
4381 4418  
4382 4419          base = memseg_get_start(msp);
4383 4420          end = msp->pages_end;
4384 4421  
4385 4422          hat_devload(kas.a_hat, kpm_vbase + mmu_ptob(base),
4386 4423              mmu_ptob(end - base), base, PROT_READ | PROT_WRITE,
4387 4424              HAT_LOAD | HAT_LOAD_LOCK | HAT_LOAD_NOCONSIST);
4388 4425  }
4389 4426  
4390 4427  void
4391 4428  hat_kpm_addmem_mseg_insert(struct memseg *msp)
4392 4429  {
4393 4430          _NOTE(ARGUNUSED(msp));
4394 4431  }
4395 4432  
4396 4433  void
4397 4434  hat_kpm_addmem_memsegs_update(struct memseg *msp)
4398 4435  {
4399 4436          _NOTE(ARGUNUSED(msp));
4400 4437  }
4401 4438  
4402 4439  /*
4403 4440   * Return end of metadata for an already setup memseg.
4404 4441   * X86 platforms don't need per-page meta data to support kpm.
4405 4442   */
4406 4443  caddr_t
4407 4444  hat_kpm_mseg_reuse(struct memseg *msp)
4408 4445  {
4409 4446          return ((caddr_t)msp->epages);
4410 4447  }
4411 4448  
4412 4449  void
4413 4450  hat_kpm_delmem_mseg_update(struct memseg *msp, struct memseg **mspp)
4414 4451  {
4415 4452          _NOTE(ARGUNUSED(msp, mspp));
4416 4453          ASSERT(0);
4417 4454  }
4418 4455  
4419 4456  void
4420 4457  hat_kpm_split_mseg_update(struct memseg *msp, struct memseg **mspp,
4421 4458          struct memseg *lo, struct memseg *mid, struct memseg *hi)
4422 4459  {
4423 4460          _NOTE(ARGUNUSED(msp, mspp, lo, mid, hi));
4424 4461          ASSERT(0);
4425 4462  }
4426 4463  
4427 4464  /*
4428 4465   * Walk the memsegs chain, applying func to each memseg span.
4429 4466   */
4430 4467  void
4431 4468  hat_kpm_walk(void (*func)(void *, void *, size_t), void *arg)
4432 4469  {
4433 4470          pfn_t   pbase, pend;
4434 4471          void    *base;
4435 4472          size_t  size;
4436 4473          struct memseg *msp;
4437 4474  
4438 4475          for (msp = memsegs; msp; msp = msp->next) {
4439 4476                  pbase = msp->pages_base;
4440 4477                  pend = msp->pages_end;
4441 4478                  base = ptob(pbase) + kpm_vbase;
4442 4479                  size = ptob(pend - pbase);
4443 4480                  func(arg, base, size);
4444 4481          }
4445 4482  }
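
Note: an illustrative hat_kpm_walk() callback (count_kpm_span() and its caller are hypothetical) that tallies how much physical memory is reachable through kpm.

	static void
	count_kpm_span(void *arg, void *base, size_t size)
	{
		size_t	*total = arg;

		*total += size;		/* base is unused here */
	}

	static size_t
	example_kpm_total(void)
	{
		size_t	total = 0;

		hat_kpm_walk(count_kpm_span, &total);
		return (total);
	}
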
4446 4483  
4447 4484  #else   /* __xpv */
4448 4485  
4449 4486  /*
4450 4487   * There are specific Hypervisor calls to establish and remove mappings
4451 4488   * to grant table references and the privcmd driver. We have to ensure
4452 4489   * that a page table actually exists.
4453 4490   */
4454 4491  void
4455 4492  hat_prepare_mapping(hat_t *hat, caddr_t addr, uint64_t *pte_ma)
4456 4493  {
4457 4494          maddr_t base_ma;
4458 4495          htable_t *ht;
4459 4496          uint_t entry;
4460 4497  
4461 4498          ASSERT(IS_P2ALIGNED((uintptr_t)addr, MMU_PAGESIZE));
4462 4499          XPV_DISALLOW_MIGRATE();
4463 4500          ht = htable_create(hat, (uintptr_t)addr, 0, NULL);
4464 4501  
4465 4502          /*
4466 4503           * if an address for pte_ma is passed in, return the MA of the pte
4467 4504           * for this specific address.  This address is only valid as long
4468 4505           * as the htable stays locked.
4469 4506           */
4470 4507          if (pte_ma != NULL) {
4471 4508                  entry = htable_va2entry((uintptr_t)addr, ht);
4472 4509                  base_ma = pa_to_ma(ptob(ht->ht_pfn));
4473 4510                  *pte_ma = base_ma + (entry << mmu.pte_size_shift);
4474 4511          }
4475 4512          XPV_ALLOW_MIGRATE();
4476 4513  }
4477 4514  
4478 4515  void
4479 4516  hat_release_mapping(hat_t *hat, caddr_t addr)
4480 4517  {
4481 4518          htable_t *ht;
4482 4519  
4483 4520          ASSERT(IS_P2ALIGNED((uintptr_t)addr, MMU_PAGESIZE));
4484 4521          XPV_DISALLOW_MIGRATE();
4485 4522          ht = htable_lookup(hat, (uintptr_t)addr, 0);
4486 4523          ASSERT(ht != NULL);
4487 4524          ASSERT(ht->ht_busy >= 2);
4488 4525          htable_release(ht);
4489 4526          htable_release(ht);
4490 4527          XPV_ALLOW_MIGRATE();
4491 4528  }
4492 4529  #endif  /* __xpv */
  
    | 1008 lines elided |