          --- old/usr/src/uts/i86pc/vm/hat_i86.c
          +++ new/usr/src/uts/i86pc/vm/hat_i86.c
   1    1  /*
   2    2   * CDDL HEADER START
   3    3   *
   4    4   * The contents of this file are subject to the terms of the
   5    5   * Common Development and Distribution License (the "License").
   6    6   * You may not use this file except in compliance with the License.
   7    7   *
   8    8   * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9    9   * or http://www.opensolaris.org/os/licensing.
  10   10   * See the License for the specific language governing permissions
  11   11   * and limitations under the License.
  12   12   *
  13   13   * When distributing Covered Code, include this CDDL HEADER in each
  14   14   * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15   15   * If applicable, add the following below this CDDL HEADER, with the
  16   16   * fields enclosed by brackets "[]" replaced with your own identifying
  17   17   * information: Portions Copyright [yyyy] [name of copyright owner]
  18   18   *
  19   19   * CDDL HEADER END
  20   20   */
  21   21  /*
  22   22   * Copyright (c) 1992, 2010, Oracle and/or its affiliates. All rights reserved.
  23   23   */
  24   24  /*
  25   25   * Copyright (c) 2010, Intel Corporation.
  26   26   * All rights reserved.
  27   27   */
  28   28  /*
  29   29   * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
  30      - * Copyright 2014 Joyent, Inc.  All rights reserved.
  31   30   * Copyright (c) 2014, 2015 by Delphix. All rights reserved.
       31 + * Copyright 2014 Joyent, Inc.  All rights reserved.
  32   32   */
  33   33  
  34   34  /*
  35   35   * VM - Hardware Address Translation management for i386 and amd64
  36   36   *
  37   37   * Implementation of the interfaces described in <common/vm/hat.h>
  38   38   *
  39   39   * Nearly all the details of how the hardware is managed should not be
  40   40   * visible outside this layer except for misc. machine specific functions
  41   41   * that work in conjunction with this code.
  42   42   *
  43   43   * Routines used only inside of i86pc/vm start with hati_ for HAT Internal.
  44   44   */
  45   45  
  46   46  #include <sys/machparam.h>
  47   47  #include <sys/machsystm.h>
  48   48  #include <sys/mman.h>
  49   49  #include <sys/types.h>
  50   50  #include <sys/systm.h>
  51   51  #include <sys/cpuvar.h>
  52   52  #include <sys/thread.h>
  53   53  #include <sys/proc.h>
  54   54  #include <sys/cpu.h>
  55   55  #include <sys/kmem.h>
  56   56  #include <sys/disp.h>
  57   57  #include <sys/shm.h>
  58   58  #include <sys/sysmacros.h>
  59   59  #include <sys/machparam.h>
  60   60  #include <sys/vmem.h>
  61   61  #include <sys/vmsystm.h>
  62   62  #include <sys/promif.h>
  63   63  #include <sys/var.h>
  64   64  #include <sys/x86_archext.h>
  65   65  #include <sys/atomic.h>
  66   66  #include <sys/bitmap.h>
  67   67  #include <sys/controlregs.h>
  68   68  #include <sys/bootconf.h>
  69   69  #include <sys/bootsvcs.h>
  70   70  #include <sys/bootinfo.h>
  71   71  #include <sys/archsystm.h>
  72   72  
  73   73  #include <vm/seg_kmem.h>
  74   74  #include <vm/hat_i86.h>
  75   75  #include <vm/as.h>
  76   76  #include <vm/seg.h>
  77   77  #include <vm/page.h>
  78   78  #include <vm/seg_kp.h>
  79   79  #include <vm/seg_kpm.h>
  80   80  #include <vm/vm_dep.h>
  81   81  #ifdef __xpv
  82   82  #include <sys/hypervisor.h>
  83   83  #endif
  84   84  #include <vm/kboot_mmu.h>
  85   85  #include <vm/seg_spt.h>
  86   86  
  87   87  #include <sys/cmn_err.h>
  88   88  
  89   89  /*
  90   90   * Basic parameters for hat operation.
  91   91   */
  92   92  struct hat_mmu_info mmu;
  93   93  
  94   94  /*
  95   95   * The page that is the kernel's top level pagetable.
  96   96   *
  97   97   * For 32 bit PAE support on i86pc, the kernel hat will use the 1st 4 entries
  98   98   * on this 4K page for its top level page table. The remaining groups of
  99   99   * 4 entries are used for per processor copies of user VLP pagetables for
 100  100   * running threads.  See hat_switch() and reload_pae32() for details.
 101  101   *
 102  102   * vlp_page[0..3] - level==2 PTEs for kernel HAT
 103  103   * vlp_page[4..7] - level==2 PTEs for user thread on cpu 0
  104  104   * vlp_page[8..11]  - level==2 PTEs for user thread on cpu 1
 105  105   * etc...
 106  106   */
 107  107  static x86pte_t *vlp_page;
 108  108  
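
As an illustrative aside, a minimal user-space sketch (not the kernel's code) of the indexing described above: each 32 bit PAE hat owns a group of four level==2 entries in vlp_page, group 0 belonging to the kernel and group cpu_id + 1 to the thread running on that CPU, which is the same arithmetic reload_pae32() uses further down. VLP_NUM_PTES below is a stand-in constant for illustration.

#include <stdio.h>

#define VLP_NUM_PTES    4       /* four level==2 PTEs per PAE top level */

int
main(void)
{
        int cpu_id;

        for (cpu_id = 0; cpu_id < 3; cpu_id++) {
                /* group 0 belongs to the kernel hat; CPU n uses group n + 1 */
                int start = (cpu_id + 1) * VLP_NUM_PTES;

                (void) printf("cpu %d -> vlp_page[%d..%d]\n",
                    cpu_id, start, start + VLP_NUM_PTES - 1);
        }
        return (0);
}
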
 109  109  /*
 110  110   * forward declaration of internal utility routines
 111  111   */
 112  112  static x86pte_t hati_update_pte(htable_t *ht, uint_t entry, x86pte_t expected,
 113  113          x86pte_t new);
 114  114  
 115  115  /*
 116  116   * The kernel address space exists in all HATs. To implement this the
 117  117   * kernel reserves a fixed number of entries in the topmost level(s) of page
 118  118   * tables. The values are setup during startup and then copied to every user
 119  119   * hat created by hat_alloc(). This means that kernelbase must be:
 120  120   *
 121  121   *        4Meg aligned for 32 bit kernels
 122  122   *      512Gig aligned for x86_64 64 bit kernel
 123  123   *
 124  124   * The hat_kernel_range_ts describe what needs to be copied from kernel hat
 125  125   * to each user hat.
 126  126   */
 127  127  typedef struct hat_kernel_range {
 128  128          level_t         hkr_level;
 129  129          uintptr_t       hkr_start_va;
 130  130          uintptr_t       hkr_end_va;     /* zero means to end of memory */
 131  131  } hat_kernel_range_t;
 132  132  #define NUM_KERNEL_RANGE 2
 133  133  static hat_kernel_range_t kernel_ranges[NUM_KERNEL_RANGE];
 134  134  static int num_kernel_ranges;
 135  135  
 136  136  uint_t use_boot_reserve = 1;    /* cleared after early boot process */
 137  137  uint_t can_steal_post_boot = 0; /* set late in boot to enable stealing */
 138  138  
 139  139  /*
 140  140   * enable_1gpg: controls 1g page support for user applications.
 141  141   * By default, 1g pages are exported to user applications. enable_1gpg can
 142  142   * be set to 0 to not export.
 143  143   */
 144  144  int     enable_1gpg = 1;
 145  145  
 146  146  /*
  147  147   * AMD shanghai processors provide better management of 1gb ptes in their tlbs.
 148  148   * By default, 1g page support will be disabled for pre-shanghai AMD
 149  149   * processors that don't have optimal tlb support for the 1g page size.
 150  150   * chk_optimal_1gtlb can be set to 0 to force 1g page support on sub-optimal
 151  151   * processors.
 152  152   */
 153  153  int     chk_optimal_1gtlb = 1;
 154  154  
 155  155  
 156  156  #ifdef DEBUG
 157  157  uint_t  map1gcnt;
 158  158  #endif
 159  159  
 160  160  
 161  161  /*
 162  162   * A cpuset for all cpus. This is used for kernel address cross calls, since
 163  163   * the kernel addresses apply to all cpus.
 164  164   */
 165  165  cpuset_t khat_cpuset;
 166  166  
 167  167  /*
 168  168   * management stuff for hat structures
 169  169   */
 170  170  kmutex_t        hat_list_lock;
 171  171  kcondvar_t      hat_list_cv;
 172  172  kmem_cache_t    *hat_cache;
 173  173  kmem_cache_t    *hat_hash_cache;
 174  174  kmem_cache_t    *vlp_hash_cache;
 175  175  
 176  176  /*
 177  177   * Simple statistics
 178  178   */
 179  179  struct hatstats hatstat;
 180  180  
 181  181  /*
 182  182   * Some earlier hypervisor versions do not emulate cmpxchg of PTEs
 183  183   * correctly.  For such hypervisors we must set PT_USER for kernel
 184  184   * entries ourselves (normally the emulation would set PT_USER for
 185  185   * kernel entries and PT_USER|PT_GLOBAL for user entries).  pt_kern is
 186  186   * thus set appropriately.  Note that dboot/kbm is OK, as only the full
 187  187   * HAT uses cmpxchg() and the other paths (hypercall etc.) were never
 188  188   * incorrect.
 189  189   */
 190  190  int pt_kern;
 191  191  
 192  192  /*
 193  193   * useful stuff for atomic access/clearing/setting REF/MOD/RO bits in page_t's.
 194  194   */
 195  195  extern void atomic_orb(uchar_t *addr, uchar_t val);
 196  196  extern void atomic_andb(uchar_t *addr, uchar_t val);
 197  197  
 198  198  #ifndef __xpv
 199  199  extern pfn_t memseg_get_start(struct memseg *);
 200  200  #endif
 201  201  
 202  202  #define PP_GETRM(pp, rmmask)    (pp->p_nrm & rmmask)
 203  203  #define PP_ISMOD(pp)            PP_GETRM(pp, P_MOD)
 204  204  #define PP_ISREF(pp)            PP_GETRM(pp, P_REF)
 205  205  #define PP_ISRO(pp)             PP_GETRM(pp, P_RO)
 206  206  
 207  207  #define PP_SETRM(pp, rm)        atomic_orb(&(pp->p_nrm), rm)
 208  208  #define PP_SETMOD(pp)           PP_SETRM(pp, P_MOD)
 209  209  #define PP_SETREF(pp)           PP_SETRM(pp, P_REF)
 210  210  #define PP_SETRO(pp)            PP_SETRM(pp, P_RO)
 211  211  
 212  212  #define PP_CLRRM(pp, rm)        atomic_andb(&(pp->p_nrm), ~(rm))
 213  213  #define PP_CLRMOD(pp)           PP_CLRRM(pp, P_MOD)
 214  214  #define PP_CLRREF(pp)           PP_CLRRM(pp, P_REF)
 215  215  #define PP_CLRRO(pp)            PP_CLRRM(pp, P_RO)
 216  216  #define PP_CLRALL(pp)           PP_CLRRM(pp, P_MOD | P_REF | P_RO)
 217  217  
 218  218  /*
 219  219   * kmem cache constructor for struct hat
 220  220   */
 221  221  /*ARGSUSED*/
 222  222  static int
 223  223  hati_constructor(void *buf, void *handle, int kmflags)
 224  224  {
 225  225          hat_t   *hat = buf;
 226  226  
 227  227          mutex_init(&hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL);
 228  228          bzero(hat->hat_pages_mapped,
 229  229              sizeof (pgcnt_t) * (mmu.max_page_level + 1));
 230  230          hat->hat_ism_pgcnt = 0;
 231  231          hat->hat_stats = 0;
 232  232          hat->hat_flags = 0;
 233  233          CPUSET_ZERO(hat->hat_cpus);
 234  234          hat->hat_htable = NULL;
 235  235          hat->hat_ht_hash = NULL;
 236  236          return (0);
 237  237  }
 238  238  
 239  239  /*
 240  240   * Allocate a hat structure for as. We also create the top level
 241  241   * htable and initialize it to contain the kernel hat entries.
 242  242   */
 243  243  hat_t *
 244  244  hat_alloc(struct as *as)
 245  245  {
 246  246          hat_t                   *hat;
 247  247          htable_t                *ht;    /* top level htable */
 248  248          uint_t                  use_vlp;
 249  249          uint_t                  r;
 250  250          hat_kernel_range_t      *rp;
 251  251          uintptr_t               va;
 252  252          uintptr_t               eva;
 253  253          uint_t                  start;
 254  254          uint_t                  cnt;
 255  255          htable_t                *src;
 256  256  
 257  257          /*
 258  258           * Once we start creating user process HATs we can enable
 259  259           * the htable_steal() code.
 260  260           */
 261  261          if (can_steal_post_boot == 0)
 262  262                  can_steal_post_boot = 1;
 263  263  
 264  264          ASSERT(AS_WRITE_HELD(as));
 265  265          hat = kmem_cache_alloc(hat_cache, KM_SLEEP);
 266  266          hat->hat_as = as;
 267  267          mutex_init(&hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL);
 268  268          ASSERT(hat->hat_flags == 0);
 269  269  
 270  270  #if defined(__xpv)
 271  271          /*
 272  272           * No VLP stuff on the hypervisor due to the 64-bit split top level
 273  273           * page tables.  On 32-bit it's not needed as the hypervisor takes
 274  274           * care of copying the top level PTEs to a below 4Gig page.
 275  275           */
 276  276          use_vlp = 0;
 277  277  #else   /* __xpv */
  278  278          /* 32 bit processes use a VLP style hat when running with PAE */
 279  279  #if defined(__amd64)
 280  280          use_vlp = (ttoproc(curthread)->p_model == DATAMODEL_ILP32);
 281  281  #elif defined(__i386)
 282  282          use_vlp = mmu.pae_hat;
 283  283  #endif
 284  284  #endif  /* __xpv */
 285  285          if (use_vlp) {
 286  286                  hat->hat_flags = HAT_VLP;
 287  287                  bzero(hat->hat_vlp_ptes, VLP_SIZE);
 288  288          }
 289  289  
 290  290          /*
 291  291           * Allocate the htable hash
 292  292           */
 293  293          if ((hat->hat_flags & HAT_VLP)) {
 294  294                  hat->hat_num_hash = mmu.vlp_hash_cnt;
 295  295                  hat->hat_ht_hash = kmem_cache_alloc(vlp_hash_cache, KM_SLEEP);
 296  296          } else {
 297  297                  hat->hat_num_hash = mmu.hash_cnt;
 298  298                  hat->hat_ht_hash = kmem_cache_alloc(hat_hash_cache, KM_SLEEP);
 299  299          }
 300  300          bzero(hat->hat_ht_hash, hat->hat_num_hash * sizeof (htable_t *));
 301  301  
 302  302          /*
 303  303           * Initialize Kernel HAT entries at the top of the top level page
 304  304           * tables for the new hat.
 305  305           */
 306  306          hat->hat_htable = NULL;
 307  307          hat->hat_ht_cached = NULL;
 308  308          XPV_DISALLOW_MIGRATE();
 309  309          ht = htable_create(hat, (uintptr_t)0, TOP_LEVEL(hat), NULL);
 310  310          hat->hat_htable = ht;
 311  311  
 312  312  #if defined(__amd64)
 313  313          if (hat->hat_flags & HAT_VLP)
 314  314                  goto init_done;
 315  315  #endif
 316  316  
 317  317          for (r = 0; r < num_kernel_ranges; ++r) {
 318  318                  rp = &kernel_ranges[r];
 319  319                  for (va = rp->hkr_start_va; va != rp->hkr_end_va;
 320  320                      va += cnt * LEVEL_SIZE(rp->hkr_level)) {
 321  321  
 322  322                          if (rp->hkr_level == TOP_LEVEL(hat))
 323  323                                  ht = hat->hat_htable;
 324  324                          else
 325  325                                  ht = htable_create(hat, va, rp->hkr_level,
 326  326                                      NULL);
 327  327  
 328  328                          start = htable_va2entry(va, ht);
 329  329                          cnt = HTABLE_NUM_PTES(ht) - start;
 330  330                          eva = va +
 331  331                              ((uintptr_t)cnt << LEVEL_SHIFT(rp->hkr_level));
 332  332                          if (rp->hkr_end_va != 0 &&
 333  333                              (eva > rp->hkr_end_va || eva == 0))
 334  334                                  cnt = htable_va2entry(rp->hkr_end_va, ht) -
 335  335                                      start;
 336  336  
 337  337  #if defined(__i386) && !defined(__xpv)
 338  338                          if (ht->ht_flags & HTABLE_VLP) {
 339  339                                  bcopy(&vlp_page[start],
 340  340                                      &hat->hat_vlp_ptes[start],
 341  341                                      cnt * sizeof (x86pte_t));
 342  342                                  continue;
 343  343                          }
 344  344  #endif
 345  345                          src = htable_lookup(kas.a_hat, va, rp->hkr_level);
 346  346                          ASSERT(src != NULL);
 347  347                          x86pte_copy(src, ht, start, cnt);
 348  348                          htable_release(src);
 349  349                  }
 350  350          }
 351  351  
 352  352  init_done:
 353  353  
 354  354  #if defined(__xpv)
 355  355          /*
 356  356           * Pin top level page tables after initializing them
 357  357           */
 358  358          xen_pin(hat->hat_htable->ht_pfn, mmu.max_level);
 359  359  #if defined(__amd64)
 360  360          xen_pin(hat->hat_user_ptable, mmu.max_level);
 361  361  #endif
 362  362  #endif
 363  363          XPV_ALLOW_MIGRATE();
 364  364  
 365  365          /*
 366  366           * Put it at the start of the global list of all hats (used by stealing)
 367  367           *
 368  368           * kas.a_hat is not in the list but is instead used to find the
 369  369           * first and last items in the list.
 370  370           *
 371  371           * - kas.a_hat->hat_next points to the start of the user hats.
 372  372           *   The list ends where hat->hat_next == NULL
 373  373           *
 374  374           * - kas.a_hat->hat_prev points to the last of the user hats.
 375  375           *   The list begins where hat->hat_prev == NULL
 376  376           */
 377  377          mutex_enter(&hat_list_lock);
 378  378          hat->hat_prev = NULL;
 379  379          hat->hat_next = kas.a_hat->hat_next;
 380  380          if (hat->hat_next)
 381  381                  hat->hat_next->hat_prev = hat;
 382  382          else
 383  383                  kas.a_hat->hat_prev = hat;
 384  384          kas.a_hat->hat_next = hat;
 385  385          mutex_exit(&hat_list_lock);
 386  386  
 387  387          return (hat);
 388  388  }
 389  389  
 390  390  /*
 391  391   * process has finished executing but as has not been cleaned up yet.
 392  392   */
 393  393  /*ARGSUSED*/
 394  394  void
 395  395  hat_free_start(hat_t *hat)
 396  396  {
 397  397          ASSERT(AS_WRITE_HELD(hat->hat_as));
 398  398  
 399  399          /*
 400  400           * If the hat is currently a stealing victim, wait for the stealing
 401  401           * to finish.  Once we mark it as HAT_FREEING, htable_steal()
 402  402           * won't look at its pagetables anymore.
 403  403           */
 404  404          mutex_enter(&hat_list_lock);
 405  405          while (hat->hat_flags & HAT_VICTIM)
 406  406                  cv_wait(&hat_list_cv, &hat_list_lock);
 407  407          hat->hat_flags |= HAT_FREEING;
 408  408          mutex_exit(&hat_list_lock);
 409  409  }
 410  410  
 411  411  /*
 412  412   * An address space is being destroyed, so we destroy the associated hat.
 413  413   */
 414  414  void
 415  415  hat_free_end(hat_t *hat)
 416  416  {
 417  417          kmem_cache_t *cache;
 418  418  
 419  419          ASSERT(hat->hat_flags & HAT_FREEING);
 420  420  
 421  421          /*
 422  422           * must not be running on the given hat
 423  423           */
 424  424          ASSERT(CPU->cpu_current_hat != hat);
 425  425  
 426  426          /*
 427  427           * Remove it from the list of HATs
 428  428           */
 429  429          mutex_enter(&hat_list_lock);
 430  430          if (hat->hat_prev)
 431  431                  hat->hat_prev->hat_next = hat->hat_next;
 432  432          else
 433  433                  kas.a_hat->hat_next = hat->hat_next;
 434  434          if (hat->hat_next)
 435  435                  hat->hat_next->hat_prev = hat->hat_prev;
 436  436          else
 437  437                  kas.a_hat->hat_prev = hat->hat_prev;
 438  438          mutex_exit(&hat_list_lock);
 439  439          hat->hat_next = hat->hat_prev = NULL;
 440  440  
 441  441  #if defined(__xpv)
 442  442          /*
 443  443           * On the hypervisor, unpin top level page table(s)
 444  444           */
 445  445          xen_unpin(hat->hat_htable->ht_pfn);
 446  446  #if defined(__amd64)
 447  447          xen_unpin(hat->hat_user_ptable);
 448  448  #endif
 449  449  #endif
 450  450  
 451  451          /*
 452  452           * Make a pass through the htables freeing them all up.
 453  453           */
 454  454          htable_purge_hat(hat);
 455  455  
 456  456          /*
 457  457           * Decide which kmem cache the hash table came from, then free it.
 458  458           */
 459  459          if (hat->hat_flags & HAT_VLP)
 460  460                  cache = vlp_hash_cache;
 461  461          else
 462  462                  cache = hat_hash_cache;
 463  463          kmem_cache_free(cache, hat->hat_ht_hash);
 464  464          hat->hat_ht_hash = NULL;
 465  465  
 466  466          hat->hat_flags = 0;
 467  467          kmem_cache_free(hat_cache, hat);
 468  468  }
 469  469  
 470  470  /*
 471  471   * round kernelbase down to a supported value to use for _userlimit
 472  472   *
 473  473   * userlimit must be aligned down to an entry in the top level htable.
  474  474   * The one exception is for 32 bit HATs running PAE.
 475  475   */
 476  476  uintptr_t
 477  477  hat_kernelbase(uintptr_t va)
 478  478  {
 479  479  #if defined(__i386)
 480  480          va &= LEVEL_MASK(1);
 481  481  #endif
 482  482          if (IN_VA_HOLE(va))
 483  483                  panic("_userlimit %p will fall in VA hole\n", (void *)va);
 484  484          return (va);
 485  485  }
 486  486  
 487  487  /*
  488  488   * Determine the maximum page size level the hat will use.
 489  489   */
 490  490  static void
 491  491  set_max_page_level()
 492  492  {
 493  493          level_t lvl;
 494  494  
 495  495          if (!kbm_largepage_support) {
 496  496                  lvl = 0;
 497  497          } else {
 498  498                  if (is_x86_feature(x86_featureset, X86FSET_1GPG)) {
 499  499                          lvl = 2;
 500  500                          if (chk_optimal_1gtlb &&
 501  501                              cpuid_opteron_erratum(CPU, 6671130)) {
 502  502                                  lvl = 1;
 503  503                          }
 504  504                          if (plat_mnode_xcheck(LEVEL_SIZE(2) >>
 505  505                              LEVEL_SHIFT(0))) {
 506  506                                  lvl = 1;
 507  507                          }
 508  508                  } else {
 509  509                          lvl = 1;
 510  510                  }
 511  511          }
 512  512          mmu.max_page_level = lvl;
 513  513  
 514  514          if ((lvl == 2) && (enable_1gpg == 0))
 515  515                  mmu.umax_page_level = 1;
 516  516          else
 517  517                  mmu.umax_page_level = lvl;
 518  518  }
 519  519  
 520  520  /*
 521  521   * Initialize hat data structures based on processor MMU information.
 522  522   */
 523  523  void
 524  524  mmu_init(void)
 525  525  {
 526  526          uint_t max_htables;
 527  527          uint_t pa_bits;
 528  528          uint_t va_bits;
 529  529          int i;
 530  530  
 531  531          /*
 532  532           * If CPU enabled the page table global bit, use it for the kernel
 533  533           * This is bit 7 in CR4 (PGE - Page Global Enable).
 534  534           */
 535  535          if (is_x86_feature(x86_featureset, X86FSET_PGE) &&
 536  536              (getcr4() & CR4_PGE) != 0)
 537  537                  mmu.pt_global = PT_GLOBAL;
 538  538  
 539  539          /*
 540  540           * Detect NX and PAE usage.
 541  541           */
 542  542          mmu.pae_hat = kbm_pae_support;
 543  543          if (kbm_nx_support)
 544  544                  mmu.pt_nx = PT_NX;
 545  545          else
 546  546                  mmu.pt_nx = 0;
 547  547  
 548  548          /*
 549  549           * Use CPU info to set various MMU parameters
 550  550           */
 551  551          cpuid_get_addrsize(CPU, &pa_bits, &va_bits);
 552  552  
 553  553          if (va_bits < sizeof (void *) * NBBY) {
 554  554                  mmu.hole_start = (1ul << (va_bits - 1));
 555  555                  mmu.hole_end = 0ul - mmu.hole_start - 1;
 556  556          } else {
 557  557                  mmu.hole_end = 0;
 558  558                  mmu.hole_start = mmu.hole_end - 1;
 559  559          }
 560  560  #if defined(OPTERON_ERRATUM_121)
 561  561          /*
 562  562           * If erratum 121 has already been detected at this time, hole_start
 563  563           * contains the value to be subtracted from mmu.hole_start.
 564  564           */
 565  565          ASSERT(hole_start == 0 || opteron_erratum_121 != 0);
 566  566          hole_start = mmu.hole_start - hole_start;
 567  567  #else
 568  568          hole_start = mmu.hole_start;
 569  569  #endif
 570  570          hole_end = mmu.hole_end;
 571  571  
 572  572          mmu.highest_pfn = mmu_btop((1ull << pa_bits) - 1);
 573  573          if (mmu.pae_hat == 0 && pa_bits > 32)
 574  574                  mmu.highest_pfn = PFN_4G - 1;
 575  575  
 576  576          if (mmu.pae_hat) {
 577  577                  mmu.pte_size = 8;       /* 8 byte PTEs */
 578  578                  mmu.pte_size_shift = 3;
 579  579          } else {
 580  580                  mmu.pte_size = 4;       /* 4 byte PTEs */
 581  581                  mmu.pte_size_shift = 2;
 582  582          }
 583  583  
 584  584          if (mmu.pae_hat && !is_x86_feature(x86_featureset, X86FSET_PAE))
 585  585                  panic("Processor does not support PAE");
 586  586  
 587  587          if (!is_x86_feature(x86_featureset, X86FSET_CX8))
 588  588                  panic("Processor does not support cmpxchg8b instruction");
 589  589  
 590  590  #if defined(__amd64)
 591  591  
 592  592          mmu.num_level = 4;
 593  593          mmu.max_level = 3;
 594  594          mmu.ptes_per_table = 512;
 595  595          mmu.top_level_count = 512;
 596  596  
 597  597          mmu.level_shift[0] = 12;
 598  598          mmu.level_shift[1] = 21;
 599  599          mmu.level_shift[2] = 30;
 600  600          mmu.level_shift[3] = 39;
 601  601  
 602  602  #elif defined(__i386)
 603  603  
 604  604          if (mmu.pae_hat) {
 605  605                  mmu.num_level = 3;
 606  606                  mmu.max_level = 2;
 607  607                  mmu.ptes_per_table = 512;
 608  608                  mmu.top_level_count = 4;
 609  609  
 610  610                  mmu.level_shift[0] = 12;
 611  611                  mmu.level_shift[1] = 21;
 612  612                  mmu.level_shift[2] = 30;
 613  613  
 614  614          } else {
 615  615                  mmu.num_level = 2;
 616  616                  mmu.max_level = 1;
 617  617                  mmu.ptes_per_table = 1024;
 618  618                  mmu.top_level_count = 1024;
 619  619  
 620  620                  mmu.level_shift[0] = 12;
 621  621                  mmu.level_shift[1] = 22;
 622  622          }
 623  623  
 624  624  #endif  /* __i386 */
 625  625  
 626  626          for (i = 0; i < mmu.num_level; ++i) {
 627  627                  mmu.level_size[i] = 1UL << mmu.level_shift[i];
 628  628                  mmu.level_offset[i] = mmu.level_size[i] - 1;
 629  629                  mmu.level_mask[i] = ~mmu.level_offset[i];
 630  630          }
 631  631  
 632  632          set_max_page_level();
 633  633  
 634  634          mmu_page_sizes = mmu.max_page_level + 1;
 635  635          mmu_exported_page_sizes = mmu.umax_page_level + 1;
 636  636  
 637  637          /* restrict legacy applications from using pagesizes 1g and above */
 638  638          mmu_legacy_page_sizes =
 639  639              (mmu_exported_page_sizes > 2) ? 2 : mmu_exported_page_sizes;
 640  640  
 641  641  
 642  642          for (i = 0; i <= mmu.max_page_level; ++i) {
 643  643                  mmu.pte_bits[i] = PT_VALID | pt_kern;
 644  644                  if (i > 0)
 645  645                          mmu.pte_bits[i] |= PT_PAGESIZE;
 646  646          }
 647  647  
 648  648          /*
  649  649   * NOTE Legacy 32 bit PAE mode only has the PT_VALID bit at top level.
 650  650           */
 651  651          for (i = 1; i < mmu.num_level; ++i)
 652  652                  mmu.ptp_bits[i] = PT_PTPBITS;
 653  653  
 654  654  #if defined(__i386)
 655  655          mmu.ptp_bits[2] = PT_VALID;
 656  656  #endif
 657  657  
 658  658          /*
 659  659           * Compute how many hash table entries to have per process for htables.
 660  660           * We start with 1 page's worth of entries.
 661  661           *
  662  662   * If physical memory is small, reduce the amount needed to cover it.
 663  663           */
 664  664          max_htables = physmax / mmu.ptes_per_table;
 665  665          mmu.hash_cnt = MMU_PAGESIZE / sizeof (htable_t *);
 666  666          while (mmu.hash_cnt > 16 && mmu.hash_cnt >= max_htables)
 667  667                  mmu.hash_cnt >>= 1;
 668  668          mmu.vlp_hash_cnt = mmu.hash_cnt;
 669  669  
 670  670  #if defined(__amd64)
 671  671          /*
 672  672           * If running in 64 bits and physical memory is large,
 673  673           * increase the size of the cache to cover all of memory for
 674  674           * a 64 bit process.
 675  675           */
 676  676  #define HASH_MAX_LENGTH 4
 677  677          while (mmu.hash_cnt * HASH_MAX_LENGTH < max_htables)
 678  678                  mmu.hash_cnt <<= 1;
 679  679  #endif
 680  680  }
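
As a worked illustration (stand-alone C, not the kernel's code) of the per-level constants the loop above derives from the 64 bit shifts 12/21/30/39: level_size is the span one PTE covers at that level (4KB, 2MB, 1GB, 512GB), level_offset is the within-span mask, and level_mask is its complement.

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
        /* the 64 bit, 4-level shifts installed by mmu_init() */
        static const int shift[] = { 12, 21, 30, 39 };
        int i;

        for (i = 0; i < 4; i++) {
                uint64_t size = 1ULL << shift[i];       /* level_size */
                uint64_t offset = size - 1;             /* level_offset */
                uint64_t mask = ~offset;                /* level_mask */

                (void) printf("level %d: size=%llu offset=%#llx mask=%#llx\n",
                    i, (unsigned long long)size, (unsigned long long)offset,
                    (unsigned long long)mask);
        }
        return (0);
}
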
 681  681  
 682  682  
 683  683  /*
 684  684   * initialize hat data structures
 685  685   */
 686  686  void
 687  687  hat_init()
 688  688  {
 689  689  #if defined(__i386)
 690  690          /*
 691  691           * _userlimit must be aligned correctly
 692  692           */
 693  693          if ((_userlimit & LEVEL_MASK(1)) != _userlimit) {
 694  694                  prom_printf("hat_init(): _userlimit=%p, not aligned at %p\n",
 695  695                      (void *)_userlimit, (void *)LEVEL_SIZE(1));
 696  696                  halt("hat_init(): Unable to continue");
 697  697          }
 698  698  #endif
 699  699  
 700  700          cv_init(&hat_list_cv, NULL, CV_DEFAULT, NULL);
 701  701  
 702  702          /*
 703  703           * initialize kmem caches
 704  704           */
 705  705          htable_init();
 706  706          hment_init();
 707  707  
 708  708          hat_cache = kmem_cache_create("hat_t",
 709  709              sizeof (hat_t), 0, hati_constructor, NULL, NULL,
 710  710              NULL, 0, 0);
 711  711  
 712  712          hat_hash_cache = kmem_cache_create("HatHash",
 713  713              mmu.hash_cnt * sizeof (htable_t *), 0, NULL, NULL, NULL,
 714  714              NULL, 0, 0);
 715  715  
 716  716          /*
  717  717   * VLP hats can use a smaller hash table size on large memory machines
 718  718           */
 719  719          if (mmu.hash_cnt == mmu.vlp_hash_cnt) {
 720  720                  vlp_hash_cache = hat_hash_cache;
 721  721          } else {
 722  722                  vlp_hash_cache = kmem_cache_create("HatVlpHash",
 723  723                      mmu.vlp_hash_cnt * sizeof (htable_t *), 0, NULL, NULL, NULL,
 724  724                      NULL, 0, 0);
 725  725          }
 726  726  
 727  727          /*
 728  728           * Set up the kernel's hat
 729  729           */
 730  730          AS_LOCK_ENTER(&kas, RW_WRITER);
 731  731          kas.a_hat = kmem_cache_alloc(hat_cache, KM_NOSLEEP);
 732  732          mutex_init(&kas.a_hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL);
 733  733          kas.a_hat->hat_as = &kas;
 734  734          kas.a_hat->hat_flags = 0;
 735  735          AS_LOCK_EXIT(&kas);
 736  736  
 737  737          CPUSET_ZERO(khat_cpuset);
 738  738          CPUSET_ADD(khat_cpuset, CPU->cpu_id);
 739  739  
 740  740          /*
  741  741           * The kernel hat's next pointer serves as the head of the hat list.
 742  742           * The kernel hat's prev pointer tracks the last hat on the list for
 743  743           * htable_steal() to use.
 744  744           */
 745  745          kas.a_hat->hat_next = NULL;
 746  746          kas.a_hat->hat_prev = NULL;
 747  747  
 748  748          /*
 749  749           * Allocate an htable hash bucket for the kernel
 750  750           * XX64 - tune for 64 bit procs
 751  751           */
 752  752          kas.a_hat->hat_num_hash = mmu.hash_cnt;
 753  753          kas.a_hat->hat_ht_hash = kmem_cache_alloc(hat_hash_cache, KM_NOSLEEP);
 754  754          bzero(kas.a_hat->hat_ht_hash, mmu.hash_cnt * sizeof (htable_t *));
 755  755  
 756  756          /*
 757  757           * zero out the top level and cached htable pointers
 758  758           */
 759  759          kas.a_hat->hat_ht_cached = NULL;
 760  760          kas.a_hat->hat_htable = NULL;
 761  761  
 762  762          /*
 763  763           * Pre-allocate hrm_hashtab before enabling the collection of
 764  764           * refmod statistics.  Allocating on the fly would mean us
 765  765           * running the risk of suffering recursive mutex enters or
 766  766           * deadlocks.
 767  767           */
 768  768          hrm_hashtab = kmem_zalloc(HRM_HASHSIZE * sizeof (struct hrmstat *),
 769  769              KM_SLEEP);
 770  770  }
 771  771  
 772  772  /*
 773  773   * Prepare CPU specific pagetables for VLP processes on 64 bit kernels.
 774  774   *
 775  775   * Each CPU has a set of 2 pagetables that are reused for any 32 bit
 776  776   * process it runs. They are the top level pagetable, hci_vlp_l3ptes, and
 777  777   * the next to top level table for the bottom 512 Gig, hci_vlp_l2ptes.
 778  778   */
 779  779  /*ARGSUSED*/
 780  780  static void
 781  781  hat_vlp_setup(struct cpu *cpu)
 782  782  {
 783  783  #if defined(__amd64) && !defined(__xpv)
 784  784          struct hat_cpu_info *hci = cpu->cpu_hat_info;
 785  785          pfn_t pfn;
 786  786  
 787  787          /*
 788  788           * allocate the level==2 page table for the bottom most
 789  789           * 512Gig of address space (this is where 32 bit apps live)
 790  790           */
 791  791          ASSERT(hci != NULL);
 792  792          hci->hci_vlp_l2ptes = kmem_zalloc(MMU_PAGESIZE, KM_SLEEP);
 793  793  
 794  794          /*
 795  795           * Allocate a top level pagetable and copy the kernel's
 796  796           * entries into it. Then link in hci_vlp_l2ptes in the 1st entry.
 797  797           */
 798  798          hci->hci_vlp_l3ptes = kmem_zalloc(MMU_PAGESIZE, KM_SLEEP);
 799  799          hci->hci_vlp_pfn =
 800  800              hat_getpfnum(kas.a_hat, (caddr_t)hci->hci_vlp_l3ptes);
 801  801          ASSERT(hci->hci_vlp_pfn != PFN_INVALID);
 802  802          bcopy(vlp_page, hci->hci_vlp_l3ptes, MMU_PAGESIZE);
 803  803  
 804  804          pfn = hat_getpfnum(kas.a_hat, (caddr_t)hci->hci_vlp_l2ptes);
 805  805          ASSERT(pfn != PFN_INVALID);
 806  806          hci->hci_vlp_l3ptes[0] = MAKEPTP(pfn, 2);
 807  807  #endif /* __amd64 && !__xpv */
 808  808  }
 809  809  
 810  810  /*ARGSUSED*/
 811  811  static void
 812  812  hat_vlp_teardown(cpu_t *cpu)
 813  813  {
 814  814  #if defined(__amd64) && !defined(__xpv)
 815  815          struct hat_cpu_info *hci;
 816  816  
 817  817          if ((hci = cpu->cpu_hat_info) == NULL)
 818  818                  return;
 819  819          if (hci->hci_vlp_l2ptes)
 820  820                  kmem_free(hci->hci_vlp_l2ptes, MMU_PAGESIZE);
 821  821          if (hci->hci_vlp_l3ptes)
 822  822                  kmem_free(hci->hci_vlp_l3ptes, MMU_PAGESIZE);
 823  823  #endif
 824  824  }
 825  825  
 826  826  #define NEXT_HKR(r, l, s, e) {                  \
 827  827          kernel_ranges[r].hkr_level = l;         \
 828  828          kernel_ranges[r].hkr_start_va = s;      \
 829  829          kernel_ranges[r].hkr_end_va = e;        \
 830  830          ++r;                                    \
 831  831  }
 832  832  
 833  833  /*
 834  834   * Finish filling in the kernel hat.
 835  835   * Pre fill in all top level kernel page table entries for the kernel's
 836  836   * part of the address range.  From this point on we can't use any new
 837  837   * kernel large pages if they need PTE's at max_level
 838  838   *
 839  839   * create the kmap mappings.
 840  840   */
 841  841  void
 842  842  hat_init_finish(void)
 843  843  {
 844  844          size_t          size;
 845  845          uint_t          r = 0;
 846  846          uintptr_t       va;
 847  847          hat_kernel_range_t *rp;
 848  848  
 849  849  
 850  850          /*
 851  851           * We are now effectively running on the kernel hat.
 852  852           * Clearing use_boot_reserve shuts off using the pre-allocated boot
 853  853           * reserve for all HAT allocations.  From here on, the reserves are
 854  854           * only used when avoiding recursion in kmem_alloc().
 855  855           */
 856  856          use_boot_reserve = 0;
 857  857          htable_adjust_reserve();
 858  858  
 859  859          /*
 860  860           * User HATs are initialized with copies of all kernel mappings in
 861  861           * higher level page tables. Ensure that those entries exist.
 862  862           */
 863  863  #if defined(__amd64)
 864  864  
 865  865          NEXT_HKR(r, 3, kernelbase, 0);
 866  866  #if defined(__xpv)
 867  867          NEXT_HKR(r, 3, HYPERVISOR_VIRT_START, HYPERVISOR_VIRT_END);
 868  868  #endif
 869  869  
 870  870  #elif defined(__i386)
 871  871  
 872  872  #if !defined(__xpv)
 873  873          if (mmu.pae_hat) {
 874  874                  va = kernelbase;
 875  875                  if ((va & LEVEL_MASK(2)) != va) {
 876  876                          va = P2ROUNDUP(va, LEVEL_SIZE(2));
 877  877                          NEXT_HKR(r, 1, kernelbase, va);
 878  878                  }
 879  879                  if (va != 0)
 880  880                          NEXT_HKR(r, 2, va, 0);
 881  881          } else
 882  882  #endif /* __xpv */
 883  883                  NEXT_HKR(r, 1, kernelbase, 0);
 884  884  
 885  885  #endif /* __i386 */
 886  886  
 887  887          num_kernel_ranges = r;
 888  888  
 889  889          /*
 890  890           * Create all the kernel pagetables that will have entries
 891  891           * shared to user HATs.
 892  892           */
 893  893          for (r = 0; r < num_kernel_ranges; ++r) {
 894  894                  rp = &kernel_ranges[r];
 895  895                  for (va = rp->hkr_start_va; va != rp->hkr_end_va;
 896  896                      va += LEVEL_SIZE(rp->hkr_level)) {
 897  897                          htable_t *ht;
 898  898  
 899  899                          if (IN_HYPERVISOR_VA(va))
 900  900                                  continue;
 901  901  
 902  902                          /* can/must skip if a page mapping already exists */
 903  903                          if (rp->hkr_level <= mmu.max_page_level &&
 904  904                              (ht = htable_getpage(kas.a_hat, va, NULL)) !=
 905  905                              NULL) {
 906  906                                  htable_release(ht);
 907  907                                  continue;
 908  908                          }
 909  909  
 910  910                          (void) htable_create(kas.a_hat, va, rp->hkr_level - 1,
 911  911                              NULL);
 912  912                  }
 913  913          }
 914  914  
 915  915          /*
 916  916           * 32 bit PAE metal kernels use only 4 of the 512 entries in the
 917  917           * page holding the top level pagetable. We use the remainder for
 918  918           * the "per CPU" page tables for VLP processes.
 919  919           * Map the top level kernel pagetable into the kernel to make
  920  920   * it easy to use bcopy to access these tables.
 921  921           */
 922  922          if (mmu.pae_hat) {
 923  923                  vlp_page = vmem_alloc(heap_arena, MMU_PAGESIZE, VM_SLEEP);
 924  924                  hat_devload(kas.a_hat, (caddr_t)vlp_page, MMU_PAGESIZE,
 925  925                      kas.a_hat->hat_htable->ht_pfn,
 926  926  #if !defined(__xpv)
 927  927                      PROT_WRITE |
 928  928  #endif
 929  929                      PROT_READ | HAT_NOSYNC | HAT_UNORDERED_OK,
 930  930                      HAT_LOAD | HAT_LOAD_NOCONSIST);
 931  931          }
 932  932          hat_vlp_setup(CPU);
 933  933  
 934  934          /*
 935  935           * Create kmap (cached mappings of kernel PTEs)
 936  936           * for 32 bit we map from segmap_start .. ekernelheap
 937  937           * for 64 bit we map from segmap_start .. segmap_start + segmapsize;
 938  938           */
 939  939  #if defined(__i386)
 940  940          size = (uintptr_t)ekernelheap - segmap_start;
 941  941  #elif defined(__amd64)
 942  942          size = segmapsize;
 943  943  #endif
 944  944          hat_kmap_init((uintptr_t)segmap_start, size);
 945  945  }
 946  946  
 947  947  /*
 948  948   * On 32 bit PAE mode, PTE's are 64 bits, but ordinary atomic memory references
 949  949   * are 32 bit, so for safety we must use atomic_cas_64() to install these.
 950  950   */
 951  951  #ifdef __i386
 952  952  static void
 953  953  reload_pae32(hat_t *hat, cpu_t *cpu)
 954  954  {
 955  955          x86pte_t *src;
 956  956          x86pte_t *dest;
 957  957          x86pte_t pte;
 958  958          int i;
 959  959  
 960  960          /*
 961  961           * Load the 4 entries of the level 2 page table into this
 962  962           * cpu's range of the vlp_page and point cr3 at them.
 963  963           */
 964  964          ASSERT(mmu.pae_hat);
 965  965          src = hat->hat_vlp_ptes;
 966  966          dest = vlp_page + (cpu->cpu_id + 1) * VLP_NUM_PTES;
 967  967          for (i = 0; i < VLP_NUM_PTES; ++i) {
 968  968                  for (;;) {
 969  969                          pte = dest[i];
 970  970                          if (pte == src[i])
 971  971                                  break;
 972  972                          if (atomic_cas_64(dest + i, pte, src[i]) != src[i])
 973  973                                  break;
 974  974                  }
 975  975          }
 976  976  }
 977  977  #endif
 978  978  
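
reload_pae32() installs 64 bit PTEs with atomic_cas_64() because a 32 bit kernel cannot update them with a single ordinary store. An illustrative, stand-alone sketch of that install-via-compare-and-swap idea, using C11 atomics in place of atomic_cas_64() (the in-kernel loop has its own termination conditions):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t slot;   /* stands in for one vlp_page entry */

static void
install_pte(uint64_t want)
{
        for (;;) {
                uint64_t cur = atomic_load(&slot);

                if (cur == want)
                        break;          /* already holds the desired value */
                if (atomic_compare_exchange_weak(&slot, &cur, want))
                        break;          /* installed it in one atomic step */
                /* lost a race with another updater; re-read and retry */
        }
}

int
main(void)
{
        install_pte(0x12345000ULL | 0x1);
        (void) printf("slot = %#llx\n", (unsigned long long)slot);
        return (0);
}
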
 979  979  /*
 980  980   * Switch to a new active hat, maintaining bit masks to track active CPUs.
 981  981   *
  982  982   * On the 32-bit PAE hypervisor, %cr3 is a 64-bit value; on metal it
 983  983   * remains a 32-bit value.
 984  984   */
 985  985  void
 986  986  hat_switch(hat_t *hat)
 987  987  {
 988  988          uint64_t        newcr3;
 989  989          cpu_t           *cpu = CPU;
 990  990          hat_t           *old = cpu->cpu_current_hat;
 991  991  
 992  992          /*
 993  993           * set up this information first, so we don't miss any cross calls
 994  994           */
 995  995          if (old != NULL) {
 996  996                  if (old == hat)
 997  997                          return;
 998  998                  if (old != kas.a_hat)
 999  999                          CPUSET_ATOMIC_DEL(old->hat_cpus, cpu->cpu_id);
1000 1000          }
1001 1001  
1002 1002          /*
1003 1003           * Add this CPU to the active set for this HAT.
1004 1004           */
1005 1005          if (hat != kas.a_hat) {
1006 1006                  CPUSET_ATOMIC_ADD(hat->hat_cpus, cpu->cpu_id);
1007 1007          }
1008 1008          cpu->cpu_current_hat = hat;
1009 1009  
1010 1010          /*
1011 1011           * now go ahead and load cr3
1012 1012           */
1013 1013          if (hat->hat_flags & HAT_VLP) {
1014 1014  #if defined(__amd64)
1015 1015                  x86pte_t *vlpptep = cpu->cpu_hat_info->hci_vlp_l2ptes;
1016 1016  
1017 1017                  VLP_COPY(hat->hat_vlp_ptes, vlpptep);
1018 1018                  newcr3 = MAKECR3(cpu->cpu_hat_info->hci_vlp_pfn);
1019 1019  #elif defined(__i386)
1020 1020                  reload_pae32(hat, cpu);
1021 1021                  newcr3 = MAKECR3(kas.a_hat->hat_htable->ht_pfn) +
1022 1022                      (cpu->cpu_id + 1) * VLP_SIZE;
1023 1023  #endif
1024 1024          } else {
1025 1025                  newcr3 = MAKECR3((uint64_t)hat->hat_htable->ht_pfn);
1026 1026          }
1027 1027  #ifdef __xpv
1028 1028          {
1029 1029                  struct mmuext_op t[2];
1030 1030                  uint_t retcnt;
1031 1031                  uint_t opcnt = 1;
1032 1032  
1033 1033                  t[0].cmd = MMUEXT_NEW_BASEPTR;
1034 1034                  t[0].arg1.mfn = mmu_btop(pa_to_ma(newcr3));
1035 1035  #if defined(__amd64)
1036 1036                  /*
1037 1037                   * There's an interesting problem here, as to what to
1038 1038                   * actually specify when switching to the kernel hat.
1039 1039                   * For now we'll reuse the kernel hat again.
1040 1040                   */
1041 1041                  t[1].cmd = MMUEXT_NEW_USER_BASEPTR;
1042 1042                  if (hat == kas.a_hat)
1043 1043                          t[1].arg1.mfn = mmu_btop(pa_to_ma(newcr3));
1044 1044                  else
1045 1045                          t[1].arg1.mfn = pfn_to_mfn(hat->hat_user_ptable);
1046 1046                  ++opcnt;
1047 1047  #endif  /* __amd64 */
1048 1048                  if (HYPERVISOR_mmuext_op(t, opcnt, &retcnt, DOMID_SELF) < 0)
1049 1049                          panic("HYPERVISOR_mmu_update() failed");
1050 1050                  ASSERT(retcnt == opcnt);
1051 1051  
1052 1052          }
1053 1053  #else
1054 1054          setcr3(newcr3);
1055 1055  #endif
1056 1056          ASSERT(cpu == CPU);
1057 1057  }
1058 1058  
1059 1059  /*
1060 1060   * Utility to return a valid x86pte_t from protections, pfn, and level number
1061 1061   */
1062 1062  static x86pte_t
1063 1063  hati_mkpte(pfn_t pfn, uint_t attr, level_t level, uint_t flags)
1064 1064  {
1065 1065          x86pte_t        pte;
1066 1066          uint_t          cache_attr = attr & HAT_ORDER_MASK;
1067 1067  
1068 1068          pte = MAKEPTE(pfn, level);
1069 1069  
1070 1070          if (attr & PROT_WRITE)
1071 1071                  PTE_SET(pte, PT_WRITABLE);
1072 1072  
1073 1073          if (attr & PROT_USER)
1074 1074                  PTE_SET(pte, PT_USER);
1075 1075  
1076 1076          if (!(attr & PROT_EXEC))
1077 1077                  PTE_SET(pte, mmu.pt_nx);
1078 1078  
1079 1079          /*
 1080 1080   * Set the software bits used to track ref/mod syncs and hments.
1081 1081           * If not using REF/MOD, set them to avoid h/w rewriting PTEs.
1082 1082           */
1083 1083          if (flags & HAT_LOAD_NOCONSIST)
1084 1084                  PTE_SET(pte, PT_NOCONSIST | PT_REF | PT_MOD);
1085 1085          else if (attr & HAT_NOSYNC)
1086 1086                  PTE_SET(pte, PT_NOSYNC | PT_REF | PT_MOD);
1087 1087  
1088 1088          /*
1089 1089           * Set the caching attributes in the PTE. The combination
 1090 1090   * of attributes is poorly defined, so we pay attention
1091 1091           * to them in the given order.
1092 1092           *
1093 1093           * The test for HAT_STRICTORDER is different because it's defined
1094 1094           * as "0" - which was a stupid thing to do, but is too late to change!
1095 1095           */
1096 1096          if (cache_attr == HAT_STRICTORDER) {
1097 1097                  PTE_SET(pte, PT_NOCACHE);
1098 1098          /*LINTED [Lint hates empty ifs, but it's the obvious way to do this] */
1099 1099          } else if (cache_attr & (HAT_UNORDERED_OK | HAT_STORECACHING_OK)) {
1100 1100                  /* nothing to set */;
1101 1101          } else if (cache_attr & (HAT_MERGING_OK | HAT_LOADCACHING_OK)) {
1102 1102                  PTE_SET(pte, PT_NOCACHE);
1103 1103                  if (is_x86_feature(x86_featureset, X86FSET_PAT))
1104 1104                          PTE_SET(pte, (level == 0) ? PT_PAT_4K : PT_PAT_LARGE);
1105 1105                  else
1106 1106                          PTE_SET(pte, PT_WRITETHRU);
1107 1107          } else {
1108 1108                  panic("hati_mkpte(): bad caching attributes: %x\n", cache_attr);
1109 1109          }
1110 1110  
1111 1111          return (pte);
1112 1112  }
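
As an illustrative sketch (not the kernel's MAKEPTE/PTE_SET implementation) of what hati_mkpte() conceptually assembles for a level 0 mapping, using only the architectural x86 PTE bits (present, writable, user, no-execute) and the frame number in the address field. The macro names below are illustration-only; the software, PAT and large-page handling discussed above is omitted.

#include <stdint.h>
#include <stdio.h>

#define X_P             0x1ULL                  /* present/valid */
#define X_W             0x2ULL                  /* writable */
#define X_U             0x4ULL                  /* user accessible */
#define X_NX            (1ULL << 63)            /* no-execute */
#define X_PADDR(pfn)    ((uint64_t)(pfn) << 12) /* 4K frame address */

/* build a level 0 PTE: frame number plus permission bits */
static uint64_t
mkpte_sketch(uint64_t pfn, int writable, int user, int executable)
{
        uint64_t pte = X_PADDR(pfn) | X_P;

        if (writable)
                pte |= X_W;
        if (user)
                pte |= X_U;
        if (!executable)
                pte |= X_NX;
        return (pte);
}

int
main(void)
{
        /* a read-only, non-executable kernel mapping of pfn 0x1234 */
        (void) printf("pte = %#llx\n",
            (unsigned long long)mkpte_sketch(0x1234, 0, 0, 0));
        return (0);
}
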
1113 1113  
1114 1114  /*
1115 1115   * Duplicate address translations of the parent to the child.
1116 1116   * This function really isn't used anymore.
1117 1117   */
1118 1118  /*ARGSUSED*/
1119 1119  int
1120 1120  hat_dup(hat_t *old, hat_t *new, caddr_t addr, size_t len, uint_t flag)
1121 1121  {
1122 1122          ASSERT((uintptr_t)addr < kernelbase);
1123 1123          ASSERT(new != kas.a_hat);
1124 1124          ASSERT(old != kas.a_hat);
1125 1125          return (0);
1126 1126  }
1127 1127  
1128 1128  /*
1129 1129   * Allocate any hat resources required for a process being swapped in.
1130 1130   */
1131 1131  /*ARGSUSED*/
1132 1132  void
1133 1133  hat_swapin(hat_t *hat)
1134 1134  {
1135 1135          /* do nothing - we let everything fault back in */
1136 1136  }
1137 1137  
1138 1138  /*
1139 1139   * Unload all translations associated with an address space of a process
1140 1140   * that is being swapped out.
1141 1141   */
1142 1142  void
1143 1143  hat_swapout(hat_t *hat)
1144 1144  {
1145 1145          uintptr_t       vaddr = (uintptr_t)0;
1146 1146          uintptr_t       eaddr = _userlimit;
1147 1147          htable_t        *ht = NULL;
1148 1148          level_t         l;
1149 1149  
1150 1150          XPV_DISALLOW_MIGRATE();
1151 1151          /*
1152 1152           * We can't just call hat_unload(hat, 0, _userlimit...)  here, because
1153 1153           * seg_spt and shared pagetables can't be swapped out.
1154 1154           * Take a look at segspt_shmswapout() - it's a big no-op.
1155 1155           *
1156 1156           * Instead we'll walk through all the address space and unload
1157 1157           * any mappings which we are sure are not shared, not locked.
1158 1158           */
1159 1159          ASSERT(IS_PAGEALIGNED(vaddr));
1160 1160          ASSERT(IS_PAGEALIGNED(eaddr));
1161 1161          ASSERT(AS_LOCK_HELD(hat->hat_as));
1162 1162          if ((uintptr_t)hat->hat_as->a_userlimit < eaddr)
1163 1163                  eaddr = (uintptr_t)hat->hat_as->a_userlimit;
1164 1164  
1165 1165          while (vaddr < eaddr) {
1166 1166                  (void) htable_walk(hat, &ht, &vaddr, eaddr);
1167 1167                  if (ht == NULL)
1168 1168                          break;
1169 1169  
1170 1170                  ASSERT(!IN_VA_HOLE(vaddr));
1171 1171  
1172 1172                  /*
1173 1173                   * If the page table is shared skip its entire range.
1174 1174                   */
1175 1175                  l = ht->ht_level;
1176 1176                  if (ht->ht_flags & HTABLE_SHARED_PFN) {
1177 1177                          vaddr = ht->ht_vaddr + LEVEL_SIZE(l + 1);
1178 1178                          htable_release(ht);
1179 1179                          ht = NULL;
1180 1180                          continue;
1181 1181                  }
1182 1182  
1183 1183                  /*
1184 1184                   * If the page table has no locked entries, unload this one.
1185 1185                   */
1186 1186                  if (ht->ht_lock_cnt == 0)
1187 1187                          hat_unload(hat, (caddr_t)vaddr, LEVEL_SIZE(l),
1188 1188                              HAT_UNLOAD_UNMAP);
1189 1189  
1190 1190                  /*
1191 1191                   * If we have a level 0 page table with locked entries,
1192 1192                   * skip the entire page table, otherwise skip just one entry.
1193 1193                   */
1194 1194                  if (ht->ht_lock_cnt > 0 && l == 0)
1195 1195                          vaddr = ht->ht_vaddr + LEVEL_SIZE(1);
1196 1196                  else
1197 1197                          vaddr += LEVEL_SIZE(l);
1198 1198          }
1199 1199          if (ht)
1200 1200                  htable_release(ht);
1201 1201  
1202 1202          /*
1203 1203           * We're in swapout because the system is low on memory, so
1204 1204           * go back and flush all the htables off the cached list.
1205 1205           */
1206 1206          htable_purge_hat(hat);
1207 1207          XPV_ALLOW_MIGRATE();
1208 1208  }
1209 1209  
1210 1210  /*
1211 1211   * returns number of bytes that have valid mappings in hat.
1212 1212   */
1213 1213  size_t
1214 1214  hat_get_mapped_size(hat_t *hat)
1215 1215  {
1216 1216          size_t total = 0;
1217 1217          int l;
1218 1218  
1219 1219          for (l = 0; l <= mmu.max_page_level; l++)
1220 1220                  total += (hat->hat_pages_mapped[l] << LEVEL_SHIFT(l));
1221 1221          total += hat->hat_ism_pgcnt;
1222 1222  
1223 1223          return (total);
1224 1224  }
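
A small worked example of the accounting hat_get_mapped_size() performs: the per-level page counts are scaled by that level's page size, here using the amd64 shifts of 12 and 21 with hypothetical counts of 4K and 2M mappings.

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
        /* hypothetical counts: ten 4K mappings and two 2M mappings */
        const uint64_t pages_mapped[] = { 10, 2 };
        const int level_shift[] = { 12, 21 };
        uint64_t total = 0;
        int l;

        for (l = 0; l < 2; l++)
                total += pages_mapped[l] << level_shift[l];

        /* 10 * 4096 + 2 * 2097152 = 4235264 bytes */
        (void) printf("mapped: %llu bytes\n", (unsigned long long)total);
        return (0);
}
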
1225 1225  
1226 1226  /*
1227 1227   * enable/disable collection of stats for hat.
1228 1228   */
1229 1229  int
1230 1230  hat_stats_enable(hat_t *hat)
1231 1231  {
1232 1232          atomic_inc_32(&hat->hat_stats);
1233 1233          return (1);
1234 1234  }
1235 1235  
1236 1236  void
1237 1237  hat_stats_disable(hat_t *hat)
1238 1238  {
1239 1239          atomic_dec_32(&hat->hat_stats);
1240 1240  }
1241 1241  
1242 1242  /*
1243 1243   * Utility to sync the ref/mod bits from a page table entry to the page_t
1244 1244   * We must be holding the mapping list lock when this is called.
1245 1245   */
1246 1246  static void
1247 1247  hati_sync_pte_to_page(page_t *pp, x86pte_t pte, level_t level)
1248 1248  {
1249 1249          uint_t  rm = 0;
1250 1250          pgcnt_t pgcnt;
1251 1251  
1252 1252          if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC)
1253 1253                  return;
1254 1254  
1255 1255          if (PTE_GET(pte, PT_REF))
1256 1256                  rm |= P_REF;
1257 1257  
1258 1258          if (PTE_GET(pte, PT_MOD))
1259 1259                  rm |= P_MOD;
1260 1260  
1261 1261          if (rm == 0)
1262 1262                  return;
1263 1263  
1264 1264          /*
1265 1265           * sync to all constituent pages of a large page
1266 1266           */
1267 1267          ASSERT(x86_hm_held(pp));
1268 1268          pgcnt = page_get_pagecnt(level);
1269 1269          ASSERT(IS_P2ALIGNED(pp->p_pagenum, pgcnt));
1270 1270          for (; pgcnt > 0; --pgcnt) {
1271 1271                  /*
1272 1272                   * hat_page_demote() can't decrease
1273 1273                   * pszc below this mapping size
1274 1274                   * since this large mapping existed after we
1275 1275                   * took mlist lock.
1276 1276                   */
1277 1277                  ASSERT(pp->p_szc >= level);
1278 1278                  hat_page_setattr(pp, rm);
1279 1279                  ++pp;
1280 1280          }
1281 1281  }
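
For reference, the constituent page count that the loop above walks grows with the mapping level; on x86, page_get_pagecnt(level) works out to 2^(LEVEL_SHIFT(level) - LEVEL_SHIFT(0)), i.e. 1, 512 and 262144 base pages for 4K, 2M and 1G mappings. A stand-alone illustration:

#include <stdio.h>

int
main(void)
{
        static const int level_shift[] = { 12, 21, 30 };
        int l;

        for (l = 0; l < 3; l++)
                (void) printf("level %d: %ld constituent 4K pages\n",
                    l, 1L << (level_shift[l] - level_shift[0]));
        return (0);
}
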
1282 1282  
1283 1283  /*
 1284 1284   * This is the set of PTE bits for PFN, permissions and caching
1285 1285   * that are allowed to change on a HAT_LOAD_REMAP
1286 1286   */
1287 1287  #define PT_REMAP_BITS                                                   \
1288 1288          (PT_PADDR | PT_NX | PT_WRITABLE | PT_WRITETHRU |                \
1289 1289          PT_NOCACHE | PT_PAT_4K | PT_PAT_LARGE | PT_IGNORE | PT_REF | PT_MOD)
1290 1290  
1291 1291  #define REMAPASSERT(EX) if (!(EX)) panic("hati_pte_map: " #EX)
1292 1292  /*
1293 1293   * Do the low-level work to get a mapping entered into a HAT's pagetables
1294 1294   * and in the mapping list of the associated page_t.
1295 1295   */
1296 1296  static int
1297 1297  hati_pte_map(
1298 1298          htable_t        *ht,
1299 1299          uint_t          entry,
1300 1300          page_t          *pp,
1301 1301          x86pte_t        pte,
1302 1302          int             flags,
1303 1303          void            *pte_ptr)
1304 1304  {
1305 1305          hat_t           *hat = ht->ht_hat;
1306 1306          x86pte_t        old_pte;
1307 1307          level_t         l = ht->ht_level;
1308 1308          hment_t         *hm;
1309 1309          uint_t          is_consist;
1310 1310          uint_t          is_locked;
1311 1311          int             rv = 0;
1312 1312  
1313 1313          /*
1314 1314           * Is this a consistent (ie. need mapping list lock) mapping?
1315 1315           */
1316 1316          is_consist = (pp != NULL && (flags & HAT_LOAD_NOCONSIST) == 0);
1317 1317  
1318 1318          /*
1319 1319           * Track locked mapping count in the htable.  Do this first,
1320 1320           * as we track locking even if there already is a mapping present.
1321 1321           */
1322 1322          is_locked = (flags & HAT_LOAD_LOCK) != 0 && hat != kas.a_hat;
1323 1323          if (is_locked)
1324 1324                  HTABLE_LOCK_INC(ht);
1325 1325  
1326 1326          /*
1327 1327           * Acquire the page's mapping list lock and get an hment to use.
1328 1328           * Note that hment_prepare() might return NULL.
1329 1329           */
1330 1330          if (is_consist) {
1331 1331                  x86_hm_enter(pp);
1332 1332                  hm = hment_prepare(ht, entry, pp);
1333 1333          }
1334 1334  
1335 1335          /*
1336 1336           * Set the new pte, retrieving the old one at the same time.
1337 1337           */
1338 1338          old_pte = x86pte_set(ht, entry, pte, pte_ptr);
1339 1339  
1340 1340          /*
1341 1341           * Did we get a large page / page table collision?
1342 1342           */
1343 1343          if (old_pte == LPAGE_ERROR) {
1344 1344                  if (is_locked)
1345 1345                          HTABLE_LOCK_DEC(ht);
1346 1346                  rv = -1;
1347 1347                  goto done;
1348 1348          }
1349 1349  
1350 1350          /*
1351 1351           * If the mapping didn't change there is nothing more to do.
1352 1352           */
1353 1353          if (PTE_EQUIV(pte, old_pte))
1354 1354                  goto done;
1355 1355  
1356 1356          /*
1357 1357           * Install a new mapping in the page's mapping list
1358 1358           */
1359 1359          if (!PTE_ISVALID(old_pte)) {
1360 1360                  if (is_consist) {
1361 1361                          hment_assign(ht, entry, pp, hm);
1362 1362                          x86_hm_exit(pp);
1363 1363                  } else {
1364 1364                          ASSERT(flags & HAT_LOAD_NOCONSIST);
1365 1365                  }
1366 1366  #if defined(__amd64)
1367 1367                  if (ht->ht_flags & HTABLE_VLP) {
1368 1368                          cpu_t *cpu = CPU;
1369 1369                          x86pte_t *vlpptep = cpu->cpu_hat_info->hci_vlp_l2ptes;
1370 1370                          VLP_COPY(hat->hat_vlp_ptes, vlpptep);
1371 1371                  }
1372 1372  #endif
1373 1373                  HTABLE_INC(ht->ht_valid_cnt);
1374 1374                  PGCNT_INC(hat, l);
1375 1375                  return (rv);
1376 1376          }
1377 1377  
1378 1378          /*
1379 1379           * Remaps are more complicated:
1380 1380           *  - HAT_LOAD_REMAP must be specified if changing the pfn.
1381 1381           *    We also require that NOCONSIST be specified.
1382 1382           *  - Otherwise only permission or caching bits may change.
1383 1383           */
1384 1384          if (!PTE_ISPAGE(old_pte, l))
1385 1385                  panic("non-null/page mapping pte=" FMT_PTE, old_pte);
1386 1386  
1387 1387          if (PTE2PFN(old_pte, l) != PTE2PFN(pte, l)) {
1388 1388                  REMAPASSERT(flags & HAT_LOAD_REMAP);
1389 1389                  REMAPASSERT(flags & HAT_LOAD_NOCONSIST);
1390 1390                  REMAPASSERT(PTE_GET(old_pte, PT_SOFTWARE) >= PT_NOCONSIST);
1391 1391                  REMAPASSERT(pf_is_memory(PTE2PFN(old_pte, l)) ==
1392 1392                      pf_is_memory(PTE2PFN(pte, l)));
1393 1393                  REMAPASSERT(!is_consist);
1394 1394          }
1395 1395  
1396 1396          /*
1397 1397           * We only let remaps change certain bits in the PTE.
1398 1398           */
1399 1399          if (PTE_GET(old_pte, ~PT_REMAP_BITS) != PTE_GET(pte, ~PT_REMAP_BITS))
1400 1400                  panic("remap bits changed: old_pte="FMT_PTE", pte="FMT_PTE"\n",
1401 1401                      old_pte, pte);
1402 1402  
1403 1403          /*
1404 1404           * We don't create any mapping list entries on a remap, so release
1405 1405           * any allocated hment after we drop the mapping list lock.
1406 1406           */
1407 1407  done:
1408 1408          if (is_consist) {
1409 1409                  x86_hm_exit(pp);
1410 1410                  if (hm != NULL)
1411 1411                          hment_free(hm);
1412 1412          }
1413 1413          return (rv);
1414 1414  }
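/*
 * Editor's note: the sketch below is an illustration added for this
 * review and is not part of the original source.  It restates the
 * remap rule that hati_pte_map() enforces above: on a HAT_LOAD_REMAP
 * only the bits in PT_REMAP_BITS (PFN, permission and caching bits)
 * may differ between the old and new PTE; any other difference
 * triggers the "remap bits changed" panic.  The helper name is the
 * editor's invention.
 */
static boolean_t
remap_is_legal(x86pte_t old_pte, x86pte_t new_pte)
{
        /* every bit outside PT_REMAP_BITS must be unchanged */
        return (PTE_GET(old_pte, ~PT_REMAP_BITS) ==
            PTE_GET(new_pte, ~PT_REMAP_BITS));
}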
1415 1415  
1416 1416  /*
1417 1417   * Internal routine to load a single page table entry. This only fails if
1418 1418   * we attempt to overwrite a page table link with a large page.
1419 1419   */
1420 1420  static int
1421 1421  hati_load_common(
1422 1422          hat_t           *hat,
1423 1423          uintptr_t       va,
1424 1424          page_t          *pp,
1425 1425          uint_t          attr,
1426 1426          uint_t          flags,
1427 1427          level_t         level,
1428 1428          pfn_t           pfn)
1429 1429  {
1430 1430          htable_t        *ht;
1431 1431          uint_t          entry;
1432 1432          x86pte_t        pte;
1433 1433          int             rv = 0;
1434 1434  
1435 1435          /*
1436 1436           * The number 16 is arbitrary; it is here to catch a recursion problem
1437 1437           * early, before we blow out the kernel stack.
1438 1438           */
1439 1439          ++curthread->t_hatdepth;
1440 1440          ASSERT(curthread->t_hatdepth < 16);
1441 1441  
1442 1442          ASSERT(hat == kas.a_hat || AS_LOCK_HELD(hat->hat_as));
1443 1443  
1444 1444          if (flags & HAT_LOAD_SHARE)
1445 1445                  hat->hat_flags |= HAT_SHARED;
1446 1446  
1447 1447          /*
1448 1448           * Find the page table that maps this page if it already exists.
1449 1449           */
1450 1450          ht = htable_lookup(hat, va, level);
1451 1451  
1452 1452          /*
1453 1453           * We must have HAT_LOAD_NOCONSIST if page_t is NULL.
1454 1454           */
1455 1455          if (pp == NULL)
1456 1456                  flags |= HAT_LOAD_NOCONSIST;
1457 1457  
1458 1458          if (ht == NULL) {
1459 1459                  ht = htable_create(hat, va, level, NULL);
1460 1460                  ASSERT(ht != NULL);
1461 1461          }
1462 1462          entry = htable_va2entry(va, ht);
1463 1463  
1464 1464          /*
1465 1465           * a bunch of paranoid error checking
1466 1466           */
1467 1467          ASSERT(ht->ht_busy > 0);
1468 1468          if (ht->ht_vaddr > va || va > HTABLE_LAST_PAGE(ht))
1469 1469                  panic("hati_load_common: bad htable %p, va %p",
1470 1470                      (void *)ht, (void *)va);
1471 1471          ASSERT(ht->ht_level == level);
1472 1472  
1473 1473          /*
1474 1474           * construct the new PTE
1475 1475           */
1476 1476          if (hat == kas.a_hat)
1477 1477                  attr &= ~PROT_USER;
1478 1478          pte = hati_mkpte(pfn, attr, level, flags);
1479 1479          if (hat == kas.a_hat && va >= kernelbase)
1480 1480                  PTE_SET(pte, mmu.pt_global);
1481 1481  
1482 1482          /*
1483 1483           * establish the mapping
1484 1484           */
1485 1485          rv = hati_pte_map(ht, entry, pp, pte, flags, NULL);
1486 1486  
1487 1487          /*
1488 1488           * release the htable and any reserves
1489 1489           */
1490 1490          htable_release(ht);
1491 1491          --curthread->t_hatdepth;
1492 1492          return (rv);
1493 1493  }
1494 1494  
1495 1495  /*
1496 1496   * special case of hat_memload to deal with some kernel addrs for performance
1497 1497   */
1498 1498  static void
1499 1499  hat_kmap_load(
1500 1500          caddr_t         addr,
1501 1501          page_t          *pp,
1502 1502          uint_t          attr,
1503 1503          uint_t          flags)
1504 1504  {
1505 1505          uintptr_t       va = (uintptr_t)addr;
1506 1506          x86pte_t        pte;
1507 1507          pfn_t           pfn = page_pptonum(pp);
1508 1508          pgcnt_t         pg_off = mmu_btop(va - mmu.kmap_addr);
1509 1509          htable_t        *ht;
1510 1510          uint_t          entry;
1511 1511          void            *pte_ptr;
1512 1512  
1513 1513          /*
1514 1514           * construct the requested PTE
1515 1515           */
1516 1516          attr &= ~PROT_USER;
1517 1517          attr |= HAT_STORECACHING_OK;
1518 1518          pte = hati_mkpte(pfn, attr, 0, flags);
1519 1519          PTE_SET(pte, mmu.pt_global);
1520 1520  
1521 1521          /*
1522 1522           * Figure out the pte_ptr and htable and use common code to finish up
1523 1523           */
1524 1524          if (mmu.pae_hat)
1525 1525                  pte_ptr = mmu.kmap_ptes + pg_off;
1526 1526          else
1527 1527                  pte_ptr = (x86pte32_t *)mmu.kmap_ptes + pg_off;
1528 1528          ht = mmu.kmap_htables[(va - mmu.kmap_htables[0]->ht_vaddr) >>
1529 1529              LEVEL_SHIFT(1)];
1530 1530          entry = htable_va2entry(va, ht);
1531 1531          ++curthread->t_hatdepth;
1532 1532          ASSERT(curthread->t_hatdepth < 16);
1533 1533          (void) hati_pte_map(ht, entry, pp, pte, flags, pte_ptr);
1534 1534          --curthread->t_hatdepth;
1535 1535  }
1536 1536  
1537 1537  /*
1538 1538   * hat_memload() - load a translation to the given page struct
1539 1539   *
1540 1540   * Flags for hat_memload/hat_devload/hat_*attr.
1541 1541   *
1542 1542   *      HAT_LOAD        Default flags to load a translation to the page.
1543 1543   *
1544 1544   *      HAT_LOAD_LOCK   Lock down mapping resources; hat_map(), hat_memload(),
1545 1545   *                      and hat_devload().
1546 1546   *
1547 1547   *      HAT_LOAD_NOCONSIST Do not add the mapping to the page_t mapping list;
1548 1548   *                      sets PT_NOCONSIST.
1549 1549   *
1550 1550   *      HAT_LOAD_SHARE  A flag to hat_memload() to indicate that the h/w page
1551 1551   *                      tables mapping some user pages (not kas) are shared by
1552 1552   *                      more than one process (eg. ISM).
1553 1553   *
1554 1554   *      HAT_LOAD_REMAP  Reload a valid pte with a different page frame.
1555 1555   *
1556 1556   *      HAT_NO_KALLOC   Do not kmem_alloc while creating the mapping; at this
1557 1557   *                      point, the mapping being set up is for internal hat
1558 1558   *                      layer data structures.  This flag forces the hat layer
1559 1559   *                      to tap its reserves in order to prevent infinite
1560 1560   *                      recursion.
1561 1561   *
1562 1562   * The following is a protection attribute (like PROT_READ, etc.)
1563 1563   *
1564 1564   *      HAT_NOSYNC      sets PT_NOSYNC - this mapping's ref/mod bits
1565 1565   *                      are never cleared.
1566 1566   *
1567 1567   * Installing a new valid PTE and creating the mapping list
1568 1568   * entry are controlled under the same lock, which is derived from
1569 1569   * the page_t being mapped.
1570 1570   */
1571 1571  static uint_t supported_memload_flags =
1572 1572          HAT_LOAD | HAT_LOAD_LOCK | HAT_LOAD_ADV | HAT_LOAD_NOCONSIST |
1573 1573          HAT_LOAD_SHARE | HAT_NO_KALLOC | HAT_LOAD_REMAP | HAT_LOAD_TEXT;
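/*
 * Editor's note: hypothetical usage sketch, not part of the original
 * source.  It shows how a caller might combine the flags documented
 * above to map and lock one user page; "as", "va" (caddr_t) and "pp"
 * are assumed to come from the caller's context.
 */
        /* map one page writable and lock the mapping resources */
        hat_memload(as->a_hat, va, pp, PROT_READ | PROT_WRITE,
            HAT_LOAD | HAT_LOAD_LOCK);

        /* ... later, drop the lock count taken by HAT_LOAD_LOCK */
        hat_unlock(as->a_hat, va, MMU_PAGESIZE);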
1574 1574  
1575 1575  void
1576 1576  hat_memload(
1577 1577          hat_t           *hat,
1578 1578          caddr_t         addr,
1579 1579          page_t          *pp,
1580 1580          uint_t          attr,
1581 1581          uint_t          flags)
1582 1582  {
1583 1583          uintptr_t       va = (uintptr_t)addr;
1584 1584          level_t         level = 0;
1585 1585          pfn_t           pfn = page_pptonum(pp);
1586 1586  
1587 1587          XPV_DISALLOW_MIGRATE();
1588 1588          ASSERT(IS_PAGEALIGNED(va));
1589 1589          ASSERT(hat == kas.a_hat || va < _userlimit);
1590 1590          ASSERT(hat == kas.a_hat || AS_LOCK_HELD(hat->hat_as));
1591 1591          ASSERT((flags & supported_memload_flags) == flags);
1592 1592  
1593 1593          ASSERT(!IN_VA_HOLE(va));
1594 1594          ASSERT(!PP_ISFREE(pp));
1595 1595  
1596 1596          /*
1597 1597           * kernel address special case for performance.
1598 1598           */
1599 1599          if (mmu.kmap_addr <= va && va < mmu.kmap_eaddr) {
1600 1600                  ASSERT(hat == kas.a_hat);
1601 1601                  hat_kmap_load(addr, pp, attr, flags);
1602 1602                  XPV_ALLOW_MIGRATE();
1603 1603                  return;
1604 1604          }
1605 1605  
1606 1606          /*
1607 1607           * This is used for memory with normal caching enabled, so
1608 1608           * always set HAT_STORECACHING_OK.
1609 1609           */
1610 1610          attr |= HAT_STORECACHING_OK;
1611 1611          if (hati_load_common(hat, va, pp, attr, flags, level, pfn) != 0)
1612 1612                  panic("unexpected hati_load_common() failure");
1613 1613          XPV_ALLOW_MIGRATE();
1614 1614  }
1615 1615  
1616 1616  /* ARGSUSED */
1617 1617  void
1618 1618  hat_memload_region(struct hat *hat, caddr_t addr, struct page *pp,
1619 1619      uint_t attr, uint_t flags, hat_region_cookie_t rcookie)
1620 1620  {
1621 1621          hat_memload(hat, addr, pp, attr, flags);
1622 1622  }
1623 1623  
1624 1624  /*
1625 1625   * Load the given array of page structs using large pages when possible
1626 1626   */
1627 1627  void
1628 1628  hat_memload_array(
1629 1629          hat_t           *hat,
1630 1630          caddr_t         addr,
1631 1631          size_t          len,
1632 1632          page_t          **pages,
1633 1633          uint_t          attr,
1634 1634          uint_t          flags)
1635 1635  {
1636 1636          uintptr_t       va = (uintptr_t)addr;
1637 1637          uintptr_t       eaddr = va + len;
1638 1638          level_t         level;
1639 1639          size_t          pgsize;
1640 1640          pgcnt_t         pgindx = 0;
1641 1641          pfn_t           pfn;
1642 1642          pgcnt_t         i;
1643 1643  
1644 1644          XPV_DISALLOW_MIGRATE();
1645 1645          ASSERT(IS_PAGEALIGNED(va));
1646 1646          ASSERT(hat == kas.a_hat || va + len <= _userlimit);
1647 1647          ASSERT(hat == kas.a_hat || AS_LOCK_HELD(hat->hat_as));
1648 1648          ASSERT((flags & supported_memload_flags) == flags);
1649 1649  
1650 1650          /*
1651 1651           * memload is used for memory with full caching enabled, so
1652 1652           * set HAT_STORECACHING_OK.
1653 1653           */
1654 1654          attr |= HAT_STORECACHING_OK;
1655 1655  
1656 1656          /*
1657 1657           * handle all pages using largest possible pagesize
1658 1658           */
1659 1659          while (va < eaddr) {
1660 1660                  /*
1661 1661                   * decide what level mapping to use (ie. pagesize)
1662 1662                   */
1663 1663                  pfn = page_pptonum(pages[pgindx]);
1664 1664                  for (level = mmu.max_page_level; ; --level) {
1665 1665                          pgsize = LEVEL_SIZE(level);
1666 1666                          if (level == 0)
1667 1667                                  break;
1668 1668  
1669 1669                          if (!IS_P2ALIGNED(va, pgsize) ||
1670 1670                              (eaddr - va) < pgsize ||
1671 1671                              !IS_P2ALIGNED(pfn_to_pa(pfn), pgsize))
1672 1672                                  continue;
1673 1673  
1674 1674                          /*
1675 1675                           * To use a large mapping of this size, all the
1676 1676                           * pages we are passed must be sequential subpages
1677 1677                           * of the large page.
1678 1678                           * hat_page_demote() can't change p_szc because
1679 1679                           * all pages are locked.
1680 1680                           */
1681 1681                          if (pages[pgindx]->p_szc >= level) {
1682 1682                                  for (i = 0; i < mmu_btop(pgsize); ++i) {
1683 1683                                          if (pfn + i !=
1684 1684                                              page_pptonum(pages[pgindx + i]))
1685 1685                                                  break;
1686 1686                                          ASSERT(pages[pgindx + i]->p_szc >=
1687 1687                                              level);
1688 1688                                          ASSERT(pages[pgindx] + i ==
1689 1689                                              pages[pgindx + i]);
1690 1690                                  }
1691 1691                                  if (i == mmu_btop(pgsize)) {
1692 1692  #ifdef DEBUG
1693 1693                                          if (level == 2)
1694 1694                                                  map1gcnt++;
1695 1695  #endif
1696 1696                                          break;
1697 1697                                  }
1698 1698                          }
1699 1699                  }
1700 1700  
1701 1701                  /*
1702 1702                   * Load this page mapping. If the load fails, try a smaller
1703 1703                   * pagesize.
1704 1704                   */
1705 1705                  ASSERT(!IN_VA_HOLE(va));
1706 1706                  while (hati_load_common(hat, va, pages[pgindx], attr,
1707 1707                      flags, level, pfn) != 0) {
1708 1708                          if (level == 0)
1709 1709                                  panic("unexpected hati_load_common() failure");
1710 1710                          --level;
1711 1711                          pgsize = LEVEL_SIZE(level);
1712 1712                  }
1713 1713  
1714 1714                  /*
1715 1715                   * move to next page
1716 1716                   */
1717 1717                  va += pgsize;
1718 1718                  pgindx += mmu_btop(pgsize);
1719 1719          }
1720 1720          XPV_ALLOW_MIGRATE();
1721 1721  }
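/*
 * Editor's note: illustrative sketch, not part of the original source.
 * It isolates the level-selection test used by hat_memload_array()
 * above: a mapping at "level" is usable only when the virtual address
 * and the physical address are both aligned to LEVEL_SIZE(level) and
 * at least that much of the request remains.  The helper name is the
 * editor's invention.
 */
static int
fits_level(uintptr_t va, uintptr_t eaddr, pfn_t pfn, level_t level)
{
        size_t pgsize = LEVEL_SIZE(level);

        return (IS_P2ALIGNED(va, pgsize) &&
            (eaddr - va) >= pgsize &&
            IS_P2ALIGNED(pfn_to_pa(pfn), pgsize));
}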
1722 1722  
1723 1723  /* ARGSUSED */
1724 1724  void
1725 1725  hat_memload_array_region(struct hat *hat, caddr_t addr, size_t len,
1726 1726      struct page **pps, uint_t attr, uint_t flags,
1727 1727      hat_region_cookie_t rcookie)
1728 1728  {
1729 1729          hat_memload_array(hat, addr, len, pps, attr, flags);
1730 1730  }
1731 1731  
1732 1732  /*
1733 1733   * void hat_devload(hat, addr, len, pf, attr, flags)
1734 1734   *      load/lock the given page frame number
1735 1735   *
1736 1736   * Advisory ordering attributes. Apply only to device mappings.
1737 1737   *
1738 1738   * HAT_STRICTORDER: the CPU must issue the references in order, as the
1739 1739   *      programmer specified.  This is the default.
1740 1740   * HAT_UNORDERED_OK: the CPU may reorder the references (this is all kinds
1741 1741   *      of reordering; store or load with store or load).
1742 1742   * HAT_MERGING_OK: merging and batching: the CPU may merge individual stores
1743 1743   *      to consecutive locations (for example, turn two consecutive byte
1744 1744   *      stores into one halfword store), and it may batch individual loads
1745 1745   *      (for example, turn two consecutive byte loads into one halfword load).
1746 1746   *      This also implies re-ordering.
1747 1747   * HAT_LOADCACHING_OK: the CPU may cache the data it fetches and reuse it
1748 1748   *      until another store occurs.  The default is to fetch new data
1749 1749   *      on every load.  This also implies merging.
1750 1750   * HAT_STORECACHING_OK: the CPU may keep the data in the cache and push it to
1751 1751   *      the device (perhaps with other data) at a later time.  The default is
1752 1752   *      to push the data right away.  This also implies load caching.
1753 1753   *
1754 1754   * Equivalent of hat_memload(), but can be used for device memory where
1755 1755   * there are no page_t's and we support additional flags (write merging, etc).
1756 1756   * Note that we can have large page mappings with this interface.
1757 1757   */
1758 1758  int supported_devload_flags = HAT_LOAD | HAT_LOAD_LOCK |
1759 1759          HAT_LOAD_NOCONSIST | HAT_STRICTORDER | HAT_UNORDERED_OK |
1760 1760          HAT_MERGING_OK | HAT_LOADCACHING_OK | HAT_STORECACHING_OK;
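/*
 * Editor's note: hypothetical usage sketch, not part of the original
 * source.  It illustrates the ordering attributes documented above:
 * device control registers normally want the default HAT_STRICTORDER,
 * while something like a frame buffer can tolerate write merging.
 * Following this file's own use of HAT_STORECACHING_OK, the ordering
 * bits are passed in the attribute argument; "kva", "fb_kva", "fb_len",
 * "reg_pfn" and "fb_pfn" are assumed to come from the caller.
 */
        /* device registers: strict ordering, no page_t backing */
        hat_devload(kas.a_hat, kva, MMU_PAGESIZE, reg_pfn,
            PROT_READ | PROT_WRITE | HAT_STRICTORDER,
            HAT_LOAD_LOCK | HAT_LOAD_NOCONSIST);

        /* frame buffer: allow the CPU to merge and batch stores */
        hat_devload(kas.a_hat, fb_kva, fb_len, fb_pfn,
            PROT_READ | PROT_WRITE | HAT_MERGING_OK,
            HAT_LOAD_LOCK | HAT_LOAD_NOCONSIST);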
1761 1761  
1762 1762  void
1763 1763  hat_devload(
1764 1764          hat_t           *hat,
1765 1765          caddr_t         addr,
1766 1766          size_t          len,
1767 1767          pfn_t           pfn,
1768 1768          uint_t          attr,
1769 1769          int             flags)
1770 1770  {
1771 1771          uintptr_t       va = ALIGN2PAGE(addr);
1772 1772          uintptr_t       eva = va + len;
1773 1773          level_t         level;
1774 1774          size_t          pgsize;
1775 1775          page_t          *pp;
1776 1776          int             f;      /* per PTE copy of flags  - maybe modified */
1777 1777          uint_t          a;      /* per PTE copy of attr */
1778 1778  
1779 1779          XPV_DISALLOW_MIGRATE();
1780 1780          ASSERT(IS_PAGEALIGNED(va));
1781 1781          ASSERT(hat == kas.a_hat || eva <= _userlimit);
1782 1782          ASSERT(hat == kas.a_hat || AS_LOCK_HELD(hat->hat_as));
1783 1783          ASSERT((flags & supported_devload_flags) == flags);
1784 1784  
1785 1785          /*
1786 1786           * handle all pages
1787 1787           */
1788 1788          while (va < eva) {
1789 1789  
1790 1790                  /*
1791 1791                   * decide what level mapping to use (ie. pagesize)
1792 1792                   */
1793 1793                  for (level = mmu.max_page_level; ; --level) {
1794 1794                          pgsize = LEVEL_SIZE(level);
1795 1795                          if (level == 0)
1796 1796                                  break;
1797 1797                          if (IS_P2ALIGNED(va, pgsize) &&
1798 1798                              (eva - va) >= pgsize &&
1799 1799                              IS_P2ALIGNED(pfn, mmu_btop(pgsize))) {
1800 1800  #ifdef DEBUG
1801 1801                                  if (level == 2)
1802 1802                                          map1gcnt++;
1803 1803  #endif
1804 1804                                  break;
1805 1805                          }
1806 1806                  }
1807 1807  
1808 1808                  /*
1809 1809                   * If this is just memory then allow caching (this happens
1810 1810                   * for the nucleus pages) - though HAT_PLAT_NOCACHE can be used
1811 1811                   * to override that. If we don't have a page_t then make sure
1812 1812                   * NOCONSIST is set.
1813 1813                   */
1814 1814                  a = attr;
1815 1815                  f = flags;
1816 1816                  if (!pf_is_memory(pfn))
1817 1817                          f |= HAT_LOAD_NOCONSIST;
1818 1818                  else if (!(a & HAT_PLAT_NOCACHE))
1819 1819                          a |= HAT_STORECACHING_OK;
1820 1820  
1821 1821                  if (f & HAT_LOAD_NOCONSIST)
1822 1822                          pp = NULL;
1823 1823                  else
1824 1824                          pp = page_numtopp_nolock(pfn);
1825 1825  
1826 1826                  /*
1827 1827                   * Check to make sure we are really trying to map a valid
1828 1828                   * memory page. A caller wishing to intentionally map
1829 1829                   * free memory pages will have passed the HAT_LOAD_NOCONSIST
1830 1830                   * flag, in which case pp will be NULL.
1831 1831                   */
1832 1832                  if (pp != NULL) {
1833 1833                          if (PP_ISFREE(pp)) {
1834 1834                                  panic("hat_devload: loading "
1835 1835                                      "a mapping to free page %p", (void *)pp);
1836 1836                          }
1837 1837  
1838 1838                          if (!PAGE_LOCKED(pp) && !PP_ISNORELOC(pp)) {
1839 1839                                  panic("hat_devload: loading a mapping "
1840 1840                                      "to an unlocked page %p",
1841 1841                                      (void *)pp);
1842 1842                          }
1843 1843                  }
1844 1844  
1845 1845                  /*
1846 1846                   * load this page mapping
1847 1847                   */
1848 1848                  ASSERT(!IN_VA_HOLE(va));
1849 1849                  while (hati_load_common(hat, va, pp, a, f, level, pfn) != 0) {
1850 1850                          if (level == 0)
1851 1851                                  panic("unexpected hati_load_common() failure");
1852 1852                          --level;
1853 1853                          pgsize = LEVEL_SIZE(level);
1854 1854                  }
1855 1855  
1856 1856                  /*
1857 1857                   * move to next page
1858 1858                   */
1859 1859                  va += pgsize;
1860 1860                  pfn += mmu_btop(pgsize);
1861 1861          }
1862 1862          XPV_ALLOW_MIGRATE();
1863 1863  }
1864 1864  
1865 1865  /*
1866 1866   * void hat_unlock(hat, addr, len)
1867 1867   *      unlock the mappings to a given range of addresses
1868 1868   *
1869 1869   * Locks are tracked by ht_lock_cnt in the htable.
1870 1870   */
1871 1871  void
1872 1872  hat_unlock(hat_t *hat, caddr_t addr, size_t len)
1873 1873  {
1874 1874          uintptr_t       vaddr = (uintptr_t)addr;
1875 1875          uintptr_t       eaddr = vaddr + len;
1876 1876          htable_t        *ht = NULL;
1877 1877  
1878 1878          /*
1879 1879           * kernel entries are always locked; we don't track lock counts
1880 1880           */
1881 1881          ASSERT(hat == kas.a_hat || eaddr <= _userlimit);
1882 1882          ASSERT(IS_PAGEALIGNED(vaddr));
1883 1883          ASSERT(IS_PAGEALIGNED(eaddr));
1884 1884          if (hat == kas.a_hat)
1885 1885                  return;
1886 1886          if (eaddr > _userlimit)
1887 1887                  panic("hat_unlock() address out of range - above _userlimit");
1888 1888  
1889 1889          XPV_DISALLOW_MIGRATE();
1890 1890          ASSERT(AS_LOCK_HELD(hat->hat_as));
1891 1891          while (vaddr < eaddr) {
1892 1892                  (void) htable_walk(hat, &ht, &vaddr, eaddr);
1893 1893                  if (ht == NULL)
1894 1894                          break;
1895 1895  
1896 1896                  ASSERT(!IN_VA_HOLE(vaddr));
1897 1897  
1898 1898                  if (ht->ht_lock_cnt < 1)
1899 1899                          panic("hat_unlock(): lock_cnt < 1, "
1900 1900                              "htable=%p, vaddr=%p\n", (void *)ht, (void *)vaddr);
1901 1901                  HTABLE_LOCK_DEC(ht);
1902 1902  
1903 1903                  vaddr += LEVEL_SIZE(ht->ht_level);
1904 1904          }
1905 1905          if (ht)
1906 1906                  htable_release(ht);
1907 1907          XPV_ALLOW_MIGRATE();
1908 1908  }
1909 1909  
1910 1910  /* ARGSUSED */
1911 1911  void
1912 1912  hat_unlock_region(struct hat *hat, caddr_t addr, size_t len,
1913 1913      hat_region_cookie_t rcookie)
1914 1914  {
1915 1915          panic("No shared region support on x86");
1916 1916  }
1917 1917  
1918 1918  #if !defined(__xpv)
1919 1919  /*
1920 1920   * Cross call service routine to demap a virtual page on
1921 1921   * the current CPU or flush all mappings in the TLB.
1922 1922   */
1923 1923  /*ARGSUSED*/
1924 1924  static int
1925 1925  hati_demap_func(xc_arg_t a1, xc_arg_t a2, xc_arg_t a3)
1926 1926  {
1927 1927          hat_t   *hat = (hat_t *)a1;
1928 1928          caddr_t addr = (caddr_t)a2;
1929 1929          size_t len = (size_t)a3;
1930 1930  
1931 1931          /*
1932 1932           * If the target hat isn't the kernel and this CPU isn't operating
1933 1933           * in the target hat, we can ignore the cross call.
1934 1934           */
1935 1935          if (hat != kas.a_hat && hat != CPU->cpu_current_hat)
1936 1936                  return (0);
1937 1937  
1938 1938          /*
1939 1939           * For a normal address, we flush a range of contiguous mappings
1940 1940           */
1941 1941          if ((uintptr_t)addr != DEMAP_ALL_ADDR) {
1942 1942                  for (size_t i = 0; i < len; i += MMU_PAGESIZE)
1943 1943                          mmu_tlbflush_entry(addr + i);
1944 1944                  return (0);
1945 1945          }
1946 1946  
1947 1947          /*
1948 1948           * Otherwise we reload cr3 to effect a complete TLB flush.
1949 1949           *
1950 1950           * A reload of cr3 on a VLP process also means we must also recopy in
1951 1951           * A reload of cr3 on a VLP process also means we must recopy in
1952 1952           * the pte values from the struct hat.
1953 1953          if (hat->hat_flags & HAT_VLP) {
1954 1954  #if defined(__amd64)
1955 1955                  x86pte_t *vlpptep = CPU->cpu_hat_info->hci_vlp_l2ptes;
1956 1956  
1957 1957                  VLP_COPY(hat->hat_vlp_ptes, vlpptep);
1958 1958  #elif defined(__i386)
1959 1959                  reload_pae32(hat, CPU);
1960 1960  #endif
1961 1961          }
1962 1962          reload_cr3();
1963 1963          return (0);
1964 1964  }
1965 1965  
1966 1966  /*
1967 1967   * Flush all TLB entries, including global (ie. kernel) ones.
1968 1968   */
1969 1969  static void
1970 1970  flush_all_tlb_entries(void)
1971 1971  {
1972 1972          ulong_t cr4 = getcr4();
1973 1973  
1974 1974          if (cr4 & CR4_PGE) {
1975 1975                  setcr4(cr4 & ~(ulong_t)CR4_PGE);
1976 1976                  setcr4(cr4);
1977 1977  
1978 1978                  /*
1979 1979                   * 32-bit PAE also always needs to reload_cr3()
1980 1980                   */
1981 1981                  if (mmu.max_level == 2)
1982 1982                          reload_cr3();
1983 1983          } else {
1984 1984                  reload_cr3();
1985 1985          }
1986 1986  }
1987 1987  
1988 1988  #define TLB_CPU_HALTED  (01ul)
1989 1989  #define TLB_INVAL_ALL   (02ul)
1990 1990  #define CAS_TLB_INFO(cpu, old, new)     \
1991 1991          atomic_cas_ulong((ulong_t *)&(cpu)->cpu_m.mcpu_tlb_info, (old), (new))
1992 1992  
1993 1993  /*
1994 1994   * Record that a CPU is going idle
1995 1995   */
1996 1996  void
1997 1997  tlb_going_idle(void)
1998 1998  {
1999 1999          atomic_or_ulong((ulong_t *)&CPU->cpu_m.mcpu_tlb_info, TLB_CPU_HALTED);
2000 2000  }
2001 2001  
2002 2002  /*
2003 2003   * Service a delayed TLB flush if coming out of being idle.
2004 2004   * It is called from the cpu idle notification with interrupts disabled.
2005 2005   */
2006 2006  void
2007 2007  tlb_service(void)
2008 2008  {
2009 2009          ulong_t tlb_info;
2010 2010          ulong_t found;
2011 2011  
2012 2012          /*
2013 2013           * We only have to do something if coming out of being idle.
2014 2014           */
2015 2015          tlb_info = CPU->cpu_m.mcpu_tlb_info;
2016 2016          if (tlb_info & TLB_CPU_HALTED) {
2017 2017                  ASSERT(CPU->cpu_current_hat == kas.a_hat);
2018 2018  
2019 2019                  /*
2020 2020                   * Atomic clear and fetch of old state.
2021 2021                   */
2022 2022                  while ((found = CAS_TLB_INFO(CPU, tlb_info, 0)) != tlb_info) {
2023 2023                          ASSERT(found & TLB_CPU_HALTED);
2024 2024                          tlb_info = found;
2025 2025                          SMT_PAUSE();
2026 2026                  }
2027 2027                  if (tlb_info & TLB_INVAL_ALL)
2028 2028                          flush_all_tlb_entries();
2029 2029          }
2030 2030  }
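/*
 * Editor's note: illustrative sketch, not part of the original source.
 * It shows the other half of the handshake that tlb_service() above
 * completes: a CPU performing a TLB shootdown marks a halted CPU with
 * TLB_INVAL_ALL instead of waking it, and the halted CPU then does a
 * full flush in tlb_service() when it leaves idle.  This mirrors the
 * loop in hat_tlb_inval_range() below; "cpup" and "cpus_to_shootdown"
 * are assumed to come from that context.
 */
        ulong_t tlb_info = cpup->cpu_m.mcpu_tlb_info;

        while (tlb_info == TLB_CPU_HALTED) {
                /* ask the idle CPU to flush everything when it wakes */
                (void) CAS_TLB_INFO(cpup, TLB_CPU_HALTED,
                    TLB_CPU_HALTED | TLB_INVAL_ALL);
                SMT_PAUSE();
                tlb_info = cpup->cpu_m.mcpu_tlb_info;
        }
        if (tlb_info == (TLB_CPU_HALTED | TLB_INVAL_ALL)) {
                /* delayed flush requested; no cross call for this CPU */
                CPUSET_DEL(cpus_to_shootdown, cpup->cpu_id);
        }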
2031 2031  #endif /* !__xpv */
2032 2032  
2033 2033  /*
2034 2034   * Internal routine to do cross calls to invalidate a range of pages on
2035 2035   * all CPUs using a given hat.
2036 2036   */
2037 2037  void
2038 2038  hat_tlb_inval_range(hat_t *hat, uintptr_t va, size_t len)
2039 2039  {
2040 2040          extern int      flushes_require_xcalls; /* from mp_startup.c */
2041 2041          cpuset_t        justme;
2042 2042          cpuset_t        cpus_to_shootdown;
2043 2043  #ifndef __xpv
2044 2044          cpuset_t        check_cpus;
2045 2045          cpu_t           *cpup;
2046 2046          int             c;
2047 2047  #endif
2048 2048  
2049 2049          /*
2050 2050           * If the hat is being destroyed, there are no more users, so
2051 2051           * demap need not do anything.
2052 2052           */
2053 2053          if (hat->hat_flags & HAT_FREEING)
2054 2054                  return;
2055 2055  
2056 2056          /*
2057 2057           * If demapping from a shared pagetable, we best demap the
2058 2058           * entire set of user TLBs, since we don't know what addresses
2059 2059           * these were shared at.
2060 2060           */
2061 2061          if (hat->hat_flags & HAT_SHARED) {
2062 2062                  hat = kas.a_hat;
2063 2063                  va = DEMAP_ALL_ADDR;
2064 2064          }
2065 2065  
2066 2066          /*
2067 2067           * if not running with multiple CPUs, don't use cross calls
2068 2068           */
2069 2069          if (panicstr || !flushes_require_xcalls) {
2070 2070  #ifdef __xpv
2071 2071                  if (va == DEMAP_ALL_ADDR) {
2072 2072                          xen_flush_tlb();
2073 2073                  } else {
2074 2074                          for (size_t i = 0; i < len; i += MMU_PAGESIZE)
2075 2075                                  xen_flush_va((caddr_t)(va + i));
2076 2076                  }
2077 2077  #else
2078 2078                  (void) hati_demap_func((xc_arg_t)hat,
2079 2079                      (xc_arg_t)va, (xc_arg_t)len);
2080 2080  #endif
2081 2081                  return;
2082 2082          }
2083 2083  
2084 2084  
2085 2085          /*
2086 2086           * Determine CPUs to shootdown. Kernel changes always do all CPUs.
2087 2087           * Otherwise it's just CPUs currently executing in this hat.
2088 2088           */
2089 2089          kpreempt_disable();
2090 2090          CPUSET_ONLY(justme, CPU->cpu_id);
2091 2091          if (hat == kas.a_hat)
2092 2092                  cpus_to_shootdown = khat_cpuset;
2093 2093          else
2094 2094                  cpus_to_shootdown = hat->hat_cpus;
2095 2095  
2096 2096  #ifndef __xpv
2097 2097          /*
2098 2098           * If any CPUs in the set are idle, just request a delayed flush
2099 2099           * and avoid waking them up.
2100 2100           */
2101 2101          check_cpus = cpus_to_shootdown;
2102 2102          for (c = 0; c < NCPU && !CPUSET_ISNULL(check_cpus); ++c) {
2103 2103                  ulong_t tlb_info;
2104 2104  
2105 2105                  if (!CPU_IN_SET(check_cpus, c))
2106 2106                          continue;
2107 2107                  CPUSET_DEL(check_cpus, c);
2108 2108                  cpup = cpu[c];
2109 2109                  if (cpup == NULL)
2110 2110                          continue;
2111 2111  
2112 2112                  tlb_info = cpup->cpu_m.mcpu_tlb_info;
2113 2113                  while (tlb_info == TLB_CPU_HALTED) {
2114 2114                          (void) CAS_TLB_INFO(cpup, TLB_CPU_HALTED,
2115 2115                              TLB_CPU_HALTED | TLB_INVAL_ALL);
2116 2116                          SMT_PAUSE();
2117 2117                          tlb_info = cpup->cpu_m.mcpu_tlb_info;
2118 2118                  }
2119 2119                  if (tlb_info == (TLB_CPU_HALTED | TLB_INVAL_ALL)) {
2120 2120                          HATSTAT_INC(hs_tlb_inval_delayed);
2121 2121                          CPUSET_DEL(cpus_to_shootdown, c);
2122 2122                  }
2123 2123          }
2124 2124  #endif
2125 2125  
2126 2126          if (CPUSET_ISNULL(cpus_to_shootdown) ||
2127 2127              CPUSET_ISEQUAL(cpus_to_shootdown, justme)) {
2128 2128  
2129 2129  #ifdef __xpv
2130 2130                  if (va == DEMAP_ALL_ADDR) {
2131 2131                          xen_flush_tlb();
2132 2132                  } else {
2133 2133                          for (size_t i = 0; i < len; i += MMU_PAGESIZE)
2134 2134                                  xen_flush_va((caddr_t)(va + i));
2135 2135                  }
2136 2136  #else
2137 2137                  (void) hati_demap_func((xc_arg_t)hat,
2138 2138                      (xc_arg_t)va, (xc_arg_t)len);
2139 2139  #endif
2140 2140  
2141 2141          } else {
2142 2142  
2143 2143                  CPUSET_ADD(cpus_to_shootdown, CPU->cpu_id);
2144 2144  #ifdef __xpv
2145 2145                  if (va == DEMAP_ALL_ADDR) {
2146 2146                          xen_gflush_tlb(cpus_to_shootdown);
2147 2147                  } else {
2148 2148                          for (size_t i = 0; i < len; i += MMU_PAGESIZE) {
2149 2149                                  xen_gflush_va((caddr_t)(va + i),
2150 2150                                      cpus_to_shootdown);
2151 2151                          }
2152 2152                  }
2153 2153  #else
2154 2154                  xc_call((xc_arg_t)hat, (xc_arg_t)va, (xc_arg_t)len,
2155 2155                      CPUSET2BV(cpus_to_shootdown), hati_demap_func);
2156 2156  #endif
2157 2157  
2158 2158          }
2159 2159          kpreempt_enable();
2160 2160  }
2161 2161  
2162 2162  void
2163 2163  hat_tlb_inval(hat_t *hat, uintptr_t va)
2164 2164  {
2165 2165          hat_tlb_inval_range(hat, va, MMU_PAGESIZE);
2166 2166  }
2167 2167  
2168 2168  /*
2169 2169   * Interior routine for HAT_UNLOADs from hat_unload_callback(),
2170 2170   * hat_kmap_unload() OR from hat_steal() code.  This routine doesn't
2171 2171   * handle releasing of the htables.
2172 2172   */
2173 2173  void
2174 2174  hat_pte_unmap(
2175 2175          htable_t        *ht,
2176 2176          uint_t          entry,
2177 2177          uint_t          flags,
2178 2178          x86pte_t        old_pte,
2179 2179          void            *pte_ptr,
2180 2180          boolean_t       tlb)
2181 2181  {
2182 2182          hat_t           *hat = ht->ht_hat;
2183 2183          hment_t         *hm = NULL;
2184 2184          page_t          *pp = NULL;
2185 2185          level_t         l = ht->ht_level;
2186 2186          pfn_t           pfn;
2187 2187  
2188 2188          /*
2189 2189           * We always track the locking counts, even if nothing is unmapped
2190 2190           */
2191 2191          if ((flags & HAT_UNLOAD_UNLOCK) != 0 && hat != kas.a_hat) {
2192 2192                  ASSERT(ht->ht_lock_cnt > 0);
2193 2193                  HTABLE_LOCK_DEC(ht);
2194 2194          }
2195 2195  
2196 2196          /*
2197 2197           * Figure out which page's mapping list lock to acquire using the PFN
2198 2198           * passed in "old" PTE. We then attempt to invalidate the PTE.
2199 2199           * If another thread, probably a hat_pageunload, has asynchronously
2200 2200           * unmapped/remapped this address we'll loop here.
2201 2201           */
2202 2202          ASSERT(ht->ht_busy > 0);
2203 2203          while (PTE_ISVALID(old_pte)) {
2204 2204                  pfn = PTE2PFN(old_pte, l);
2205 2205                  if (PTE_GET(old_pte, PT_SOFTWARE) >= PT_NOCONSIST) {
2206 2206                          pp = NULL;
2207 2207                  } else {
2208 2208  #ifdef __xpv
2209 2209                          if (pfn == PFN_INVALID)
2210 2210                                  panic("Invalid PFN, but not PT_NOCONSIST");
2211 2211  #endif
2212 2212                          pp = page_numtopp_nolock(pfn);
2213 2213                          if (pp == NULL) {
2214 2214                                  panic("no page_t, not NOCONSIST: old_pte="
2215 2215                                      FMT_PTE " ht=%lx entry=0x%x pte_ptr=%lx",
2216 2216                                      old_pte, (uintptr_t)ht, entry,
2217 2217                                      (uintptr_t)pte_ptr);
2218 2218                          }
2219 2219                          x86_hm_enter(pp);
2220 2220                  }
2221 2221  
2222 2222                  old_pte = x86pte_inval(ht, entry, old_pte, pte_ptr, tlb);
2223 2223  
2224 2224                  /*
2225 2225                   * If the page hadn't changed we've unmapped it and can proceed
2226 2226                   */
2227 2227                  if (PTE_ISVALID(old_pte) && PTE2PFN(old_pte, l) == pfn)
2228 2228                          break;
2229 2229  
2230 2230                  /*
2231 2231                   * Otherwise, we'll have to retry with the current old_pte.
2232 2232                   * Drop the hment lock, since the pfn may have changed.
2233 2233                   */
2234 2234                  if (pp != NULL) {
2235 2235                          x86_hm_exit(pp);
2236 2236                          pp = NULL;
2237 2237                  } else {
2238 2238                          ASSERT(PTE_GET(old_pte, PT_SOFTWARE) >= PT_NOCONSIST);
2239 2239                  }
2240 2240          }
2241 2241  
2242 2242          /*
2243 2243           * If the old mapping wasn't valid, there's nothing more to do
2244 2244           */
2245 2245          if (!PTE_ISVALID(old_pte)) {
2246 2246                  if (pp != NULL)
2247 2247                          x86_hm_exit(pp);
2248 2248                  return;
2249 2249          }
2250 2250  
2251 2251          /*
2252 2252           * Take care of syncing any MOD/REF bits and removing the hment.
2253 2253           */
2254 2254          if (pp != NULL) {
2255 2255                  if (!(flags & HAT_UNLOAD_NOSYNC))
2256 2256                          hati_sync_pte_to_page(pp, old_pte, l);
2257 2257                  hm = hment_remove(pp, ht, entry);
2258 2258                  x86_hm_exit(pp);
2259 2259                  if (hm != NULL)
2260 2260                          hment_free(hm);
2261 2261          }
2262 2262  
2263 2263          /*
2264 2264           * Handle bookkeeping in the htable and hat
2265 2265           */
2266 2266          ASSERT(ht->ht_valid_cnt > 0);
2267 2267          HTABLE_DEC(ht->ht_valid_cnt);
2268 2268          PGCNT_DEC(hat, l);
2269 2269  }
2270 2270  
2271 2271  /*
2272 2272   * very cheap unload implementation to special case some kernel addresses
2273 2273   */
2274 2274  static void
2275 2275  hat_kmap_unload(caddr_t addr, size_t len, uint_t flags)
2276 2276  {
2277 2277          uintptr_t       va = (uintptr_t)addr;
2278 2278          uintptr_t       eva = va + len;
2279 2279          pgcnt_t         pg_index;
2280 2280          htable_t        *ht;
2281 2281          uint_t          entry;
2282 2282          x86pte_t        *pte_ptr;
2283 2283          x86pte_t        old_pte;
2284 2284  
2285 2285          for (; va < eva; va += MMU_PAGESIZE) {
2286 2286                  /*
2287 2287                   * Get the PTE
2288 2288                   */
2289 2289                  pg_index = mmu_btop(va - mmu.kmap_addr);
2290 2290                  pte_ptr = PT_INDEX_PTR(mmu.kmap_ptes, pg_index);
2291 2291                  old_pte = GET_PTE(pte_ptr);
2292 2292  
2293 2293                  /*
2294 2294                   * get the htable / entry
2295 2295                   */
2296 2296                  ht = mmu.kmap_htables[(va - mmu.kmap_htables[0]->ht_vaddr)
2297 2297                      >> LEVEL_SHIFT(1)];
2298 2298                  entry = htable_va2entry(va, ht);
2299 2299  
2300 2300                  /*
2301 2301                   * use mostly common code to unmap it.
2302 2302                   */
2303 2303                  hat_pte_unmap(ht, entry, flags, old_pte, pte_ptr, B_TRUE);
2304 2304          }
2305 2305  }
2306 2306  
2307 2307  
2308 2308  /*
2309 2309   * unload a range of virtual address space (no callback)
2310 2310   */
2311 2311  void
2312 2312  hat_unload(hat_t *hat, caddr_t addr, size_t len, uint_t flags)
2313 2313  {
2314 2314          uintptr_t va = (uintptr_t)addr;
2315 2315  
2316 2316          XPV_DISALLOW_MIGRATE();
2317 2317          ASSERT(hat == kas.a_hat || va + len <= _userlimit);
2318 2318  
2319 2319          /*
2320 2320           * special case for performance.
2321 2321           */
2322 2322          if (mmu.kmap_addr <= va && va < mmu.kmap_eaddr) {
2323 2323                  ASSERT(hat == kas.a_hat);
2324 2324                  hat_kmap_unload(addr, len, flags);
2325 2325          } else {
2326 2326                  hat_unload_callback(hat, addr, len, flags, NULL);
2327 2327          }
2328 2328          XPV_ALLOW_MIGRATE();
2329 2329  }
2330 2330  
2331 2331  /*
2332 2332   * Do the callbacks for ranges being unloaded.
2333 2333   */
2334 2334  typedef struct range_info {
2335 2335          uintptr_t       rng_va;
2336 2336          ulong_t         rng_cnt;
2337 2337          level_t         rng_level;
2338 2338  } range_info_t;
2339 2339  
2340 2340  /*
2341 2341   * Invalidate the TLB, and perform the callback to the upper level VM system,
2342 2342   * for the specified ranges of contiguous pages.
2343 2343   */
2344 2344  static void
2345 2345  handle_ranges(hat_t *hat, hat_callback_t *cb, uint_t cnt, range_info_t *range)
2346 2346  {
2347 2347          while (cnt > 0) {
2348 2348                  size_t len;
2349 2349  
2350 2350                  --cnt;
2351 2351                  len = range[cnt].rng_cnt << LEVEL_SHIFT(range[cnt].rng_level);
2352 2352                  hat_tlb_inval_range(hat, (uintptr_t)range[cnt].rng_va, len);
2353 2353  
2354 2354                  if (cb != NULL) {
2355 2355                          cb->hcb_start_addr = (caddr_t)range[cnt].rng_va;
2356 2356                          cb->hcb_end_addr = cb->hcb_start_addr;
2357 2357                          cb->hcb_end_addr += len;
2358 2358                          cb->hcb_function(cb);
2359 2359                  }
2360 2360          }
2361 2361  }
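/*
 * Editor's note: illustrative sketch, not part of the original source.
 * It spells out the length arithmetic used by handle_ranges() above:
 * a run of rng_cnt contiguous mappings at rng_level covers
 * rng_cnt << LEVEL_SHIFT(rng_level) bytes, e.g. three 4K (level 0)
 * mappings cover 3 << 12 = 12K.  The variables "hat" and "vaddr" are
 * hypothetical.
 */
        range_info_t rng = { vaddr, 3, 0 };     /* three level 0 mappings */
        size_t len = rng.rng_cnt << LEVEL_SHIFT(rng.rng_level);

        /* one TLB invalidation covers the whole contiguous run */
        hat_tlb_inval_range(hat, rng.rng_va, len);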
2362 2362  
2363 2363  /*
2364 2364   * Unload a given range of addresses (has optional callback)
2365 2365   *
2366 2366   * Flags:
2367 2367   * define       HAT_UNLOAD              0x00
2368 2368   * define       HAT_UNLOAD_NOSYNC       0x02
2369 2369   * define       HAT_UNLOAD_UNLOCK       0x04
2370 2370   * define       HAT_UNLOAD_OTHER        0x08 - not used
2371 2371   * define       HAT_UNLOAD_UNMAP        0x10 - same as HAT_UNLOAD
2372 2372   */
2373 2373  #define MAX_UNLOAD_CNT (8)
2374 2374  void
2375 2375  hat_unload_callback(
2376 2376          hat_t           *hat,
2377 2377          caddr_t         addr,
2378 2378          size_t          len,
2379 2379          uint_t          flags,
2380 2380          hat_callback_t  *cb)
2381 2381  {
2382 2382          uintptr_t       vaddr = (uintptr_t)addr;
2383 2383          uintptr_t       eaddr = vaddr + len;
2384 2384          htable_t        *ht = NULL;
2385 2385          uint_t          entry;
2386 2386          uintptr_t       contig_va = (uintptr_t)-1L;
2387 2387          range_info_t    r[MAX_UNLOAD_CNT];
2388 2388          uint_t          r_cnt = 0;
2389 2389          x86pte_t        old_pte;
2390 2390  
2391 2391          XPV_DISALLOW_MIGRATE();
2392 2392          ASSERT(hat == kas.a_hat || eaddr <= _userlimit);
2393 2393          ASSERT(IS_PAGEALIGNED(vaddr));
2394 2394          ASSERT(IS_PAGEALIGNED(eaddr));
2395 2395  
2396 2396          /*
2397 2397           * Special case a single page being unloaded for speed. This happens
2398 2398           * quite frequently; COW faults after a fork(), for example.
2399 2399           */
2400 2400          if (cb == NULL && len == MMU_PAGESIZE) {
2401 2401                  ht = htable_getpte(hat, vaddr, &entry, &old_pte, 0);
2402 2402                  if (ht != NULL) {
2403 2403                          if (PTE_ISVALID(old_pte)) {
2404 2404                                  hat_pte_unmap(ht, entry, flags, old_pte,
2405 2405                                      NULL, B_TRUE);
2406 2406                          }
2407 2407                          htable_release(ht);
2408 2408                  }
2409 2409                  XPV_ALLOW_MIGRATE();
2410 2410                  return;
2411 2411          }
2412 2412  
2413 2413          while (vaddr < eaddr) {
2414 2414                  old_pte = htable_walk(hat, &ht, &vaddr, eaddr);
2415 2415                  if (ht == NULL)
2416 2416                          break;
2417 2417  
2418 2418                  ASSERT(!IN_VA_HOLE(vaddr));
2419 2419  
2420 2420                  if (vaddr < (uintptr_t)addr)
2421 2421                          panic("hat_unload_callback(): unmap inside large page");
2422 2422  
2423 2423                  /*
2424 2424                   * We'll do the callbacks for contiguous ranges
2425 2425                   */
2426 2426                  if (vaddr != contig_va ||
2427 2427                      (r_cnt > 0 && r[r_cnt - 1].rng_level != ht->ht_level)) {
2428 2428                          if (r_cnt == MAX_UNLOAD_CNT) {
2429 2429                                  handle_ranges(hat, cb, r_cnt, r);
2430 2430                                  r_cnt = 0;
2431 2431                          }
2432 2432                          r[r_cnt].rng_va = vaddr;
2433 2433                          r[r_cnt].rng_cnt = 0;
2434 2434                          r[r_cnt].rng_level = ht->ht_level;
2435 2435                          ++r_cnt;
2436 2436                  }
2437 2437  
2438 2438                  /*
2439 2439                   * Unload one mapping (for a single page) from the page tables.
2440 2440                   * Note that we do not remove the mapping from the TLB yet,
2441 2441                   * as indicated by the tlb=FALSE argument to hat_pte_unmap().
2442 2442                   * handle_ranges() will clear the TLB entries with one call to
2443 2443                   * hat_tlb_inval_range() per contiguous range.  This is
2444 2444                   * safe because the page cannot be reused until the
2445 2445                   * callback is made (or we return).
2446 2446                   */
2447 2447                  entry = htable_va2entry(vaddr, ht);
2448 2448                  hat_pte_unmap(ht, entry, flags, old_pte, NULL, B_FALSE);
2449 2449                  ASSERT(ht->ht_level <= mmu.max_page_level);
2450 2450                  vaddr += LEVEL_SIZE(ht->ht_level);
2451 2451                  contig_va = vaddr;
2452 2452                  ++r[r_cnt - 1].rng_cnt;
2453 2453          }
2454 2454          if (ht)
2455 2455                  htable_release(ht);
2456 2456  
2457 2457          /*
2458 2458           * handle last range for callbacks
2459 2459           */
2460 2460          if (r_cnt > 0)
2461 2461                  handle_ranges(hat, cb, r_cnt, r);
2462 2462          XPV_ALLOW_MIGRATE();
2463 2463  }
2464 2464  
2465 2465  /*
2466 2466   * Invalidate a virtual address translation on a slave CPU during
2467 2467   * panic() dumps.
2468 2468   */
2469 2469  void
2470 2470  hat_flush_range(hat_t *hat, caddr_t va, size_t size)
2471 2471  {
2472 2472          ssize_t sz;
2473 2473          caddr_t endva = va + size;
2474 2474  
2475 2475          while (va < endva) {
2476 2476                  sz = hat_getpagesize(hat, va);
2477 2477                  if (sz < 0) {
2478 2478  #ifdef __xpv
2479 2479                          xen_flush_tlb();
2480 2480  #else
2481 2481                          flush_all_tlb_entries();
2482 2482  #endif
2483 2483                          break;
2484 2484                  }
2485 2485  #ifdef __xpv
2486 2486                  xen_flush_va(va);
2487 2487  #else
2488 2488                  mmu_tlbflush_entry(va);
2489 2489  #endif
2490 2490                  va += sz;
2491 2491          }
2492 2492  }
2493 2493  
2494 2494  /*
2495 2495   * synchronize mapping with software data structures
2496 2496   *
2497 2497   * This interface is currently only used by the working set monitor
2498 2498   * driver.
2499 2499   */
2500 2500  /*ARGSUSED*/
2501 2501  void
2502 2502  hat_sync(hat_t *hat, caddr_t addr, size_t len, uint_t flags)
2503 2503  {
2504 2504          uintptr_t       vaddr = (uintptr_t)addr;
2505 2505          uintptr_t       eaddr = vaddr + len;
2506 2506          htable_t        *ht = NULL;
2507 2507          uint_t          entry;
2508 2508          x86pte_t        pte;
2509 2509          x86pte_t        save_pte;
2510 2510          x86pte_t        new;
2511 2511          page_t          *pp;
2512 2512  
2513 2513          ASSERT(!IN_VA_HOLE(vaddr));
2514 2514          ASSERT(IS_PAGEALIGNED(vaddr));
2515 2515          ASSERT(IS_PAGEALIGNED(eaddr));
2516 2516          ASSERT(hat == kas.a_hat || eaddr <= _userlimit);
2517 2517  
2518 2518          XPV_DISALLOW_MIGRATE();
2519 2519          for (; vaddr < eaddr; vaddr += LEVEL_SIZE(ht->ht_level)) {
2520 2520  try_again:
2521 2521                  pte = htable_walk(hat, &ht, &vaddr, eaddr);
2522 2522                  if (ht == NULL)
2523 2523                          break;
2524 2524                  entry = htable_va2entry(vaddr, ht);
2525 2525  
2526 2526                  if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC ||
2527 2527                      PTE_GET(pte, PT_REF | PT_MOD) == 0)
2528 2528                          continue;
2529 2529  
2530 2530                  /*
2531 2531                   * We need to acquire the mapping list lock to protect
2532 2532                   * against hat_pageunload(), hat_unload(), etc.
2533 2533                   */
2534 2534                  pp = page_numtopp_nolock(PTE2PFN(pte, ht->ht_level));
2535 2535                  if (pp == NULL)
2536 2536                          break;
2537 2537                  x86_hm_enter(pp);
2538 2538                  save_pte = pte;
2539 2539                  pte = x86pte_get(ht, entry);
2540 2540                  if (pte != save_pte) {
2541 2541                          x86_hm_exit(pp);
2542 2542                          goto try_again;
2543 2543                  }
2544 2544                  if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC ||
2545 2545                      PTE_GET(pte, PT_REF | PT_MOD) == 0) {
2546 2546                          x86_hm_exit(pp);
2547 2547                          continue;
2548 2548                  }
2549 2549  
2550 2550                  /*
2551 2551                   * Need to clear ref or mod bits. We may compete with
2552 2552                   * hardware updating the R/M bits and have to try again.
2553 2553                   */
2554 2554                  if (flags == HAT_SYNC_ZERORM) {
2555 2555                          new = pte;
2556 2556                          PTE_CLR(new, PT_REF | PT_MOD);
2557 2557                          pte = hati_update_pte(ht, entry, pte, new);
2558 2558                          if (pte != 0) {
2559 2559                                  x86_hm_exit(pp);
2560 2560                                  goto try_again;
2561 2561                          }
2562 2562                  } else {
2563 2563                          /*
2564 2564                           * sync the PTE to the page_t
2565 2565                           */
2566 2566                          hati_sync_pte_to_page(pp, save_pte, ht->ht_level);
2567 2567                  }
2568 2568                  x86_hm_exit(pp);
2569 2569          }
2570 2570          if (ht)
2571 2571                  htable_release(ht);
2572 2572          XPV_ALLOW_MIGRATE();
2573 2573  }
2574 2574  
2575 2575  /*
2576 2576   * void hat_map(hat, addr, len, flags)
2577 2577   */
2578 2578  /*ARGSUSED*/
2579 2579  void
2580 2580  hat_map(hat_t *hat, caddr_t addr, size_t len, uint_t flags)
2581 2581  {
2582 2582          /* does nothing */
2583 2583  }
2584 2584  
2585 2585  /*
2586 2586   * uint_t hat_getattr(hat, addr, *attr)
2587 2587   *      returns attr for <hat,addr> in *attr.  returns 0 if there was a
2588 2588   *      mapping and *attr is valid, nonzero if there was no mapping and
2589 2589   *      *attr is not valid.
2590 2590   */
2591 2591  uint_t
2592 2592  hat_getattr(hat_t *hat, caddr_t addr, uint_t *attr)
2593 2593  {
2594 2594          uintptr_t       vaddr = ALIGN2PAGE(addr);
2595 2595          htable_t        *ht = NULL;
2596 2596          x86pte_t        pte;
2597 2597  
2598 2598          ASSERT(hat == kas.a_hat || vaddr <= _userlimit);
2599 2599  
2600 2600          if (IN_VA_HOLE(vaddr))
2601 2601                  return ((uint_t)-1);
2602 2602  
2603 2603          ht = htable_getpte(hat, vaddr, NULL, &pte, mmu.max_page_level);
2604 2604          if (ht == NULL)
2605 2605                  return ((uint_t)-1);
2606 2606  
2607 2607          if (!PTE_ISVALID(pte) || !PTE_ISPAGE(pte, ht->ht_level)) {
2608 2608                  htable_release(ht);
2609 2609                  return ((uint_t)-1);
2610 2610          }
2611 2611  
2612 2612          *attr = PROT_READ;
2613 2613          if (PTE_GET(pte, PT_WRITABLE))
2614 2614                  *attr |= PROT_WRITE;
2615 2615          if (PTE_GET(pte, PT_USER))
2616 2616                  *attr |= PROT_USER;
2617 2617          if (!PTE_GET(pte, mmu.pt_nx))
2618 2618                  *attr |= PROT_EXEC;
2619 2619          if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC)
2620 2620                  *attr |= HAT_NOSYNC;
2621 2621          htable_release(ht);
2622 2622          return (0);
2623 2623  }
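/*
 * Editor's note: hypothetical usage sketch, not part of the original
 * source.  It shows the calling convention documented above: a zero
 * return means *attr is valid and can be tested against the PROT_*
 * and HAT_* bits; "va" is a kernel virtual address (caddr_t) assumed
 * to come from the caller, and cmn_err() comes from <sys/cmn_err.h>.
 */
        uint_t attr;

        if (hat_getattr(kas.a_hat, va, &attr) == 0 &&
            (attr & PROT_WRITE))
                cmn_err(CE_CONT, "mapping at %p is writable\n", (void *)va);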
2624 2624  
2625 2625  /*
2626 2626   * hat_updateattr() applies the given attribute change to an existing mapping
2627 2627   */
2628 2628  #define HAT_LOAD_ATTR           1
2629 2629  #define HAT_SET_ATTR            2
2630 2630  #define HAT_CLR_ATTR            3
2631 2631  
2632 2632  static void
2633 2633  hat_updateattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr, int what)
2634 2634  {
2635 2635          uintptr_t       vaddr = (uintptr_t)addr;
2636 2636          uintptr_t       eaddr = (uintptr_t)addr + len;
2637 2637          htable_t        *ht = NULL;
2638 2638          uint_t          entry;
2639 2639          x86pte_t        oldpte, newpte;
2640 2640          page_t          *pp;
2641 2641  
2642 2642          XPV_DISALLOW_MIGRATE();
2643 2643          ASSERT(IS_PAGEALIGNED(vaddr));
2644 2644          ASSERT(IS_PAGEALIGNED(eaddr));
2645 2645          ASSERT(hat == kas.a_hat || AS_LOCK_HELD(hat->hat_as));
2646 2646          for (; vaddr < eaddr; vaddr += LEVEL_SIZE(ht->ht_level)) {
2647 2647  try_again:
2648 2648                  oldpte = htable_walk(hat, &ht, &vaddr, eaddr);
2649 2649                  if (ht == NULL)
2650 2650                          break;
2651 2651                  if (PTE_GET(oldpte, PT_SOFTWARE) >= PT_NOCONSIST)
2652 2652                          continue;
2653 2653  
2654 2654                  pp = page_numtopp_nolock(PTE2PFN(oldpte, ht->ht_level));
2655 2655                  if (pp == NULL)
2656 2656                          continue;
2657 2657                  x86_hm_enter(pp);
2658 2658  
2659 2659                  newpte = oldpte;
2660 2660                  /*
2661 2661                   * We found a page table entry in the desired range,
2662 2662                   * figure out the new attributes.
2663 2663                   */
2664 2664                  if (what == HAT_SET_ATTR || what == HAT_LOAD_ATTR) {
2665 2665                          if ((attr & PROT_WRITE) &&
2666 2666                              !PTE_GET(oldpte, PT_WRITABLE))
2667 2667                                  newpte |= PT_WRITABLE;
2668 2668  
2669 2669                          if ((attr & HAT_NOSYNC) &&
2670 2670                              PTE_GET(oldpte, PT_SOFTWARE) < PT_NOSYNC)
2671 2671                                  newpte |= PT_NOSYNC;
2672 2672  
2673 2673                          if ((attr & PROT_EXEC) && PTE_GET(oldpte, mmu.pt_nx))
2674 2674                                  newpte &= ~mmu.pt_nx;
2675 2675                  }
2676 2676  
2677 2677                  if (what == HAT_LOAD_ATTR) {
2678 2678                          if (!(attr & PROT_WRITE) &&
2679 2679                              PTE_GET(oldpte, PT_WRITABLE))
2680 2680                                  newpte &= ~PT_WRITABLE;
2681 2681  
2682 2682                          if (!(attr & HAT_NOSYNC) &&
2683 2683                              PTE_GET(oldpte, PT_SOFTWARE) >= PT_NOSYNC)
2684 2684                                  newpte &= ~PT_SOFTWARE;
2685 2685  
2686 2686                          if (!(attr & PROT_EXEC) && !PTE_GET(oldpte, mmu.pt_nx))
2687 2687                                  newpte |= mmu.pt_nx;
2688 2688                  }
2689 2689  
2690 2690                  if (what == HAT_CLR_ATTR) {
2691 2691                          if ((attr & PROT_WRITE) && PTE_GET(oldpte, PT_WRITABLE))
2692 2692                                  newpte &= ~PT_WRITABLE;
2693 2693  
2694 2694                          if ((attr & HAT_NOSYNC) &&
2695 2695                              PTE_GET(oldpte, PT_SOFTWARE) >= PT_NOSYNC)
2696 2696                                  newpte &= ~PT_SOFTWARE;
2697 2697  
2698 2698                          if ((attr & PROT_EXEC) && !PTE_GET(oldpte, mmu.pt_nx))
2699 2699                                  newpte |= mmu.pt_nx;
2700 2700                  }
2701 2701  
2702 2702                  /*
2703 2703                   * Ensure NOSYNC/NOCONSIST mappings have REF and MOD set.
2704 2704                   * x86pte_set() depends on this.
2705 2705                   */
2706 2706                  if (PTE_GET(newpte, PT_SOFTWARE) >= PT_NOSYNC)
2707 2707                          newpte |= PT_REF | PT_MOD;
2708 2708  
2709 2709                  /*
2710 2710                   * Note: PROT_READ and other attributes are not handled here;
2711 2711                   * this code only handles EXEC, WRITE, and NOSYNC.
2712 2712                   */
2713 2713  
2714 2714                  /*
2715 2715                   * If new PTE really changed, update the table.
2716 2716                   */
2717 2717                  if (newpte != oldpte) {
2718 2718                          entry = htable_va2entry(vaddr, ht);
2719 2719                          oldpte = hati_update_pte(ht, entry, oldpte, newpte);
2720 2720                          if (oldpte != 0) {
2721 2721                                  x86_hm_exit(pp);
2722 2722                                  goto try_again;
2723 2723                          }
2724 2724                  }
2725 2725                  x86_hm_exit(pp);
2726 2726          }
2727 2727          if (ht)
2728 2728                  htable_release(ht);
2729 2729          XPV_ALLOW_MIGRATE();
2730 2730  }
2731 2731  
2732 2732  /*
2733 2733   * Various wrappers for hat_updateattr()
2734 2734   */
2735 2735  void
2736 2736  hat_setattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr)
2737 2737  {
2738 2738          ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
2739 2739          hat_updateattr(hat, addr, len, attr, HAT_SET_ATTR);
2740 2740  }
2741 2741  
2742 2742  void
2743 2743  hat_clrattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr)
2744 2744  {
2745 2745          ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
2746 2746          hat_updateattr(hat, addr, len, attr, HAT_CLR_ATTR);
2747 2747  }
2748 2748  
2749 2749  void
2750 2750  hat_chgattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr)
2751 2751  {
2752 2752          ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
2753 2753          hat_updateattr(hat, addr, len, attr, HAT_LOAD_ATTR);
2754 2754  }
2755 2755  
2756 2756  void
2757 2757  hat_chgprot(hat_t *hat, caddr_t addr, size_t len, uint_t vprot)
2758 2758  {
2759 2759          ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
2760 2760          hat_updateattr(hat, addr, len, vprot & HAT_PROT_MASK, HAT_LOAD_ATTR);
2761 2761  }
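
           /*
            * Illustrative sketch, not part of the original source: a
            * hypothetical use of the wrappers above to make one already
            * mapped, page aligned kernel page read-only (and, given the
            * HAT_LOAD_ATTR semantics, non-executable):
            *
            *      hat_chgprot(kas.a_hat, addr, MMU_PAGESIZE, PROT_READ);
            */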
2762 2762  
2763 2763  /*
2764 2764   * size_t hat_getpagesize(hat, addr)
2765 2765   *      returns pagesize in bytes for <hat, addr>. returns -1 if there is
2766 2766   *      no mapping. This is an advisory call.
2767 2767   */
2768 2768  ssize_t
2769 2769  hat_getpagesize(hat_t *hat, caddr_t addr)
2770 2770  {
2771 2771          uintptr_t       vaddr = ALIGN2PAGE(addr);
2772 2772          htable_t        *ht;
2773 2773          size_t          pagesize;
2774 2774  
2775 2775          ASSERT(hat == kas.a_hat || vaddr <= _userlimit);
2776 2776          if (IN_VA_HOLE(vaddr))
2777 2777                  return (-1);
2778 2778          ht = htable_getpage(hat, vaddr, NULL);
2779 2779          if (ht == NULL)
2780 2780                  return (-1);
2781 2781          pagesize = LEVEL_SIZE(ht->ht_level);
2782 2782          htable_release(ht);
2783 2783          return (pagesize);
2784 2784  }
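
           /*
            * Illustrative sketch, not part of the original source: a
            * hypothetical caller checking whether an address is backed by a
            * large page:
            *
            *      if (hat_getpagesize(hat, addr) > MMU_PAGESIZE)
            *              ... mapped by a level 1 or higher (large) page ...
            */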
2785 2785  
2786 2786  
2787 2787  
2788 2788  /*
2789 2789   * pfn_t hat_getpfnum(hat, addr)
2790 2790   *      returns pfn for <hat, addr> or PFN_INVALID if mapping is invalid.
2791 2791   */
2792 2792  pfn_t
2793 2793  hat_getpfnum(hat_t *hat, caddr_t addr)
2794 2794  {
2795 2795          uintptr_t       vaddr = ALIGN2PAGE(addr);
2796 2796          htable_t        *ht;
2797 2797          uint_t          entry;
2798 2798          pfn_t           pfn = PFN_INVALID;
2799 2799  
2800 2800          ASSERT(hat == kas.a_hat || vaddr <= _userlimit);
2801 2801          if (khat_running == 0)
2802 2802                  return (PFN_INVALID);
2803 2803  
2804 2804          if (IN_VA_HOLE(vaddr))
2805 2805                  return (PFN_INVALID);
2806 2806  
2807 2807          XPV_DISALLOW_MIGRATE();
2808 2808          /*
2809 2809           * A very common use of hat_getpfnum() is from the DDI for kernel pages.
2810 2810           * Use the kmap_ptes (which also covers the 32 bit heap) to speed
2811 2811           * this up.
2812 2812           */
2813 2813          if (mmu.kmap_addr <= vaddr && vaddr < mmu.kmap_eaddr) {
2814 2814                  x86pte_t pte;
2815 2815                  pgcnt_t pg_index;
2816 2816  
2817 2817                  pg_index = mmu_btop(vaddr - mmu.kmap_addr);
2818 2818                  pte = GET_PTE(PT_INDEX_PTR(mmu.kmap_ptes, pg_index));
2819 2819                  if (PTE_ISVALID(pte))
2820 2820                          /*LINTED [use of constant 0 causes a lint warning] */
2821 2821                          pfn = PTE2PFN(pte, 0);
2822 2822                  XPV_ALLOW_MIGRATE();
2823 2823                  return (pfn);
2824 2824          }
2825 2825  
2826 2826          ht = htable_getpage(hat, vaddr, &entry);
2827 2827          if (ht == NULL) {
2828 2828                  XPV_ALLOW_MIGRATE();
2829 2829                  return (PFN_INVALID);
2830 2830          }
2831 2831          ASSERT(vaddr >= ht->ht_vaddr);
2832 2832          ASSERT(vaddr <= HTABLE_LAST_PAGE(ht));
2833 2833          pfn = PTE2PFN(x86pte_get(ht, entry), ht->ht_level);
2834 2834          if (ht->ht_level > 0)
2835 2835                  pfn += mmu_btop(vaddr & LEVEL_OFFSET(ht->ht_level));
2836 2836          htable_release(ht);
2837 2837          XPV_ALLOW_MIGRATE();
2838 2838          return (pfn);
2839 2839  }
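
           /*
            * Illustrative sketch, not part of the original source: a
            * hypothetical translation of a kernel virtual address to a
            * physical address (va and pa are assumed locals):
            *
            *      pfn_t pfn = hat_getpfnum(kas.a_hat, va);
            *
            *      if (pfn != PFN_INVALID)
            *              pa = mmu_ptob(pfn) + ((uintptr_t)va & MMU_PAGEOFFSET);
            */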
2840 2840  
2841 2841  /*
2842 2842   * int hat_probe(hat, addr)
2843 2843   *      return 0 if no valid mapping is present.  Faster version
2844 2844   *      of hat_getattr in certain architectures.
2845 2845   */
2846 2846  int
2847 2847  hat_probe(hat_t *hat, caddr_t addr)
2848 2848  {
2849 2849          uintptr_t       vaddr = ALIGN2PAGE(addr);
2850 2850          uint_t          entry;
2851 2851          htable_t        *ht;
2852 2852          pgcnt_t         pg_off;
2853 2853  
2854 2854          ASSERT(hat == kas.a_hat || vaddr <= _userlimit);
2855 2855          ASSERT(hat == kas.a_hat || AS_LOCK_HELD(hat->hat_as));
2856 2856          if (IN_VA_HOLE(vaddr))
2857 2857                  return (0);
2858 2858  
2859 2859          /*
2860 2860           * Most common use of hat_probe is from segmap. We special case it
2861 2861           * for performance.
2862 2862           */
2863 2863          if (mmu.kmap_addr <= vaddr && vaddr < mmu.kmap_eaddr) {
2864 2864                  pg_off = mmu_btop(vaddr - mmu.kmap_addr);
2865 2865                  if (mmu.pae_hat)
2866 2866                          return (PTE_ISVALID(mmu.kmap_ptes[pg_off]));
2867 2867                  else
2868 2868                          return (PTE_ISVALID(
2869 2869                              ((x86pte32_t *)mmu.kmap_ptes)[pg_off]));
2870 2870          }
2871 2871  
2872 2872          ht = htable_getpage(hat, vaddr, &entry);
2873 2873          htable_release(ht);
2874 2874          return (ht != NULL);
2875 2875  }
2876 2876  
2877 2877  /*
2878 2878   * Find out if the segment for hat_share()/hat_unshare() is DISM or locked ISM.
2879 2879   */
2880 2880  static int
2881 2881  is_it_dism(hat_t *hat, caddr_t va)
2882 2882  {
2883 2883          struct seg *seg;
2884 2884          struct shm_data *shmd;
2885 2885          struct spt_data *sptd;
2886 2886  
2887 2887          seg = as_findseg(hat->hat_as, va, 0);
2888 2888          ASSERT(seg != NULL);
2889 2889          ASSERT(seg->s_base <= va);
2890 2890          shmd = (struct shm_data *)seg->s_data;
2891 2891          ASSERT(shmd != NULL);
2892 2892          sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2893 2893          ASSERT(sptd != NULL);
2894 2894          if (sptd->spt_flags & SHM_PAGEABLE)
2895 2895                  return (1);
2896 2896          return (0);
2897 2897  }
2898 2898  
2899 2899  /*
2900 2900   * Simple implementation of ISM. hat_share() is similar to hat_memload_array(),
2901 2901   * except that we use the ism_hat's existing mappings to determine the pages
2902 2902   * and protections to use for this hat. If we find a full properly aligned
2903 2903   * and sized pagetable, we will attempt to share the pagetable itself.
2904 2904   */
2905 2905  /*ARGSUSED*/
2906 2906  int
2907 2907  hat_share(
2908 2908          hat_t           *hat,
2909 2909          caddr_t         addr,
2910 2910          hat_t           *ism_hat,
2911 2911          caddr_t         src_addr,
2912 2912          size_t          len,    /* almost useless value, see below.. */
2913 2913          uint_t          ismszc)
2914 2914  {
2915 2915          uintptr_t       vaddr_start = (uintptr_t)addr;
2916 2916          uintptr_t       vaddr;
2917 2917          uintptr_t       eaddr = vaddr_start + len;
2918 2918          uintptr_t       ism_addr_start = (uintptr_t)src_addr;
2919 2919          uintptr_t       ism_addr = ism_addr_start;
2920 2920          uintptr_t       e_ism_addr = ism_addr + len;
2921 2921          htable_t        *ism_ht = NULL;
2922 2922          htable_t        *ht;
2923 2923          x86pte_t        pte;
2924 2924          page_t          *pp;
2925 2925          pfn_t           pfn;
2926 2926          level_t         l;
2927 2927          pgcnt_t         pgcnt;
2928 2928          uint_t          prot;
2929 2929          int             is_dism;
2930 2930          int             flags;
2931 2931  
2932 2932          /*
2933 2933           * We might be asked to share an empty DISM hat by as_dup()
2934 2934           */
2935 2935          ASSERT(hat != kas.a_hat);
2936 2936          ASSERT(eaddr <= _userlimit);
2937 2937          if (!(ism_hat->hat_flags & HAT_SHARED)) {
2938 2938                  ASSERT(hat_get_mapped_size(ism_hat) == 0);
2939 2939                  return (0);
2940 2940          }
2941 2941          XPV_DISALLOW_MIGRATE();
2942 2942  
2943 2943          /*
2944 2944           * The SPT segment driver often passes us a size larger than there are
2945 2945           * valid mappings. That's because it rounds the segment size up to a
2946 2946           * large pagesize, even if the actual memory mapped by ism_hat is less.
2947 2947           */
2948 2948          ASSERT(IS_PAGEALIGNED(vaddr_start));
2949 2949          ASSERT(IS_PAGEALIGNED(ism_addr_start));
2950 2950          ASSERT(ism_hat->hat_flags & HAT_SHARED);
2951 2951          is_dism = is_it_dism(hat, addr);
2952 2952          while (ism_addr < e_ism_addr) {
2953 2953                  /*
2954 2954                   * use htable_walk to get the next valid ISM mapping
2955 2955                   */
2956 2956                  pte = htable_walk(ism_hat, &ism_ht, &ism_addr, e_ism_addr);
2957 2957                  if (ism_ht == NULL)
2958 2958                          break;
2959 2959  
2960 2960                  /*
2961 2961                   * First check to see if we already share the page table.
2962 2962                   */
2963 2963                  l = ism_ht->ht_level;
2964 2964                  vaddr = vaddr_start + (ism_addr - ism_addr_start);
2965 2965                  ht = htable_lookup(hat, vaddr, l);
2966 2966                  if (ht != NULL) {
2967 2967                          if (ht->ht_flags & HTABLE_SHARED_PFN)
2968 2968                                  goto shared;
2969 2969                          htable_release(ht);
2970 2970                          goto not_shared;
2971 2971                  }
2972 2972  
2973 2973                  /*
2974 2974                   * Can't ever share top table.
2975 2975                   */
2976 2976                  if (l == mmu.max_level)
2977 2977                          goto not_shared;
2978 2978  
2979 2979                  /*
2980 2980                   * Avoid level mismatches later due to DISM faults.
2981 2981                   */
2982 2982                  if (is_dism && l > 0)
2983 2983                          goto not_shared;
2984 2984  
2985 2985                  /*
2986 2986                   * addresses and lengths must align
2987 2987                   * table must be fully populated
2988 2988                   * no lower level page tables
2989 2989                   */
2990 2990                  if (ism_addr != ism_ht->ht_vaddr ||
2991 2991                      (vaddr & LEVEL_OFFSET(l + 1)) != 0)
2992 2992                          goto not_shared;
2993 2993  
2994 2994                  /*
2995 2995                   * The range of address space must cover a full table.
2996 2996                   */
2997 2997                  if (e_ism_addr - ism_addr < LEVEL_SIZE(l + 1))
2998 2998                          goto not_shared;
2999 2999  
3000 3000                  /*
3001 3001                   * All entries in the ISM page table must be leaf PTEs.
3002 3002                   */
3003 3003                  if (l > 0) {
3004 3004                          int e;
3005 3005  
3006 3006                          /*
3007 3007                           * We know the 0th is from htable_walk() above.
3008 3008                           */
3009 3009                          for (e = 1; e < HTABLE_NUM_PTES(ism_ht); ++e) {
3010 3010                                  x86pte_t pte;
3011 3011                                  pte = x86pte_get(ism_ht, e);
3012 3012                                  if (!PTE_ISPAGE(pte, l))
3013 3013                                          goto not_shared;
3014 3014                          }
3015 3015                  }
3016 3016  
3017 3017                  /*
3018 3018                   * share the page table
3019 3019                   */
3020 3020                  ht = htable_create(hat, vaddr, l, ism_ht);
3021 3021  shared:
3022 3022                  ASSERT(ht->ht_flags & HTABLE_SHARED_PFN);
3023 3023                  ASSERT(ht->ht_shares == ism_ht);
3024 3024                  hat->hat_ism_pgcnt +=
3025 3025                      (ism_ht->ht_valid_cnt - ht->ht_valid_cnt) <<
3026 3026                      (LEVEL_SHIFT(ht->ht_level) - MMU_PAGESHIFT);
3027 3027                  ht->ht_valid_cnt = ism_ht->ht_valid_cnt;
3028 3028                  htable_release(ht);
3029 3029                  ism_addr = ism_ht->ht_vaddr + LEVEL_SIZE(l + 1);
3030 3030                  htable_release(ism_ht);
3031 3031                  ism_ht = NULL;
3032 3032                  continue;
3033 3033  
3034 3034  not_shared:
3035 3035                  /*
3036 3036                   * Unable to share the page table. Instead we will
3037 3037                   * create new mappings from the values in the ISM mappings.
3038 3038                   * Figure out what level size mappings to use.
3039 3039                   */
3040 3040                  for (l = ism_ht->ht_level; l > 0; --l) {
3041 3041                          if (LEVEL_SIZE(l) <= eaddr - vaddr &&
3042 3042                              (vaddr & LEVEL_OFFSET(l)) == 0)
3043 3043                                  break;
3044 3044                  }
3045 3045  
3046 3046                  /*
3047 3047                   * The ISM mapping might be larger than the share area;
3048 3048                   * be careful to truncate it if needed.
3049 3049                   */
3050 3050                  if (eaddr - vaddr >= LEVEL_SIZE(ism_ht->ht_level)) {
3051 3051                          pgcnt = mmu_btop(LEVEL_SIZE(ism_ht->ht_level));
3052 3052                  } else {
3053 3053                          pgcnt = mmu_btop(eaddr - vaddr);
3054 3054                          l = 0;
3055 3055                  }
3056 3056  
3057 3057                  pfn = PTE2PFN(pte, ism_ht->ht_level);
3058 3058                  ASSERT(pfn != PFN_INVALID);
3059 3059                  while (pgcnt > 0) {
3060 3060                          /*
3061 3061                           * Make a new pte for the PFN for this level.
3062 3062                           * Copy protections for the pte from the ISM pte.
3063 3063                           */
3064 3064                          pp = page_numtopp_nolock(pfn);
3065 3065                          ASSERT(pp != NULL);
3066 3066  
3067 3067                          prot = PROT_USER | PROT_READ | HAT_UNORDERED_OK;
3068 3068                          if (PTE_GET(pte, PT_WRITABLE))
3069 3069                                  prot |= PROT_WRITE;
3070 3070                          if (!PTE_GET(pte, PT_NX))
3071 3071                                  prot |= PROT_EXEC;
3072 3072  
3073 3073                          flags = HAT_LOAD;
3074 3074                          if (!is_dism)
3075 3075                                  flags |= HAT_LOAD_LOCK | HAT_LOAD_NOCONSIST;
3076 3076                          while (hati_load_common(hat, vaddr, pp, prot, flags,
3077 3077                              l, pfn) != 0) {
3078 3078                                  if (l == 0)
3079 3079                                          panic("hati_load_common() failure");
3080 3080                                  --l;
3081 3081                          }
3082 3082  
3083 3083                          vaddr += LEVEL_SIZE(l);
3084 3084                          ism_addr += LEVEL_SIZE(l);
3085 3085                          pfn += mmu_btop(LEVEL_SIZE(l));
3086 3086                          pgcnt -= mmu_btop(LEVEL_SIZE(l));
3087 3087                  }
3088 3088          }
3089 3089          if (ism_ht != NULL)
3090 3090                  htable_release(ism_ht);
3091 3091          XPV_ALLOW_MIGRATE();
3092 3092          return (0);
3093 3093  }
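
           /*
            * To summarize the logic above: an entire pagetable is shared only
            * when the ISM table is not the top level table, the addresses and
            * length align to a full table at that level, the range covers the
            * whole table, every entry of an upper level ISM table is a leaf
            * PTE, and (for DISM) the table is level 0.  Otherwise hat_share()
            * falls back to copying the individual mappings via
            * hati_load_common().
            */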
3094 3094  
3095 3095  
3096 3096  /*
3097 3097   * hat_unshare() is similar to hat_unload_callback(), but
3098 3098   * we have to look for empty shared pagetables. Note that
3099 3099   * hat_unshare() is always invoked against an entire segment.
3100 3100   */
3101 3101  /*ARGSUSED*/
3102 3102  void
3103 3103  hat_unshare(hat_t *hat, caddr_t addr, size_t len, uint_t ismszc)
3104 3104  {
3105 3105          uint64_t        vaddr = (uintptr_t)addr;
3106 3106          uintptr_t       eaddr = vaddr + len;
3107 3107          htable_t        *ht = NULL;
3108 3108          uint_t          need_demaps = 0;
3109 3109          int             flags = HAT_UNLOAD_UNMAP;
3110 3110          level_t         l;
3111 3111  
3112 3112          ASSERT(hat != kas.a_hat);
3113 3113          ASSERT(eaddr <= _userlimit);
3114 3114          ASSERT(IS_PAGEALIGNED(vaddr));
3115 3115          ASSERT(IS_PAGEALIGNED(eaddr));
3116 3116          XPV_DISALLOW_MIGRATE();
3117 3117  
3118 3118          /*
3119 3119           * First go through and remove any shared pagetables.
3120 3120           *
3121 3121           * Note that it's ok to delay the TLB shootdown till the entire range is
3122 3122           * finished, because if hat_pageunload() were to unload a shared
3123 3123           * pagetable page, its hat_tlb_inval() will do a global TLB invalidate.
3124 3124           */
3125 3125          l = mmu.max_page_level;
3126 3126          if (l == mmu.max_level)
3127 3127                  --l;
3128 3128          for (; l >= 0; --l) {
3129 3129                  for (vaddr = (uintptr_t)addr; vaddr < eaddr;
3130 3130                      vaddr = (vaddr & LEVEL_MASK(l + 1)) + LEVEL_SIZE(l + 1)) {
3131 3131                          ASSERT(!IN_VA_HOLE(vaddr));
3132 3132                          /*
3133 3133                           * find a pagetable that maps the current address
3134 3134                           */
3135 3135                          ht = htable_lookup(hat, vaddr, l);
3136 3136                          if (ht == NULL)
3137 3137                                  continue;
3138 3138                          if (ht->ht_flags & HTABLE_SHARED_PFN) {
3139 3139                                  /*
3140 3140                                   * clear page count, set valid_cnt to 0,
3141 3141                                   * let htable_release() finish the job
3142 3142                                   */
3143 3143                                  hat->hat_ism_pgcnt -= ht->ht_valid_cnt <<
3144 3144                                      (LEVEL_SHIFT(ht->ht_level) - MMU_PAGESHIFT);
3145 3145                                  ht->ht_valid_cnt = 0;
3146 3146                                  need_demaps = 1;
3147 3147                          }
3148 3148                          htable_release(ht);
3149 3149                  }
3150 3150          }
3151 3151  
3152 3152          /*
3153 3153           * flush the TLBs - since we're probably dealing with MANY mappings
3154 3154           * we do just one CR3 reload.
3155 3155           */
3156 3156          if (!(hat->hat_flags & HAT_FREEING) && need_demaps)
3157 3157                  hat_tlb_inval(hat, DEMAP_ALL_ADDR);
3158 3158  
3159 3159          /*
3160 3160           * Now go back and clean up any unaligned mappings that
3161 3161           * couldn't share pagetables.
3162 3162           */
3163 3163          if (!is_it_dism(hat, addr))
3164 3164                  flags |= HAT_UNLOAD_UNLOCK;
3165 3165          hat_unload(hat, addr, len, flags);
3166 3166          XPV_ALLOW_MIGRATE();
3167 3167  }
3168 3168  
3169 3169  
3170 3170  /*
3171 3171   * hat_reserve() does nothing
3172 3172   */
3173 3173  /*ARGSUSED*/
3174 3174  void
3175 3175  hat_reserve(struct as *as, caddr_t addr, size_t len)
3176 3176  {
3177 3177  }
3178 3178  
3179 3179  
3180 3180  /*
3181 3181   * Called when all mappings to a page should have write permission removed.
3182 3182   * Mostly stolen from hat_pagesync()
3183 3183   */
3184 3184  static void
3185 3185  hati_page_clrwrt(struct page *pp)
3186 3186  {
3187 3187          hment_t         *hm = NULL;
3188 3188          htable_t        *ht;
3189 3189          uint_t          entry;
3190 3190          x86pte_t        old;
3191 3191          x86pte_t        new;
3192 3192          uint_t          pszc = 0;
3193 3193  
3194 3194          XPV_DISALLOW_MIGRATE();
3195 3195  next_size:
3196 3196          /*
3197 3197           * walk thru the mapping list clearing write permission
3198 3198           */
3199 3199          x86_hm_enter(pp);
3200 3200          while ((hm = hment_walk(pp, &ht, &entry, hm)) != NULL) {
3201 3201                  if (ht->ht_level < pszc)
3202 3202                          continue;
3203 3203                  old = x86pte_get(ht, entry);
3204 3204  
3205 3205                  for (;;) {
3206 3206                          /*
3207 3207                           * Is this mapping of interest?
3208 3208                           */
3209 3209                          if (PTE2PFN(old, ht->ht_level) != pp->p_pagenum ||
3210 3210                              PTE_GET(old, PT_WRITABLE) == 0)
3211 3211                                  break;
3212 3212  
3213 3213                          /*
3214 3214                           * Clear ref/mod writable bits. This requires cross
3215 3215                           * calls to ensure any executing TLBs see cleared bits.
3216 3216                           */
3217 3217                          new = old;
3218 3218                          PTE_CLR(new, PT_REF | PT_MOD | PT_WRITABLE);
3219 3219                          old = hati_update_pte(ht, entry, old, new);
3220 3220                          if (old != 0)
3221 3221                                  continue;
3222 3222  
3223 3223                          break;
3224 3224                  }
3225 3225          }
3226 3226          x86_hm_exit(pp);
3227 3227          while (pszc < pp->p_szc) {
3228 3228                  page_t *tpp;
3229 3229                  pszc++;
3230 3230                  tpp = PP_GROUPLEADER(pp, pszc);
3231 3231                  if (pp != tpp) {
3232 3232                          pp = tpp;
3233 3233                          goto next_size;
3234 3234                  }
3235 3235          }
3236 3236          XPV_ALLOW_MIGRATE();
3237 3237  }
3238 3238  
3239 3239  /*
3240 3240   * void hat_page_setattr(pp, flag)
3241 3241   * void hat_page_clrattr(pp, flag)
3242 3242   *      used to set/clr ref/mod bits.
3243 3243   */
3244 3244  void
3245 3245  hat_page_setattr(struct page *pp, uint_t flag)
3246 3246  {
3247 3247          vnode_t         *vp = pp->p_vnode;
3248 3248          kmutex_t        *vphm = NULL;
3249 3249          page_t          **listp;
3250 3250          int             noshuffle;
3251 3251  
3252 3252          noshuffle = flag & P_NSH;
3253 3253          flag &= ~P_NSH;
3254 3254  
3255 3255          if (PP_GETRM(pp, flag) == flag)
3256 3256                  return;
3257 3257  
3258 3258          if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp) &&
3259 3259              !noshuffle) {
3260 3260                  vphm = page_vnode_mutex(vp);
3261 3261                  mutex_enter(vphm);
3262 3262          }
3263 3263  
3264 3264          PP_SETRM(pp, flag);
3265 3265  
3266 3266          if (vphm != NULL) {
3267 3267  
3268 3268                  /*
3269 3269                   * Some file systems examine v_pages for NULL without
3270 3270                   * grabbing the vphm mutex. Must not let it become NULL when
3271 3271                   * pp is the only page on the list.
3272 3272                   */
3273 3273                  if (pp->p_vpnext != pp) {
3274 3274                          page_vpsub(&vp->v_pages, pp);
3275 3275                          if (vp->v_pages != NULL)
3276 3276                                  listp = &vp->v_pages->p_vpprev->p_vpnext;
3277 3277                          else
3278 3278                                  listp = &vp->v_pages;
3279 3279                          page_vpadd(listp, pp);
3280 3280                  }
3281 3281                  mutex_exit(vphm);
3282 3282          }
3283 3283  }
3284 3284  
3285 3285  void
3286 3286  hat_page_clrattr(struct page *pp, uint_t flag)
3287 3287  {
3288 3288          vnode_t         *vp = pp->p_vnode;
3289 3289          ASSERT(!(flag & ~(P_MOD | P_REF | P_RO)));
3290 3290  
3291 3291          /*
3292 3292           * Caller is expected to hold page's io lock for VMODSORT to work
3293 3293           * correctly with pvn_vplist_dirty() and pvn_getdirty() when mod
3294 3294           * bit is cleared.
3295 3295           * We don't assert this, to avoid tripping some existing third-party
3296 3296           * code. The dirty page is moved back to the top of the v_pages list
3297 3297           * after I/O is done in pvn_write_done().
3298 3298           */
3299 3299          PP_CLRRM(pp, flag);
3300 3300  
3301 3301          if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp)) {
3302 3302  
3303 3303                  /*
3304 3304                   * VMODSORT works by removing write permissions and getting
3305 3305                   * a fault when a page is made dirty. At this point
3306 3306                   * we need to remove write permission from all mappings
3307 3307                   * to this page.
3308 3308                   */
3309 3309                  hati_page_clrwrt(pp);
3310 3310          }
3311 3311  }
3312 3312  
3313 3313  /*
3314 3314   *      If flag is specified, returns 0 if attribute is disabled
3315 3315   *      and nonzero if enabled.  If flag specifies multiple attributes
3316 3316   *      then returns 0 if ALL attributes are disabled.  This is an advisory
3317 3317   *      call.
3318 3318   */
3319 3319  uint_t
3320 3320  hat_page_getattr(struct page *pp, uint_t flag)
3321 3321  {
3322 3322          return (PP_GETRM(pp, flag));
3323 3323  }
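
           /*
            * Illustrative sketch, not part of the original source: a
            * hypothetical caller testing whether a page has been modified
            * since its mod bit was last cleared:
            *
            *      if (hat_page_getattr(pp, P_MOD))
            *              ... the page is dirty ...
            */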
3324 3324  
3325 3325  
3326 3326  /*
3327 3327   * common code used by hat_page_inval() and hment_steal()
3328 3328   */
3329 3329  hment_t *
3330 3330  hati_page_unmap(page_t *pp, htable_t *ht, uint_t entry)
3331 3331  {
3332 3332          x86pte_t old_pte;
3333 3333          pfn_t pfn = pp->p_pagenum;
3334 3334          hment_t *hm;
3335 3335  
3336 3336          /*
3337 3337           * We need to acquire a hold on the htable in order to
3338 3338           * do the invalidate. We know the htable must exist, since
3339 3339           * unmaps don't release the htable until after removing any
3340 3340           * hment. Having x86_hm_enter() keeps that from proceeding.
3341 3341           */
3342 3342          htable_acquire(ht);
3343 3343  
3344 3344          /*
3345 3345           * Invalidate the PTE and remove the hment.
3346 3346           */
3347 3347          old_pte = x86pte_inval(ht, entry, 0, NULL, B_TRUE);
3348 3348          if (PTE2PFN(old_pte, ht->ht_level) != pfn) {
3349 3349                  panic("x86pte_inval() failure found PTE = " FMT_PTE
3350 3350                      " pfn being unmapped is %lx ht=0x%lx entry=0x%x",
3351 3351                      old_pte, pfn, (uintptr_t)ht, entry);
3352 3352          }
3353 3353  
3354 3354          /*
3355 3355           * Clean up all the htable information for this mapping
3356 3356           */
3357 3357          ASSERT(ht->ht_valid_cnt > 0);
3358 3358          HTABLE_DEC(ht->ht_valid_cnt);
3359 3359          PGCNT_DEC(ht->ht_hat, ht->ht_level);
3360 3360  
3361 3361          /*
3362 3362           * sync ref/mod bits to the page_t
3363 3363           */
3364 3364          if (PTE_GET(old_pte, PT_SOFTWARE) < PT_NOSYNC)
3365 3365                  hati_sync_pte_to_page(pp, old_pte, ht->ht_level);
3366 3366  
3367 3367          /*
3368 3368           * Remove the mapping list entry for this page.
3369 3369           */
3370 3370          hm = hment_remove(pp, ht, entry);
3371 3371  
3372 3372          /*
3373 3373           * drop the mapping list lock so that we might free the
3374 3374           * hment and htable.
3375 3375           */
3376 3376          x86_hm_exit(pp);
3377 3377          htable_release(ht);
3378 3378          return (hm);
3379 3379  }
3380 3380  
3381 3381  extern int      vpm_enable;
3382 3382  /*
3383 3383   * Unload translations to a page. If the page is a subpage of a large
3384 3384   * page, the large page mappings are also removed.
3385 3385   * If curhat is not NULL, then we only unload the translation
3386 3386   * for the given process; otherwise all translations are unloaded.
3387 3387   */
3388 3388  void
3389 3389  hat_page_inval(struct page *pp, uint_t pg_szcd, struct hat *curhat)
3390 3390  {
3391 3391          page_t          *cur_pp = pp;
3392 3392          hment_t         *hm;
3393 3393          hment_t         *prev;
3394 3394          htable_t        *ht;
3395 3395          uint_t          entry;
3396 3396          level_t         level;
3397 3397          ulong_t         cnt;
3398 3398  
3399 3399          XPV_DISALLOW_MIGRATE();
3400 3400  
3401 3401  #if defined(__amd64)
3402 3402          /*
3403 3403           * clear the vpm ref.
3404 3404           */
3405 3405          if (vpm_enable) {
3406 3406                  pp->p_vpmref = 0;
3407 3407          }
3408 3408  #endif
3409 3409          /*
3410 3410           * The loop with next_size handles pages with multiple pagesize mappings
3411 3411           */
3412 3412  next_size:
3413 3413          if (curhat != NULL)
3414 3414                  cnt = hat_page_getshare(cur_pp);
3415 3415          for (;;) {
3416 3416  
3417 3417                  /*
3418 3418                   * Get a mapping list entry
3419 3419                   */
3420 3420                  x86_hm_enter(cur_pp);
3421 3421                  for (prev = NULL; ; prev = hm) {
3422 3422                          hm = hment_walk(cur_pp, &ht, &entry, prev);
3423 3423                          if (hm == NULL) {
3424 3424                                  x86_hm_exit(cur_pp);
3425 3425  
3426 3426  curproc_done:
3427 3427                                  /*
3428 3428                                   * If not part of a larger page, we're done.
3429 3429                                   */
3430 3430                                  if (cur_pp->p_szc <= pg_szcd) {
3431 3431                                          XPV_ALLOW_MIGRATE();
3432 3432                                          return;
3433 3433                                  }
3434 3434  
3435 3435                                  /*
3436 3436                                   * Else check the next larger page size.
3437 3437                                   * hat_page_demote() may decrease p_szc
3438 3438                                   * but that's ok; we'll just take an extra
3439 3439                                   * trip, discover there are no larger mappings,
3440 3440                                   * and return.
3441 3441                                   */
3442 3442                                  ++pg_szcd;
3443 3443                                  cur_pp = PP_GROUPLEADER(cur_pp, pg_szcd);
3444 3444                                  goto next_size;
3445 3445                          }
3446 3446  
3447 3447                          /*
3448 3448                           * If this mapping size matches, remove it.
3449 3449                           */
3450 3450                          level = ht->ht_level;
3451 3451                          if (level == pg_szcd) {
3452 3452                                  if (curhat == NULL || ht->ht_hat == curhat)
3453 3453                                          break;
3454 3454                                  /*
3455 3455                                   * Unloading only the given process, but it's
3456 3456                                   * not the hat for the current process. Leave
3457 3457                                   * entry in place. Also do a safety check to
3458 3458                                   * ensure we don't get into an infinite loop.
3459 3459                                   */
3460 3460                                  if (cnt-- == 0) {
3461 3461                                          x86_hm_exit(cur_pp);
3462 3462                                          goto curproc_done;
3463 3463                                  }
3464 3464                          }
3465 3465                  }
3466 3466  
3467 3467                  /*
3468 3468                   * Remove the mapping list entry for this page.
3469 3469                   * Note this does the x86_hm_exit() for us.
3470 3470                   */
3471 3471                  hm = hati_page_unmap(cur_pp, ht, entry);
3472 3472                  if (hm != NULL)
3473 3473                          hment_free(hm);
3474 3474  
3475 3475                  /* Perform check above for being part of a larger page. */
3476 3476                  if (curhat != NULL)
3477 3477                          goto curproc_done;
3478 3478          }
3479 3479  }
3480 3480  
3481 3481  /*
3482 3482   * Unload translations to a page. If unloadflag is HAT_CURPROC_PGUNLOAD, then
3483 3483   * we only unload the translation for the current process; otherwise all
3484 3484   * translations are unloaded.
3485 3485   */
3486 3486  static int
3487 3487  hati_pageunload(struct page *pp, uint_t pg_szcd, uint_t unloadflag)
3488 3488  {
3489 3489          struct hat      *curhat = NULL;
3490 3490  
3491 3491          /*
3492 3492           * prevent recursion due to kmem_free()
3493 3493           */
3494 3494          ++curthread->t_hatdepth;
3495 3495          ASSERT(curthread->t_hatdepth < 16);
3496 3496  
3497 3497          if (unloadflag == HAT_CURPROC_PGUNLOAD)
3498 3498                  curhat = curthread->t_procp->p_as->a_hat;
3499 3499  
3500 3500          hat_page_inval(pp, pg_szcd, curhat);
3501 3501  
3502 3502          ASSERT(curthread->t_hatdepth > 0);
3503 3503          --curthread->t_hatdepth;
3504 3504          return (0);
3505 3505  }
3506 3506  
3507 3507  int
3508 3508  hat_pageunload(struct page *pp, uint_t unloadflag)
3509 3509  {
3510 3510          ASSERT(PAGE_EXCL(pp));
3511 3511          return (hati_pageunload(pp, 0, unloadflag));
3512 3512  }
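
           /*
            * Illustrative sketch, not part of the original source: a
            * hypothetical caller removing every mapping to an exclusively
            * locked page, e.g. before the page is freed or relocated:
            *
            *      ASSERT(PAGE_EXCL(pp));
            *      (void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD);
            */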
3513 3513  
3514 3514  /*
3515 3515   * Unload all large mappings to pp and reduce by 1 p_szc field of every large
3516 3516   * page level that included pp.
3517 3517   *
3518 3518   * pp must be locked EXCL. Even though no other constituent pages are locked
3519 3519   * it's legal to unload large mappings to pp because all constituent pages of
3520 3520   * large locked mappings have to be locked SHARED.  Therefore, if we have an
3521 3521   * EXCL lock on one of the constituent pages, none of the large mappings to
3522 3522   * pp are locked.
3523 3523   *
3524 3524   * Change (always decrease) p_szc field starting from the last constituent
3525 3525   * page and ending with root constituent page so that root's pszc always shows
3526 3526   * the area where hat_page_demote() may be active.
3527 3527   *
3528 3528   * This mechanism is only used for file system pages where it's not always
3529 3529   * possible to get EXCL locks on all constituent pages to demote the size code
3530 3530   * (as is done for anonymous or kernel large pages).
3531 3531   */
3532 3532  void
3533 3533  hat_page_demote(page_t *pp)
3534 3534  {
3535 3535          uint_t          pszc;
3536 3536          uint_t          rszc;
3537 3537          uint_t          szc;
3538 3538          page_t          *rootpp;
3539 3539          page_t          *firstpp;
3540 3540          page_t          *lastpp;
3541 3541          pgcnt_t         pgcnt;
3542 3542  
3543 3543          ASSERT(PAGE_EXCL(pp));
3544 3544          ASSERT(!PP_ISFREE(pp));
3545 3545          ASSERT(page_szc_lock_assert(pp));
3546 3546  
3547 3547          if (pp->p_szc == 0)
3548 3548                  return;
3549 3549  
3550 3550          rootpp = PP_GROUPLEADER(pp, 1);
3551 3551          (void) hati_pageunload(rootpp, 1, HAT_FORCE_PGUNLOAD);
3552 3552  
3553 3553          /*
3554 3554           * All large mappings to pp are gone
3555 3555           * and no new ones can be set up since pp is locked exclusively.
3556 3556           *
3557 3557           * Lock the root to make sure there's only one hat_page_demote()
3558 3558           * outstanding within the area of this root's pszc.
3559 3559           *
3560 3560           * Second potential hat_page_demote() is already eliminated by upper
3561 3561           * VM layer via page_szc_lock() but we don't rely on it and use our
3562 3562           * own locking (so that upper layer locking can be changed without
3563 3563           * assumptions that hat depends on upper layer VM to prevent multiple
3564 3564           * hat_page_demote() to be issued simultaneously to the same large
3565 3565           * page).
3566 3566           */
3567 3567  again:
3568 3568          pszc = pp->p_szc;
3569 3569          if (pszc == 0)
3570 3570                  return;
3571 3571          rootpp = PP_GROUPLEADER(pp, pszc);
3572 3572          x86_hm_enter(rootpp);
3573 3573          /*
3574 3574           * If root's p_szc is different from pszc we raced with another
3575 3575           * hat_page_demote().  Drop the lock and try to find the root again.
3576 3576           * If root's p_szc is greater than pszc, the previous hat_page_demote()
3577 3577           * is not done yet.  Take and release mlist lock of root's root to wait
3578 3578           * for previous hat_page_demote() to complete.
3579 3579           */
3580 3580          if ((rszc = rootpp->p_szc) != pszc) {
3581 3581                  x86_hm_exit(rootpp);
3582 3582                  if (rszc > pszc) {
3583 3583                          /* p_szc of a locked non free page can't increase */
3584 3584                          ASSERT(pp != rootpp);
3585 3585  
3586 3586                          rootpp = PP_GROUPLEADER(rootpp, rszc);
3587 3587                          x86_hm_enter(rootpp);
3588 3588                          x86_hm_exit(rootpp);
3589 3589                  }
3590 3590                  goto again;
3591 3591          }
3592 3592          ASSERT(pp->p_szc == pszc);
3593 3593  
3594 3594          /*
3595 3595           * Decrement by 1 p_szc of every constituent page of a region that
3596 3596           * covered pp. For example if original szc is 3 it gets changed to 2
3597 3597           * everywhere except in region 2 that covered pp. Region 2 that
3598 3598           * covered pp gets demoted to 1 everywhere except in region 1 that
3599 3599           * covered pp. The region 1 that covered pp is demoted to region
3600 3600           * 0. It's done this way because from region 3 we removed level 3
3601 3601           * mappings, from region 2 that covered pp we removed level 2 mappings
3602 3602           * and from region 1 that covered pp we removed level 1 mappings.  All
3603 3603           * changes are done from high pfns to low pfns so that roots
3604 3604           * are changed last, allowing one to know the largest region where
3605 3605           * hat_page_demote() is still active by only looking at the root page.
3606 3606           *
3607 3607           * This algorithm is implemented in 2 while loops. First loop changes
3608 3608           * p_szc of pages to the right of pp's level 1 region and second
3609 3609           * loop changes p_szc of pages of level 1 region that covers pp
3610 3610           * and all pages to the left of level 1 region that covers pp.
3611 3611           * In the first loop p_szc keeps dropping with every iteration
3612 3612           * and in the second loop it keeps increasing with every iteration.
3613 3613           *
3614 3614           * First loop description: Demote pages to the right of pp outside of
3615 3615           * level 1 region that covers pp.  In every iteration of the while
3616 3616           * loop below find the last page of szc region and the first page of
3617 3617           * (szc - 1) region that is immediately to the right of (szc - 1)
3618 3618           * region that covers pp.  From last such page to first such page
3619 3619           * change every page's szc to szc - 1. Decrement szc and continue
3620 3620           * looping until szc is 1. If pp belongs to the last (szc - 1) region
3621 3621           * of szc region skip to the next iteration.
3622 3622           */
3623 3623          szc = pszc;
3624 3624          while (szc > 1) {
3625 3625                  lastpp = PP_GROUPLEADER(pp, szc);
3626 3626                  pgcnt = page_get_pagecnt(szc);
3627 3627                  lastpp += pgcnt - 1;
3628 3628                  firstpp = PP_GROUPLEADER(pp, (szc - 1));
3629 3629                  pgcnt = page_get_pagecnt(szc - 1);
3630 3630                  if (lastpp - firstpp < pgcnt) {
3631 3631                          szc--;
3632 3632                          continue;
3633 3633                  }
3634 3634                  firstpp += pgcnt;
3635 3635                  while (lastpp != firstpp) {
3636 3636                          ASSERT(lastpp->p_szc == pszc);
3637 3637                          lastpp->p_szc = szc - 1;
3638 3638                          lastpp--;
3639 3639                  }
3640 3640                  firstpp->p_szc = szc - 1;
3641 3641                  szc--;
3642 3642          }
3643 3643  
3644 3644          /*
3645 3645           * Second loop description:
3646 3646           * First iteration changes p_szc to 0 of every
3647 3647           * page of level 1 region that covers pp.
3648 3648           * Subsequent iterations find last page of szc region
3649 3649           * immediately to the left of szc region that covered pp
3650 3650           * and first page of (szc + 1) region that covers pp.
3651 3651           * From last to first page change p_szc of every page to szc.
3652 3652           * Increment szc and continue looping until szc is pszc.
3653 3653           * If pp belongs to the first szc region of the (szc + 1) region,
3654 3654           * skip to the next iteration.
3655 3655           *
3656 3656           */
3657 3657          szc = 0;
3658 3658          while (szc < pszc) {
3659 3659                  firstpp = PP_GROUPLEADER(pp, (szc + 1));
3660 3660                  if (szc == 0) {
3661 3661                          pgcnt = page_get_pagecnt(1);
3662 3662                          lastpp = firstpp + (pgcnt - 1);
3663 3663                  } else {
3664 3664                          lastpp = PP_GROUPLEADER(pp, szc);
3665 3665                          if (firstpp == lastpp) {
3666 3666                                  szc++;
3667 3667                                  continue;
3668 3668                          }
3669 3669                          lastpp--;
3670 3670                          pgcnt = page_get_pagecnt(szc);
3671 3671                  }
3672 3672                  while (lastpp != firstpp) {
3673 3673                          ASSERT(lastpp->p_szc == pszc);
3674 3674                          lastpp->p_szc = szc;
3675 3675                          lastpp--;
3676 3676                  }
3677 3677                  firstpp->p_szc = szc;
3678 3678                  if (firstpp == rootpp)
3679 3679                          break;
3680 3680                  szc++;
3681 3681          }
3682 3682          x86_hm_exit(rootpp);
3683 3683  }
3684 3684  
3685 3685  /*
3686 3686   * get hw stats from hardware into page struct and reset hw stats
3687 3687   * returns attributes of page
3688 3688   * Flags for hat_pagesync, hat_getstat, hat_sync
3689 3689   *
3690 3690   * define       HAT_SYNC_ZERORM         0x01
3691 3691   *
3692 3692   * Additional flags for hat_pagesync
3693 3693   *
3694 3694   * define       HAT_SYNC_STOPON_REF     0x02
3695 3695   * define       HAT_SYNC_STOPON_MOD     0x04
3696 3696   * define       HAT_SYNC_STOPON_RM      0x06
3697 3697   * define       HAT_SYNC_STOPON_SHARED  0x08
3698 3698   */
3699 3699  uint_t
3700 3700  hat_pagesync(struct page *pp, uint_t flags)
3701 3701  {
3702 3702          hment_t         *hm = NULL;
3703 3703          htable_t        *ht;
3704 3704          uint_t          entry;
3705 3705          x86pte_t        old, save_old;
3706 3706          x86pte_t        new;
3707 3707          uchar_t         nrmbits = P_REF|P_MOD|P_RO;
3708 3708          extern ulong_t  po_share;
3709 3709          page_t          *save_pp = pp;
3710 3710          uint_t          pszc = 0;
3711 3711  
3712 3712          ASSERT(PAGE_LOCKED(pp) || panicstr);
3713 3713  
3714 3714          if (PP_ISRO(pp) && (flags & HAT_SYNC_STOPON_MOD))
3715 3715                  return (pp->p_nrm & nrmbits);
3716 3716  
3717 3717          if ((flags & HAT_SYNC_ZERORM) == 0) {
3718 3718  
3719 3719                  if ((flags & HAT_SYNC_STOPON_REF) != 0 && PP_ISREF(pp))
3720 3720                          return (pp->p_nrm & nrmbits);
3721 3721  
3722 3722                  if ((flags & HAT_SYNC_STOPON_MOD) != 0 && PP_ISMOD(pp))
3723 3723                          return (pp->p_nrm & nrmbits);
3724 3724  
3725 3725                  if ((flags & HAT_SYNC_STOPON_SHARED) != 0 &&
3726 3726                      hat_page_getshare(pp) > po_share) {
3727 3727                          if (PP_ISRO(pp))
3728 3728                                  PP_SETREF(pp);
3729 3729                          return (pp->p_nrm & nrmbits);
3730 3730                  }
3731 3731          }
3732 3732  
3733 3733          XPV_DISALLOW_MIGRATE();
3734 3734  next_size:
3735 3735          /*
3736 3736           * walk thru the mapping list syncing (and clearing) ref/mod bits.
3737 3737           */
3738 3738          x86_hm_enter(pp);
3739 3739          while ((hm = hment_walk(pp, &ht, &entry, hm)) != NULL) {
3740 3740                  if (ht->ht_level < pszc)
3741 3741                          continue;
3742 3742                  old = x86pte_get(ht, entry);
3743 3743  try_again:
3744 3744  
3745 3745                  ASSERT(PTE2PFN(old, ht->ht_level) == pp->p_pagenum);
3746 3746  
3747 3747                  if (PTE_GET(old, PT_REF | PT_MOD) == 0)
3748 3748                          continue;
3749 3749  
3750 3750                  save_old = old;
3751 3751                  if ((flags & HAT_SYNC_ZERORM) != 0) {
3752 3752  
3753 3753                          /*
3754 3754                           * Need to clear ref or mod bits. Need to demap
3755 3755                           * to make sure any executing TLBs see cleared bits.
3756 3756                           */
3757 3757                          new = old;
3758 3758                          PTE_CLR(new, PT_REF | PT_MOD);
3759 3759                          old = hati_update_pte(ht, entry, old, new);
3760 3760                          if (old != 0)
3761 3761                                  goto try_again;
3762 3762  
3763 3763                          old = save_old;
3764 3764                  }
3765 3765  
3766 3766                  /*
3767 3767                   * Sync the PTE
3768 3768                   */
3769 3769                  if (!(flags & HAT_SYNC_ZERORM) &&
3770 3770                      PTE_GET(old, PT_SOFTWARE) <= PT_NOSYNC)
3771 3771                          hati_sync_pte_to_page(pp, old, ht->ht_level);
3772 3772  
3773 3773                  /*
3774 3774                   * can stop short if we found a ref'd or mod'd page
3775 3775                   */
3776 3776                  if ((flags & HAT_SYNC_STOPON_MOD) && PP_ISMOD(save_pp) ||
3777 3777                      (flags & HAT_SYNC_STOPON_REF) && PP_ISREF(save_pp)) {
3778 3778                          x86_hm_exit(pp);
3779 3779                          goto done;
3780 3780                  }
3781 3781          }
3782 3782          x86_hm_exit(pp);
3783 3783          while (pszc < pp->p_szc) {
3784 3784                  page_t *tpp;
3785 3785                  pszc++;
3786 3786                  tpp = PP_GROUPLEADER(pp, pszc);
3787 3787                  if (pp != tpp) {
3788 3788                          pp = tpp;
3789 3789                          goto next_size;
3790 3790                  }
3791 3791          }
3792 3792  done:
3793 3793          XPV_ALLOW_MIGRATE();
3794 3794          return (save_pp->p_nrm & nrmbits);
3795 3795  }
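
           /*
            * Illustrative sketch, not part of the original source: a
            * hypothetical pageout-style scanner harvesting and clearing the
            * ref/mod bits of a locked page:
            *
            *      uint_t nrm = hat_pagesync(pp, HAT_SYNC_ZERORM);
            *
            *      if (nrm & P_MOD)
            *              ... the page was modified since the last sync ...
            */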
3796 3796  
3797 3797  /*
3798 3798   * returns approx number of mappings to this pp.  A return of 0 implies
3799 3799   * there are no mappings to the page.
3800 3800   */
3801 3801  ulong_t
3802 3802  hat_page_getshare(page_t *pp)
3803 3803  {
3804 3804          uint_t cnt;
3805 3805          cnt = hment_mapcnt(pp);
3806 3806  #if defined(__amd64)
3807 3807          if (vpm_enable && pp->p_vpmref) {
3808 3808                  cnt += 1;
3809 3809          }
3810 3810  #endif
3811 3811          return (cnt);
3812 3812  }
3813 3813  
3814 3814  /*
3815 3815   * Return 1 if the number of mappings exceeds sh_thresh. Return 0
3816 3816   * otherwise.
3817 3817   */
3818 3818  int
3819 3819  hat_page_checkshare(page_t *pp, ulong_t sh_thresh)
3820 3820  {
3821 3821          return (hat_page_getshare(pp) > sh_thresh);
3822 3822  }
3823 3823  
3824 3824  /*
3825 3825   * hat_softlock isn't supported anymore
3826 3826   */
3827 3827  /*ARGSUSED*/
3828 3828  faultcode_t
3829 3829  hat_softlock(
3830 3830          hat_t *hat,
3831 3831          caddr_t addr,
3832 3832          size_t *len,
3833 3833          struct page **page_array,
3834 3834          uint_t flags)
3835 3835  {
3836 3836          return (FC_NOSUPPORT);
3837 3837  }
3838 3838  
3839 3839  
3840 3840  
3841 3841  /*
3842 3842   * Routine to expose supported HAT features to platform independent code.
3843 3843   */
3844 3844  /*ARGSUSED*/
3845 3845  int
3846 3846  hat_supported(enum hat_features feature, void *arg)
3847 3847  {
3848 3848          switch (feature) {
3849 3849  
3850 3850          case HAT_SHARED_PT:     /* this is really ISM */
3851 3851                  return (1);
3852 3852  
3853 3853          case HAT_DYNAMIC_ISM_UNMAP:
3854 3854                  return (0);
3855 3855  
3856 3856          case HAT_VMODSORT:
3857 3857                  return (1);
3858 3858  
3859 3859          case HAT_SHARED_REGIONS:
3860 3860                  return (0);
3861 3861  
3862 3862          default:
3863 3863                  panic("hat_supported() - unknown feature");
3864 3864          }
3865 3865          return (0);
3866 3866  }
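
           /*
            * Illustrative sketch, not part of the original source: platform
            * independent code might hypothetically guard an ISM unmap
            * optimization with:
            *
            *      if (hat_supported(HAT_DYNAMIC_ISM_UNMAP, NULL))
            *              ... not taken on x86, which returns 0 above ...
            */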
3867 3867  
3868 3868  /*
3869 3869   * Called when a thread is exiting and has been switched to the kernel AS
3870 3870   */
3871 3871  void
3872 3872  hat_thread_exit(kthread_t *thd)
3873 3873  {
3874 3874          ASSERT(thd->t_procp->p_as == &kas);
3875 3875          XPV_DISALLOW_MIGRATE();
3876 3876          hat_switch(thd->t_procp->p_as->a_hat);
3877 3877          XPV_ALLOW_MIGRATE();
3878 3878  }
3879 3879  
3880 3880  /*
3881 3881   * Set up the given brand new hat structure as the new HAT on this cpu's mmu.
3882 3882   */
3883 3883  /*ARGSUSED*/
3884 3884  void
3885 3885  hat_setup(hat_t *hat, int flags)
3886 3886  {
3887 3887          XPV_DISALLOW_MIGRATE();
3888 3888          kpreempt_disable();
3889 3889  
3890 3890          hat_switch(hat);
3891 3891  
3892 3892          kpreempt_enable();
3893 3893          XPV_ALLOW_MIGRATE();
3894 3894  }
3895 3895  
3896 3896  /*
3897 3897   * Prepare for a CPU private mapping for the given address.
3898 3898   *
3899 3899   * The address can only be used from a single CPU and can be remapped
3900 3900   * using hat_mempte_remap().  Return the address of the PTE.
3901 3901   *
3902 3902   * We do the htable_create() if necessary and increment the valid count so
3903 3903   * the htable can't disappear.  We also hat_devload() the page table into
3904 3904   * kernel so that the PTE is quickly accessed.
3905 3905   */
3906 3906  hat_mempte_t
3907 3907  hat_mempte_setup(caddr_t addr)
3908 3908  {
3909 3909          uintptr_t       va = (uintptr_t)addr;
3910 3910          htable_t        *ht;
3911 3911          uint_t          entry;
3912 3912          x86pte_t        oldpte;
3913 3913          hat_mempte_t    p;
3914 3914  
3915 3915          ASSERT(IS_PAGEALIGNED(va));
3916 3916          ASSERT(!IN_VA_HOLE(va));
3917 3917          ++curthread->t_hatdepth;
3918 3918          XPV_DISALLOW_MIGRATE();
3919 3919          ht = htable_getpte(kas.a_hat, va, &entry, &oldpte, 0);
3920 3920          if (ht == NULL) {
3921 3921                  ht = htable_create(kas.a_hat, va, 0, NULL);
3922 3922                  entry = htable_va2entry(va, ht);
3923 3923                  ASSERT(ht->ht_level == 0);
3924 3924                  oldpte = x86pte_get(ht, entry);
3925 3925          }
3926 3926          if (PTE_ISVALID(oldpte))
3927 3927                  panic("hat_mempte_setup(): address already mapped "
3928 3928                      "ht=%p, entry=%d, pte=" FMT_PTE, (void *)ht, entry, oldpte);
3929 3929  
3930 3930          /*
3931 3931           * increment ht_valid_cnt so that the pagetable can't disappear
3932 3932           */
3933 3933          HTABLE_INC(ht->ht_valid_cnt);
3934 3934  
3935 3935          /*
3936 3936           * return the PTE physical address to the caller.
3937 3937           */
3938 3938          htable_release(ht);
3939 3939          XPV_ALLOW_MIGRATE();
3940 3940          p = PT_INDEX_PHYSADDR(pfn_to_pa(ht->ht_pfn), entry);
3941 3941          --curthread->t_hatdepth;
3942 3942          return (p);
3943 3943  }
3944 3944  
3945 3945  /*
3946 3946   * Release a CPU private mapping for the given address.
3947 3947   * We decrement the htable valid count so it might be destroyed.
3948 3948   */
3949 3949  /*ARGSUSED1*/
3950 3950  void
3951 3951  hat_mempte_release(caddr_t addr, hat_mempte_t pte_pa)
3952 3952  {
3953 3953          htable_t        *ht;
3954 3954  
3955 3955          XPV_DISALLOW_MIGRATE();
3956 3956          /*
3957 3957           * invalidate any left over mapping and decrement the htable valid count
3958 3958           */
3959 3959  #ifdef __xpv
3960 3960          if (HYPERVISOR_update_va_mapping((uintptr_t)addr, 0,
3961 3961              UVMF_INVLPG | UVMF_LOCAL))
3962 3962                  panic("HYPERVISOR_update_va_mapping() failed");
3963 3963  #else
3964 3964          {
3965 3965                  x86pte_t *pteptr;
3966 3966  
3967 3967                  pteptr = x86pte_mapin(mmu_btop(pte_pa),
3968 3968                      (pte_pa & MMU_PAGEOFFSET) >> mmu.pte_size_shift, NULL);
3969 3969                  if (mmu.pae_hat)
3970 3970                          *pteptr = 0;
3971 3971                  else
3972 3972                          *(x86pte32_t *)pteptr = 0;
3973 3973                  mmu_tlbflush_entry(addr);
3974 3974                  x86pte_mapout();
3975 3975          }
3976 3976  #endif
3977 3977  
3978 3978          ht = htable_getpte(kas.a_hat, ALIGN2PAGE(addr), NULL, NULL, 0);
3979 3979          if (ht == NULL)
3980 3980                  panic("hat_mempte_release(): invalid address");
3981 3981          ASSERT(ht->ht_level == 0);
3982 3982          HTABLE_DEC(ht->ht_valid_cnt);
3983 3983          htable_release(ht);
3984 3984          XPV_ALLOW_MIGRATE();
3985 3985  }
3986 3986  
3987 3987  /*
3988 3988   * Apply a temporary CPU private mapping to a page. We flush the TLB only
3989 3989   * on this CPU, so this ought to have been called with preemption disabled.
3990 3990   */
3991 3991  void
3992 3992  hat_mempte_remap(
3993 3993          pfn_t           pfn,
3994 3994          caddr_t         addr,
3995 3995          hat_mempte_t    pte_pa,
3996 3996          uint_t          attr,
3997 3997          uint_t          flags)
3998 3998  {
3999 3999          uintptr_t       va = (uintptr_t)addr;
4000 4000          x86pte_t        pte;
4001 4001  
4002 4002          /*
4003 4003           * Remap the given PTE to the new page's PFN. Invalidate only
4004 4004           * on this CPU.
4005 4005           */
4006 4006  #ifdef DEBUG
4007 4007          htable_t        *ht;
4008 4008          uint_t          entry;
4009 4009  
4010 4010          ASSERT(IS_PAGEALIGNED(va));
4011 4011          ASSERT(!IN_VA_HOLE(va));
4012 4012          ht = htable_getpte(kas.a_hat, va, &entry, NULL, 0);
4013 4013          ASSERT(ht != NULL);
4014 4014          ASSERT(ht->ht_level == 0);
4015 4015          ASSERT(ht->ht_valid_cnt > 0);
4016 4016          ASSERT(ht->ht_pfn == mmu_btop(pte_pa));
4017 4017          htable_release(ht);
4018 4018  #endif
4019 4019          XPV_DISALLOW_MIGRATE();
4020 4020          pte = hati_mkpte(pfn, attr, 0, flags);
4021 4021  #ifdef __xpv
4022 4022          if (HYPERVISOR_update_va_mapping(va, pte, UVMF_INVLPG | UVMF_LOCAL))
4023 4023                  panic("HYPERVISOR_update_va_mapping() failed");
4024 4024  #else
4025 4025          {
4026 4026                  x86pte_t *pteptr;
4027 4027  
4028 4028                  pteptr = x86pte_mapin(mmu_btop(pte_pa),
4029 4029                      (pte_pa & MMU_PAGEOFFSET) >> mmu.pte_size_shift, NULL);
4030 4030                  if (mmu.pae_hat)
4031 4031                          *(x86pte_t *)pteptr = pte;
4032 4032                  else
4033 4033                          *(x86pte32_t *)pteptr = (x86pte32_t)pte;
4034 4034                  mmu_tlbflush_entry(addr);
4035 4035                  x86pte_mapout();
4036 4036          }
4037 4037  #endif
4038 4038          XPV_ALLOW_MIGRATE();
4039 4039  }
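
           /*
            * Illustrative sketch (editor's addition, not part of the original
            * source): the intended lifecycle of the hat_mempte_*() routines
            * above.  The caller, the scratch va and the attr/flag choices are
            * hypothetical; the signatures match the definitions in this file.
            */
           static void
           example_mempte_lifecycle(caddr_t scratch_va, pfn_t pfn)
           {
                   hat_mempte_t pte_pa;

                   pte_pa = hat_mempte_setup(scratch_va);  /* reserve the PTE */

                   kpreempt_disable();             /* remap flushes this CPU only */
                   hat_mempte_remap(pfn, scratch_va, pte_pa,
                       PROT_READ | PROT_WRITE, HAT_LOAD_NOCONSIST);
                   /* ... touch the page through scratch_va on this CPU ... */
                   kpreempt_enable();

                   hat_mempte_release(scratch_va, pte_pa);
           }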
4040 4040  
4041 4041  
4042 4042  
4043 4043  /*
4044 4044   * Hat locking functions
4045 4045   * XXX - these two functions are currently being used by hatstats
4046 4046   *      they can be removed by using a per-as mutex for hatstats.
4047 4047   */
4048 4048  void
4049 4049  hat_enter(hat_t *hat)
4050 4050  {
4051 4051          mutex_enter(&hat->hat_mutex);
4052 4052  }
4053 4053  
4054 4054  void
4055 4055  hat_exit(hat_t *hat)
4056 4056  {
4057 4057          mutex_exit(&hat->hat_mutex);
4058 4058  }
4059 4059  
4060 4060  /*
4061 4061   * HAT part of cpu initialization.
4062 4062   */
4063 4063  void
4064 4064  hat_cpu_online(struct cpu *cpup)
4065 4065  {
4066 4066          if (cpup != CPU) {
4067 4067                  x86pte_cpu_init(cpup);
4068 4068                  hat_vlp_setup(cpup);
4069 4069          }
4070 4070          CPUSET_ATOMIC_ADD(khat_cpuset, cpup->cpu_id);
4071 4071  }
4072 4072  
4073 4073  /*
4074 4074   * HAT part of cpu deletion.
4075 4075   * (currently, we only call this after the cpu is safely passivated.)
4076 4076   */
4077 4077  void
4078 4078  hat_cpu_offline(struct cpu *cpup)
4079 4079  {
4080 4080          ASSERT(cpup != CPU);
4081 4081  
4082 4082          CPUSET_ATOMIC_DEL(khat_cpuset, cpup->cpu_id);
4083 4083          hat_vlp_teardown(cpup);
4084 4084          x86pte_cpu_fini(cpup);
4085 4085  }
4086 4086  
4087 4087  /*
4088 4088   * Function called after all CPUs are brought online.
4089 4089   * Used to remove low address boot mappings.
4090 4090   */
4091 4091  void
4092 4092  clear_boot_mappings(uintptr_t low, uintptr_t high)
4093 4093  {
4094 4094          uintptr_t vaddr = low;
4095 4095          htable_t *ht = NULL;
4096 4096          level_t level;
4097 4097          uint_t entry;
4098 4098          x86pte_t pte;
4099 4099  
4100 4100          /*
4101 4101           * On the 1st CPU we can unload the prom mappings; basically we blow
4102 4102           * away all virtual mappings under _userlimit.
4103 4103           */
4104 4104          while (vaddr < high) {
4105 4105                  pte = htable_walk(kas.a_hat, &ht, &vaddr, high);
4106 4106                  if (ht == NULL)
4107 4107                          break;
4108 4108  
4109 4109                  level = ht->ht_level;
4110 4110                  entry = htable_va2entry(vaddr, ht);
4111 4111                  ASSERT(level <= mmu.max_page_level);
4112 4112                  ASSERT(PTE_ISPAGE(pte, level));
4113 4113  
4114 4114                  /*
4115 4115                   * Unload the mapping from the page tables.
4116 4116                   */
4117 4117                  (void) x86pte_inval(ht, entry, 0, NULL, B_TRUE);
4118 4118                  ASSERT(ht->ht_valid_cnt > 0);
4119 4119                  HTABLE_DEC(ht->ht_valid_cnt);
4120 4120                  PGCNT_DEC(ht->ht_hat, ht->ht_level);
4121 4121  
4122 4122                  vaddr += LEVEL_SIZE(ht->ht_level);
4123 4123          }
4124 4124          if (ht)
4125 4125                  htable_release(ht);
4126 4126  }
4127 4127  
4128 4128  /*
4129 4129   * Atomically update a new translation for a single page.  If the
4130 4130   * currently installed PTE doesn't match the value we expect to find,
4131 4131   * it's not updated and we return the PTE we found.
4132 4132   *
4133 4133   * If activating nosync or NOWRITE and the page was modified we need to sync
4134 4134   * with the page_t. Also sync with page_t if clearing ref/mod bits.
4135 4135   */
4136 4136  static x86pte_t
4137 4137  hati_update_pte(htable_t *ht, uint_t entry, x86pte_t expected, x86pte_t new)
4138 4138  {
4139 4139          page_t          *pp;
4140 4140          uint_t          rm = 0;
4141 4141          x86pte_t        replaced;
4142 4142  
4143 4143          if (PTE_GET(expected, PT_SOFTWARE) < PT_NOSYNC &&
4144 4144              PTE_GET(expected, PT_MOD | PT_REF) &&
4145 4145              (PTE_GET(new, PT_NOSYNC) || !PTE_GET(new, PT_WRITABLE) ||
4146 4146              !PTE_GET(new, PT_MOD | PT_REF))) {
4147 4147  
4148 4148                  ASSERT(!pfn_is_foreign(PTE2PFN(expected, ht->ht_level)));
4149 4149                  pp = page_numtopp_nolock(PTE2PFN(expected, ht->ht_level));
4150 4150                  ASSERT(pp != NULL);
4151 4151                  if (PTE_GET(expected, PT_MOD))
4152 4152                          rm |= P_MOD;
4153 4153                  if (PTE_GET(expected, PT_REF))
4154 4154                          rm |= P_REF;
4155 4155                  PTE_CLR(new, PT_MOD | PT_REF);
4156 4156          }
4157 4157  
4158 4158          replaced = x86pte_update(ht, entry, expected, new);
4159 4159          if (replaced != expected)
4160 4160                  return (replaced);
4161 4161  
4162 4162          if (rm) {
4163 4163                  /*
4164 4164                   * sync to all constituent pages of a large page
4165 4165                   */
4166 4166                  pgcnt_t pgcnt = page_get_pagecnt(ht->ht_level);
4167 4167                  ASSERT(IS_P2ALIGNED(pp->p_pagenum, pgcnt));
4168 4168                  while (pgcnt-- > 0) {
4169 4169                          /*
4170 4170                           * hat_page_demote() can't decrease
4171 4171                           * pszc below this mapping size
4172 4172                           * since large mapping existed after we
4173 4173                           * took mlist lock.
4174 4174                           */
4175 4175                          ASSERT(pp->p_szc >= ht->ht_level);
4176 4176                          hat_page_setattr(pp, rm);
4177 4177                          ++pp;
4178 4178                  }
4179 4179          }
4180 4180  
4181 4181          return (0);
4182 4182  }
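
           /*
            * Illustrative sketch (editor's addition, not part of the original
            * source): how a caller uses the return convention of
            * hati_update_pte() above.  A zero return means the new PTE was
            * installed; a non-zero return is the PTE actually found, which a
            * real caller would fold into a recomputed "new" before retrying.
            * The wrapper below is hypothetical and keeps "new" fixed for
            * brevity.
            */
           static void
           example_pte_update_retry(htable_t *ht, uint_t entry, x86pte_t new)
           {
                   x86pte_t        expected = x86pte_get(ht, entry);
                   x86pte_t        found;

                   while ((found = hati_update_pte(ht, entry, expected, new)) != 0)
                           expected = found;       /* lost the race; retry */
           }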
4183 4183  
4184 4184  /* ARGSUSED */
4185 4185  void
4186 4186  hat_join_srd(struct hat *hat, vnode_t *evp)
4187 4187  {
4188 4188  }
4189 4189  
4190 4190  /* ARGSUSED */
4191 4191  hat_region_cookie_t
4192 4192  hat_join_region(struct hat *hat,
4193 4193      caddr_t r_saddr,
4194 4194      size_t r_size,
4195 4195      void *r_obj,
4196 4196      u_offset_t r_objoff,
4197 4197      uchar_t r_perm,
4198 4198      uchar_t r_pgszc,
4199 4199      hat_rgn_cb_func_t r_cb_function,
4200 4200      uint_t flags)
4201 4201  {
4202 4202          panic("No shared region support on x86");
4203 4203          return (HAT_INVALID_REGION_COOKIE);
4204 4204  }
4205 4205  
4206 4206  /* ARGSUSED */
4207 4207  void
4208 4208  hat_leave_region(struct hat *hat, hat_region_cookie_t rcookie, uint_t flags)
4209 4209  {
4210 4210          panic("No shared region support on x86");
4211 4211  }
4212 4212  
4213 4213  /* ARGSUSED */
4214 4214  void
4215 4215  hat_dup_region(struct hat *hat, hat_region_cookie_t rcookie)
4216 4216  {
4217 4217          panic("No shared region support on x86");
4218 4218  }
4219 4219  
4220 4220  
4221 4221  /*
4222 4222   * Kernel Physical Mapping (kpm) facility
4223 4223   *
4224 4224   * Most of the routines needed to support segkpm are almost no-ops on the
4225 4225   * x86 platform.  We map in the entire segment when it is created and leave
4226 4226   * it mapped in, so there is no additional work required to set up and tear
4227 4227   * down individual mappings.  All of these routines were created to support
4228 4228   * SPARC platforms that have to avoid aliasing in their virtually indexed
4229 4229   * caches.
4230 4230   *
4231 4231   * Most of the routines have sanity checks in them (e.g. verifying that the
4232 4232   * passed-in page is locked).  We don't actually care about most of these
4233 4233   * checks on x86, but we leave them in place to identify problems in the
4234 4234   * upper levels.
4235 4235   */
4236 4236  
4237 4237  /*
4238 4238   * Map in a locked page and return the vaddr.
4239 4239   */
4240 4240  /*ARGSUSED*/
4241 4241  caddr_t
4242 4242  hat_kpm_mapin(struct page *pp, struct kpme *kpme)
4243 4243  {
4244 4244          caddr_t         vaddr;
4245 4245  
4246 4246  #ifdef DEBUG
4247 4247          if (kpm_enable == 0) {
4248 4248                  cmn_err(CE_WARN, "hat_kpm_mapin: kpm_enable not set\n");
4249 4249                  return ((caddr_t)NULL);
4250 4250          }
4251 4251  
4252 4252          if (pp == NULL || PAGE_LOCKED(pp) == 0) {
4253 4253                  cmn_err(CE_WARN, "hat_kpm_mapin: pp zero or not locked\n");
4254 4254                  return ((caddr_t)NULL);
4255 4255          }
4256 4256  #endif
4257 4257  
4258 4258          vaddr = hat_kpm_page2va(pp, 1);
4259 4259  
4260 4260          return (vaddr);
4261 4261  }
4262 4262  
4263 4263  /*
4264 4264   * Mapout a locked page.
4265 4265   */
4266 4266  /*ARGSUSED*/
4267 4267  void
4268 4268  hat_kpm_mapout(struct page *pp, struct kpme *kpme, caddr_t vaddr)
4269 4269  {
4270 4270  #ifdef DEBUG
4271 4271          if (kpm_enable == 0) {
4272 4272                  cmn_err(CE_WARN, "hat_kpm_mapout: kpm_enable not set\n");
4273 4273                  return;
4274 4274          }
4275 4275  
4276 4276          if (IS_KPM_ADDR(vaddr) == 0) {
4277 4277                  cmn_err(CE_WARN, "hat_kpm_mapout: no kpm address\n");
4278 4278                  return;
4279 4279          }
4280 4280  
4281 4281          if (pp == NULL || PAGE_LOCKED(pp) == 0) {
4282 4282                  cmn_err(CE_WARN, "hat_kpm_mapout: page zero or not locked\n");
4283 4283                  return;
4284 4284          }
4285 4285  #endif
4286 4286  }
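
           /*
            * Illustrative sketch (editor's addition, not part of the original
            * source): the usual segkpm access pattern built on the two
            * routines above.  The page and destination are hypothetical; the
            * page must already be locked by the caller.
            */
           static void
           example_kpm_copy_out(page_t *pp, void *dst)
           {
                   caddr_t kva;

                   kva = hat_kpm_mapin(pp, NULL);
                   bcopy(kva, dst, PAGESIZE);
                   hat_kpm_mapout(pp, NULL, kva);
           }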
4287 4287  
4288 4288  /*
4289 4289   * hat_kpm_mapin_pfn is used to obtain a kpm mapping for physical
4290 4290   * memory addresses that are not described by a page_t.  It can
4291 4291   * also be used for normal pages that are not locked, but beware
4292 4292   * this is dangerous - no locking is performed, so the identity of
4293 4293   * the page could change.  hat_kpm_mapin_pfn is not supported when
4294 4294   * vac_colors > 1, because the chosen va depends on the page identity,
4295 4295   * which could change.
4296 4296   * The caller must pass only pfns for valid physical addresses; violating
4297 4297   * this rule will cause a panic.
4298 4298   */
4299 4299  caddr_t
4300 4300  hat_kpm_mapin_pfn(pfn_t pfn)
4301 4301  {
4302 4302          caddr_t paddr, vaddr;
4303 4303  
4304 4304          if (kpm_enable == 0)
4305 4305                  return ((caddr_t)NULL);
4306 4306  
4307 4307          paddr = (caddr_t)ptob(pfn);
4308 4308          vaddr = (uintptr_t)kpm_vbase + paddr;
4309 4309  
4310 4310          return ((caddr_t)vaddr);
4311 4311  }
4312 4312  
4313 4313  /*ARGSUSED*/
4314 4314  void
4315 4315  hat_kpm_mapout_pfn(pfn_t pfn)
4316 4316  {
4317 4317          /* empty */
4318 4318  }
4319 4319  
4320 4320  /*
4321 4321   * Return the kpm virtual address for a specific pfn
4322 4322   */
4323 4323  caddr_t
4324 4324  hat_kpm_pfn2va(pfn_t pfn)
4325 4325  {
4326 4326          uintptr_t vaddr = (uintptr_t)kpm_vbase + mmu_ptob(pfn);
4327 4327  
4328 4328          ASSERT(!pfn_is_foreign(pfn));
4329 4329          return ((caddr_t)vaddr);
4330 4330  }
4331 4331  
4332 4332  /*
4333 4333   * Return the kpm virtual address for the page at pp.
4334 4334   */
4335 4335  /*ARGSUSED*/
4336 4336  caddr_t
4337 4337  hat_kpm_page2va(struct page *pp, int checkswap)
4338 4338  {
4339 4339          return (hat_kpm_pfn2va(pp->p_pagenum));
4340 4340  }
4341 4341  
4342 4342  /*
4343 4343   * Return the page frame number for the kpm virtual address vaddr.
4344 4344   */
4345 4345  pfn_t
4346 4346  hat_kpm_va2pfn(caddr_t vaddr)
4347 4347  {
4348 4348          pfn_t           pfn;
4349 4349  
4350 4350          ASSERT(IS_KPM_ADDR(vaddr));
4351 4351  
4352 4352          pfn = (pfn_t)btop(vaddr - kpm_vbase);
4353 4353  
4354 4354          return (pfn);
4355 4355  }
4356 4356  
4357 4357  
4358 4358  /*
4359 4359   * Return the page for the kpm virtual address vaddr.
4360 4360   */
4361 4361  page_t *
4362 4362  hat_kpm_vaddr2page(caddr_t vaddr)
4363 4363  {
4364 4364          pfn_t           pfn;
4365 4365  
4366 4366          ASSERT(IS_KPM_ADDR(vaddr));
4367 4367  
4368 4368          pfn = hat_kpm_va2pfn(vaddr);
4369 4369  
4370 4370          return (page_numtopp_nolock(pfn));
4371 4371  }
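
           /*
            * Illustrative sketch (editor's addition, not part of the original
            * source): the kpm conversions above are simple offsets from
            * kpm_vbase and invert one another for any valid, non-foreign pfn.
            */
           static void
           example_kpm_roundtrip(pfn_t pfn)
           {
                   caddr_t va = hat_kpm_pfn2va(pfn);

                   ASSERT(hat_kpm_va2pfn(va) == pfn);
                   ASSERT(hat_kpm_vaddr2page(va) == page_numtopp_nolock(pfn));
           }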
4372 4372  
4373 4373  /*
4374 4374   * hat_kpm_fault is called from segkpm_fault when we take a page fault on a
4375 4375   * KPM page.  This should never happen on x86
4376 4376   */
4377 4377  int
4378 4378  hat_kpm_fault(hat_t *hat, caddr_t vaddr)
4379 4379  {
4380 4380          panic("pagefault in seg_kpm.  hat: 0x%p  vaddr: 0x%p",
4381 4381              (void *)hat, (void *)vaddr);
4382 4382  
4383 4383          return (0);
4384 4384  }
4385 4385  
4386 4386  /*ARGSUSED*/
4387 4387  void
4388 4388  hat_kpm_mseghash_clear(int nentries)
4389 4389  {}
4390 4390  
4391 4391  /*ARGSUSED*/
4392 4392  void
4393 4393  hat_kpm_mseghash_update(pgcnt_t inx, struct memseg *msp)
4394 4394  {}
4395 4395  
4396 4396  #ifndef __xpv
4397 4397  void
4398 4398  hat_kpm_addmem_mseg_update(struct memseg *msp, pgcnt_t nkpmpgs,
4399 4399          offset_t kpm_pages_off)
4400 4400  {
4401 4401          _NOTE(ARGUNUSED(nkpmpgs, kpm_pages_off));
4402 4402          pfn_t base, end;
4403 4403  
4404 4404          /*
4405 4405           * kphysm_add_memory_dynamic() does not set nkpmpgs
4406 4406           * when page_t memory is externally allocated.  That
4407 4407           * code must properly calculate nkpmpgs in all cases
4408 4408           * if nkpmpgs needs to be used at some point.
4409 4409           */
4410 4410  
4411 4411          /*
4412 4412           * The meta (page_t) pages for dynamically added memory are allocated
4413 4413           * either from the incoming memory itself or from existing memory.
4414 4414           * In the former case the base of the incoming pages will be different
4415 4415           * than the base of the dynamic segment so call memseg_get_start() to
4416 4416           * get the actual base of the incoming memory for each case.
4417 4417           */
4418 4418  
4419 4419          base = memseg_get_start(msp);
4420 4420          end = msp->pages_end;
4421 4421  
4422 4422          hat_devload(kas.a_hat, kpm_vbase + mmu_ptob(base),
4423 4423              mmu_ptob(end - base), base, PROT_READ | PROT_WRITE,
4424 4424              HAT_LOAD | HAT_LOAD_LOCK | HAT_LOAD_NOCONSIST);
4425 4425  }
4426 4426  
4427 4427  void
4428 4428  hat_kpm_addmem_mseg_insert(struct memseg *msp)
4429 4429  {
4430 4430          _NOTE(ARGUNUSED(msp));
4431 4431  }
4432 4432  
4433 4433  void
4434 4434  hat_kpm_addmem_memsegs_update(struct memseg *msp)
4435 4435  {
4436 4436          _NOTE(ARGUNUSED(msp));
4437 4437  }
4438 4438  
4439 4439  /*
4440 4440   * Return end of metadata for an already setup memseg.
4441 4441   * X86 platforms don't need per-page meta data to support kpm.
4442 4442   */
4443 4443  caddr_t
4444 4444  hat_kpm_mseg_reuse(struct memseg *msp)
4445 4445  {
4446 4446          return ((caddr_t)msp->epages);
4447 4447  }
4448 4448  
4449 4449  void
4450 4450  hat_kpm_delmem_mseg_update(struct memseg *msp, struct memseg **mspp)
4451 4451  {
4452 4452          _NOTE(ARGUNUSED(msp, mspp));
4453 4453          ASSERT(0);
4454 4454  }
4455 4455  
4456 4456  void
4457 4457  hat_kpm_split_mseg_update(struct memseg *msp, struct memseg **mspp,
4458 4458          struct memseg *lo, struct memseg *mid, struct memseg *hi)
4459 4459  {
4460 4460          _NOTE(ARGUNUSED(msp, mspp, lo, mid, hi));
4461 4461          ASSERT(0);
4462 4462  }
4463 4463  
4464 4464  /*
4465 4465   * Walk the memsegs chain, applying func to each memseg span.
4466 4466   */
4467 4467  void
4468 4468  hat_kpm_walk(void (*func)(void *, void *, size_t), void *arg)
4469 4469  {
4470 4470          pfn_t   pbase, pend;
4471 4471          void    *base;
4472 4472          size_t  size;
4473 4473          struct memseg *msp;
4474 4474  
4475 4475          for (msp = memsegs; msp; msp = msp->next) {
4476 4476                  pbase = msp->pages_base;
4477 4477                  pend = msp->pages_end;
4478 4478                  base = ptob(pbase) + kpm_vbase;
4479 4479                  size = ptob(pend - pbase);
4480 4480                  func(arg, base, size);
4481 4481          }
4482 4482  }
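
           /*
            * Illustrative sketch (editor's addition, not part of the original
            * source): a callback of the shape hat_kpm_walk() expects.  The
            * callback name and accumulator are hypothetical; a caller would
            * pass it as hat_kpm_walk(example_count_kpm_bytes, &total).
            */
           /*ARGSUSED*/
           static void
           example_count_kpm_bytes(void *arg, void *base, size_t size)
           {
                   size_t *total = arg;

                   /* base is the kpm va of this memseg span, size its length */
                   *total += size;
           }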
4483 4483  
4484 4484  #else   /* __xpv */
4485 4485  
4486 4486  /*
4487 4487   * There are specific Hypervisor calls to establish and remove mappings
4488 4488   * to grant table references and the privcmd driver. We have to ensure
4489 4489   * that a page table actually exists.
4490 4490   */
4491 4491  void
4492 4492  hat_prepare_mapping(hat_t *hat, caddr_t addr, uint64_t *pte_ma)
4493 4493  {
4494 4494          maddr_t base_ma;
4495 4495          htable_t *ht;
4496 4496          uint_t entry;
4497 4497  
4498 4498          ASSERT(IS_P2ALIGNED((uintptr_t)addr, MMU_PAGESIZE));
4499 4499          XPV_DISALLOW_MIGRATE();
4500 4500          ht = htable_create(hat, (uintptr_t)addr, 0, NULL);
4501 4501  
4502 4502          /*
4503 4503           * if an address for pte_ma is passed in, return the MA of the pte
4504 4504           * for this specific address.  This address is only valid as long
4505 4505           * as the htable stays locked.
4506 4506           */
4507 4507          if (pte_ma != NULL) {
4508 4508                  entry = htable_va2entry((uintptr_t)addr, ht);
4509 4509                  base_ma = pa_to_ma(ptob(ht->ht_pfn));
4510 4510                  *pte_ma = base_ma + (entry << mmu.pte_size_shift);
4511 4511          }
4512 4512          XPV_ALLOW_MIGRATE();
4513 4513  }
4514 4514  
4515 4515  void
4516 4516  hat_release_mapping(hat_t *hat, caddr_t addr)
4517 4517  {
4518 4518          htable_t *ht;
4519 4519  
4520 4520          ASSERT(IS_P2ALIGNED((uintptr_t)addr, MMU_PAGESIZE));
4521 4521          XPV_DISALLOW_MIGRATE();
4522 4522          ht = htable_lookup(hat, (uintptr_t)addr, 0);
4523 4523          ASSERT(ht != NULL);
4524 4524          ASSERT(ht->ht_busy >= 2);
4525 4525          htable_release(ht);
4526 4526          htable_release(ht);
4527 4527          XPV_ALLOW_MIGRATE();
4528 4528  }
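
           /*
            * Illustrative sketch (editor's addition, not part of the original
            * source): the expected pairing of hat_prepare_mapping() and
            * hat_release_mapping() around a hypervisor mapping update.  The
            * grant-table/privcmd specifics are omitted.
            */
           static void
           example_xpv_mapping_pair(hat_t *hat, caddr_t addr)
           {
                   uint64_t pte_ma;

                   hat_prepare_mapping(hat, addr, &pte_ma);
                   /* ... hand pte_ma to the hypervisor update call here ... */
                   hat_release_mapping(hat, addr);
           }
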
4529 4529  #endif  /* __xpv */
  